/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FSRRESTORE		0x5c
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define TLBSTATUS_SACTIVE		(1 << 0)
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

/* Definitions for implementation-defined registers */
#define ACTLR_QCOM_OSH_SHIFT		28
#define ACTLR_QCOM_OSH			1

#define ACTLR_QCOM_ISH_SHIFT		29
#define ACTLR_QCOM_ISH			1

#define ACTLR_QCOM_NSH_SHIFT		30
#define ACTLR_QCOM_NSH			1

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define IMPL_DEF1_MICRO_MMU_CTRL	0
#define MICRO_MMU_CTRL_LOCAL_HALT_REQ	(1 << 2)
#define MICRO_MMU_CTRL_IDLE		(1 << 3)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
Will Deacon4cf740b2014-07-14 19:47:39 +0100309
enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
#define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;

	u32				cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
	unsigned int			num_impl_def_attach_registers;

	spinlock_t			atos_lock;

	/* protects idr */
	struct mutex			idr_mutex;
	struct idr			asid_idr;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	u32				procid;
	u16				asid;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff
#define INVALID_CBNDX			0xff
#define INVALID_ASID			0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID			0xff
#define ARM_SMMU_CB_ASID(smmu, cfg) ((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
Will Deaconecfadb62013-07-31 19:21:28 +0100433
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	struct io_pgtable_cfg		pgtbl_cfg;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	u32				attributes;
	struct iommu_domain		domain;
};

struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ 0, NULL},
};

static int arm_smmu_halt(struct arm_smmu_device *smmu);
static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu);
static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu);
static void arm_smmu_resume(struct arm_smmu_device *smmu);
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
	struct iommu_domain *domain, dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

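/*
 * Scan the SMMU's device-tree node for the optional properties listed in
 * arm_smmu_options[] and record the matching option bits in smmu->options.
 */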
static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

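/*
 * Masters are kept in an rb-tree ordered by the (pointer value of the)
 * device-tree node that describes them; look up the entry for @dev_node.
 */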
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

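/* Insert a master into the rb-tree, rejecting duplicate of_node entries. */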
static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

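/*
 * Validate the stream IDs in @masterspec against the SMMU's limits and
 * record them in a new rb-tree entry keyed on the master's of_node.
 */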
static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct arm_smmu_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

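/* Walk the list of probed SMMUs looking for the one that claims @dev. */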
static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
				int cbndx)
{
	void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
	u32 val;

	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
				      !(val & TLBSTATUS_SACTIVE),
				      0, TLB_LOOP_TIMEOUT))
		dev_err(smmu->dev, "TLBSYNC timeout!\n");
}

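/* Global TLB sync via sTLBGSYNC/sTLBGSTATUS, bounded by TLB_LOOP_TIMEOUT. */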
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
}

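/*
 * Invalidate the whole context: by ASID from the context bank for stage 1,
 * or by VMID from GR0 for stage 2, then wait for the invalidation to drain.
 */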
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
		__arm_smmu_tlb_sync(smmu);
	}
}

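/*
 * Issue one TLB invalidation per granule without waiting for completion.
 * The register encoding depends on the context format: non-AArch64 formats
 * take VA[31:12] with the ASID in the low bits, AArch64 takes VA >> 12 with
 * the ASID in bits [63:48].
 */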
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			/* Clear the page offset; the ASID lives in the low bits */
			iova &= ~0xfffUL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

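/*
 * On an unhandled fault, halt the SMMU and replay the translation through
 * ATOS (with stall mode momentarily disabled) to distinguish a stale TLB
 * entry from bad page tables or misbehaving hardware.
 */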
static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
					 dma_addr_t iova, u32 fsr)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu;
	void __iomem *cb_base;
	u64 sctlr, sctlr_orig;
	phys_addr_t phys;

	smmu = smmu_domain->smmu;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	arm_smmu_halt_nowait(smmu);

	writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);

	arm_smmu_wait_for_halt(smmu);

	/* clear FSR to allow ATOS to log any faults */
	writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* disable stall mode momentarily */
	sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
	sctlr = sctlr_orig & ~SCTLR_CFCFG;
	writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);

	phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);

	if (!phys) {
		dev_err(smmu->dev,
			"ATOS failed. Will issue a TLBIALL and try again...\n");
		arm_smmu_tlb_inv_context(smmu_domain);
		phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);
		if (phys)
			dev_err(smmu->dev,
				"ATOS succeeded this time. Maybe we missed a TLB invalidation while messing with page tables earlier??\n");
		else
			dev_err(smmu->dev,
				"ATOS still failed. If the page tables look good (check the software table walk) then hardware might be misbehaving.\n");
	}

	/* restore SCTLR */
	writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);

	arm_smmu_resume(smmu);

	return phys;
}

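/*
 * Context-bank fault handler: decode FSR/FSYNR0 into IOMMU_FAULT_* flags,
 * give the client's fault handler first refusal via report_iommu_fault(),
 * then log, terminate any stalled transaction and BUG() if the fault was
 * unhandled and the domain is not marked non-fatal.
 */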
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret, tmp;
	u32 fsr, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;
	void __iomem *gr1_base;
	bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
	phys_addr_t phys_soft;
	u32 frsynra;
	bool non_fatal_fault = !!(smmu_domain->attributes &
				  (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));

	static DEFINE_RATELIMIT_STATE(_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	gr1_base = ARM_SMMU_GR1(smmu);
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fatal_asf && (fsr & FSR_ASF)) {
		dev_err(smmu->dev,
			"Took an address size fault. Refusing to recover.\n");
		BUG();
	}

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
	if (fsr & FSR_TF)
		flags |= IOMMU_FAULT_TRANSLATION;
	if (fsr & FSR_PF)
		flags |= IOMMU_FAULT_PERMISSION;
	if (fsr & FSR_EF)
		flags |= IOMMU_FAULT_EXTERNAL;
	if (fsr & FSR_SS)
		flags |= IOMMU_FAULT_TRANSACTION_STALLED;

	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	phys_soft = arm_smmu_iova_to_phys(domain, iova);
	frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
	frsynra &= CBFRSYNRA_SID_MASK;
	tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
	if (!tmp || (tmp == -EBUSY)) {
		dev_dbg(smmu->dev,
			"Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
			iova, fsr, fsynr, cfg->cbndx);
		dev_dbg(smmu->dev,
			"soft iova-to-phys=%pa\n", &phys_soft);
		ret = IRQ_HANDLED;
		resume = RESUME_TERMINATE;
	} else {
		phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
							      fsr);
		if (__ratelimit(&_rs)) {
			dev_err(smmu->dev,
				"Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
				iova, fsr, fsynr, cfg->cbndx);
			dev_err(smmu->dev, "FAR    = %016lx\n",
				(unsigned long)iova);
			dev_err(smmu->dev,
				"FSR    = %08x [%s%s%s%s%s%s%s%s%s]\n",
				fsr,
				(fsr & 0x02) ? "TF " : "",
				(fsr & 0x04) ? "AFF " : "",
				(fsr & 0x08) ? "PF " : "",
				(fsr & 0x10) ? "EF " : "",
				(fsr & 0x20) ? "TLBMCF " : "",
				(fsr & 0x40) ? "TLBLKF " : "",
				(fsr & 0x80) ? "MHF " : "",
				(fsr & 0x40000000) ? "SS " : "",
				(fsr & 0x80000000) ? "MULTI " : "");
			dev_err(smmu->dev,
				"soft iova-to-phys=%pa\n", &phys_soft);
			if (!phys_soft)
				dev_err(smmu->dev,
					"SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
					dev_name(smmu->dev));
			dev_err(smmu->dev,
				"hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
			dev_err(smmu->dev, "SID=0x%x\n", frsynra);
		}
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
		if (!non_fatal_fault) {
			dev_err(smmu->dev,
				"Unhandled arm-smmu context fault!\n");
			BUG();
		}
	}

	/*
	 * If the client returns -EBUSY, do not clear FSR and do not RESUME
	 * if stalled. This is required to keep the IOMMU client stalled on
	 * the outstanding fault. This gives the client a chance to take any
	 * debug action and then terminate the stalled transaction.
	 * So, the sequence in case of stall on fault should be:
	 * 1) Do not clear FSR or write to RESUME here
	 * 2) Client takes any debug action
	 * 3) Client terminates the stalled transaction and resumes the IOMMU
	 * 4) Client clears FSR. The FSR should only be cleared after 3) and
	 *    not before so that the fault remains outstanding. This ensures
	 *    SCTLR.HUPCF has the desired effect if subsequent transactions
	 *    also need to be terminated.
	 */
	if (tmp != -EBUSY) {
		/* Clear the faulting FSR */
		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);

		/*
		 * Barrier required to ensure that the FSR is cleared
		 * before resuming SMMU operation
		 */
		wmb();

		/* Retry or terminate any stalled transactions */
		if (fsr & FSR_SS)
			writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
	}

	return ret;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

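/*
 * Program a context bank from the io-pgtable configuration: CBA2R/CBAR,
 * the TTBRs (with the ASID in the upper bits), TTBCR/TTBCR2, the MAIRs
 * for stage 1, and finally SCTLR to enable translation and fault reporting.
 */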
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_EAE_SBOP;

	if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) ||
	    !stage1)
		reg |= SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

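/*
 * Static domains simply use cbndx + 1 as their ASID; dynamic domains
 * allocate one from the shared IDR, above the range used by static ones.
 */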
static int arm_smmu_init_asid(struct iommu_domain *domain,
			      struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool dynamic = is_dynamic_domain(domain);
	int ret;

	if (!dynamic) {
		cfg->asid = cfg->cbndx + 1;
	} else {
		mutex_lock(&smmu->idr_mutex);
		ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
				       smmu->num_context_banks + 2,
				       MAX_ASID + 1, GFP_KERNEL);

		mutex_unlock(&smmu->idr_mutex);
		if (ret < 0) {
			dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
				ret);
			return ret;
		}
		cfg->asid = ret;
	}
	return 0;
}

static void arm_smmu_free_asid(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool dynamic = is_dynamic_domain(domain);

	if (cfg->asid == INVALID_ASID || !dynamic)
		return;

	mutex_lock(&smmu->idr_mutex);
	idr_remove(&smmu->asid_idr, cfg->asid);
	mutex_unlock(&smmu->idr_mutex);
}

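/*
 * Bind a domain to this SMMU: choose translation stage and context format,
 * claim a context bank, build the io-pgtable, assign an ASID and hook up
 * the context fault interrupt.
 */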
Will Deacon45ae7cf2013-06-24 18:31:25 +01001115static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Will Deacon44680ee2014-06-25 11:29:12 +01001116 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001117{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001118 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001119 unsigned long ias, oas;
1120 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001121 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001122 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001123 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Patrick Dalyc190d932016-08-30 17:23:28 -07001124 bool dynamic;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001125
Will Deacon518f7132014-11-14 17:17:54 +00001126 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001127 if (smmu_domain->smmu)
1128 goto out_unlock;
1129
Patrick Dalyc190d932016-08-30 17:23:28 -07001130 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1131 smmu_domain->cfg.asid = INVALID_ASID;
1132
Robin Murphy98006992016-04-20 14:53:33 +01001133 /* We're bypassing these SIDs, so don't allocate an actual context */
1134 if (domain->type == IOMMU_DOMAIN_DMA) {
1135 smmu_domain->smmu = smmu;
1136 goto out_unlock;
1137 }
1138
Patrick Dalyc190d932016-08-30 17:23:28 -07001139 dynamic = is_dynamic_domain(domain);
1140 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1141 dev_err(smmu->dev, "dynamic domains not supported\n");
1142 ret = -EPERM;
1143 goto out_unlock;
1144 }
1145
Will Deaconc752ce42014-06-25 22:46:31 +01001146 /*
1147 * Mapping the requested stage onto what we support is surprisingly
1148 * complicated, mainly because the spec allows S1+S2 SMMUs without
1149 * support for nested translation. That means we end up with the
1150 * following table:
1151 *
1152 * Requested Supported Actual
1153 * S1 N S1
1154 * S1 S1+S2 S1
1155 * S1 S2 S2
1156 * S1 S1 S1
1157 * N N N
1158 * N S1+S2 S2
1159 * N S2 S2
1160 * N S1 S1
1161 *
1162 * Note that you can't actually request stage-2 mappings.
1163 */
1164 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1165 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1166 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1167 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1168
Robin Murphy7602b872016-04-28 17:12:09 +01001169 /*
1170 * Choosing a suitable context format is even more fiddly. Until we
1171 * grow some way for the caller to express a preference, and/or move
1172 * the decision into the io-pgtable code where it arguably belongs,
1173 * just aim for the closest thing to the rest of the system, and hope
1174 * that the hardware isn't esoteric enough that we can't assume AArch64
1175 * support to be a superset of AArch32 support...
1176 */
1177 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1178 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
1179 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1180 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1181 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1182 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1183 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1184
1185 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1186 ret = -EINVAL;
1187 goto out_unlock;
1188 }
1189
Will Deaconc752ce42014-06-25 22:46:31 +01001190 switch (smmu_domain->stage) {
1191 case ARM_SMMU_DOMAIN_S1:
1192 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1193 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001194 ias = smmu->va_size;
1195 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001196 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001197 fmt = ARM_64_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001198 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001199 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001200 ias = min(ias, 32UL);
1201 oas = min(oas, 40UL);
1202 }
Will Deaconc752ce42014-06-25 22:46:31 +01001203 break;
1204 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001205 /*
1206 * We will likely want to change this if/when KVM gets
1207 * involved.
1208 */
Will Deaconc752ce42014-06-25 22:46:31 +01001209 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001210 cfg->cbar = CBAR_TYPE_S2_TRANS;
1211 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001212 ias = smmu->ipa_size;
1213 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001214 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001215 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001216 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001217 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001218 ias = min(ias, 40UL);
1219 oas = min(oas, 40UL);
1220 }
Will Deaconc752ce42014-06-25 22:46:31 +01001221 break;
1222 default:
1223 ret = -EINVAL;
1224 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001225 }
1226
Patrick Dalyc190d932016-08-30 17:23:28 -07001227 /* Dynamic domains must set cbndx through domain attribute */
1228 if (!dynamic) {
1229 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
Will Deacon45ae7cf2013-06-24 18:31:25 +01001230 smmu->num_context_banks);
Patrick Dalyc190d932016-08-30 17:23:28 -07001231 if (ret < 0)
1232 goto out_unlock;
1233 cfg->cbndx = ret;
1234 }
Robin Murphyb7862e32016-04-13 18:13:03 +01001235 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001236 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1237 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001238 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001239 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001240 }
1241
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001242 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
Robin Murphyd5466352016-05-09 17:20:09 +01001243 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +00001244 .ias = ias,
1245 .oas = oas,
1246 .tlb = &arm_smmu_gather_ops,
Robin Murphy2df7a252015-07-29 19:46:06 +01001247 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +00001248 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001249
Will Deacon518f7132014-11-14 17:17:54 +00001250 smmu_domain->smmu = smmu;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001251 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1252 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001253 if (!pgtbl_ops) {
1254 ret = -ENOMEM;
1255 goto out_clear_smmu;
1256 }
1257
Robin Murphyd5466352016-05-09 17:20:09 +01001258 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001259 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Will Deacon518f7132014-11-14 17:17:54 +00001260
Patrick Dalyc190d932016-08-30 17:23:28 -07001261 /* Assign an asid */
1262 ret = arm_smmu_init_asid(domain, smmu);
1263 if (ret)
1264 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001265
Patrick Dalyc190d932016-08-30 17:23:28 -07001266 if (!dynamic) {
1267 /* Initialise the context bank with our page table cfg */
1268 arm_smmu_init_context_bank(smmu_domain,
1269 &smmu_domain->pgtbl_cfg);
1270
1271 /*
1272 * Request context fault interrupt. Do this last to avoid the
1273 * handler seeing a half-initialised domain state.
1274 */
1275 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1276 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001277 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1278 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001279 if (ret < 0) {
1280 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1281 cfg->irptndx, irq);
1282 cfg->irptndx = INVALID_IRPTNDX;
1283 goto out_clear_smmu;
1284 }
1285 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001286 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001287 }
Will Deacon518f7132014-11-14 17:17:54 +00001288 mutex_unlock(&smmu_domain->init_mutex);
1289
1290 /* Publish page table ops for map/unmap */
1291 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001292 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001293
Will Deacon518f7132014-11-14 17:17:54 +00001294out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001295 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001296 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001297out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001298 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001299 return ret;
1300}
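/*
 * An illustrative summary of the stage/format selection above (a sketch of
 * the existing logic, not additional behaviour): stage 1 translates
 * VA -> IPA, stage 2 translates IPA -> PA, and the AArch32 LPAE formats
 * clamp the input/output sizes that the AArch64 formats would allow:
 *
 *	stage 1, AArch64:	ias = va_size,	oas = ipa_size
 *	stage 1, AArch32:	ias <= 32,	oas <= 40
 *	stage 2, AArch64:	ias = ipa_size,	oas = pa_size
 *	stage 2, AArch32:	ias <= 40,	oas <= 40
 */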
1301
1302static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1303{
Joerg Roedel1d672632015-03-26 13:43:10 +01001304 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001305 struct arm_smmu_device *smmu = smmu_domain->smmu;
1306 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01001307 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001308 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07001309 bool dynamic;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001310
Robin Murphy98006992016-04-20 14:53:33 +01001311 if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001312 return;
1313
Patrick Dalyc190d932016-08-30 17:23:28 -07001314 dynamic = is_dynamic_domain(domain);
1315 if (dynamic) {
1316 arm_smmu_free_asid(domain);
1317 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
1318 return;
1319 }
1320
Will Deacon518f7132014-11-14 17:17:54 +00001321 /*
 1322	 * Disable the context bank and free the page tables before freeing
 1323	 * the domain context itself.
1324 */
Will Deacon44680ee2014-06-25 11:29:12 +01001325 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001326 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01001327
Will Deacon44680ee2014-06-25 11:29:12 +01001328 if (cfg->irptndx != INVALID_IRPTNDX) {
1329 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08001330 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001331 }
1332
Markus Elfring44830b02015-11-06 18:32:41 +01001333 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Will Deacon44680ee2014-06-25 11:29:12 +01001334 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001335}
1336
Joerg Roedel1d672632015-03-26 13:43:10 +01001337static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001338{
1339 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001340
Patrick Daly09801312016-08-29 17:02:52 -07001341 /* Do not support DOMAIN_DMA for now */
1342 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01001343 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001344 /*
1345 * Allocate the domain and initialise some of its data structures.
1346 * We can't really do anything meaningful until we've added a
1347 * master.
1348 */
1349 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1350 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001351 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001352
	/*
	 * Note: currently unreachable, since DOMAIN_DMA is rejected
	 * above; retained for when DMA domain support is re-enabled.
	 */
Robin Murphy9adb9592016-01-26 18:06:36 +00001353 if (type == IOMMU_DOMAIN_DMA &&
1354 iommu_get_dma_cookie(&smmu_domain->domain)) {
1355 kfree(smmu_domain);
1356 return NULL;
1357 }
1358
Will Deacon518f7132014-11-14 17:17:54 +00001359 mutex_init(&smmu_domain->init_mutex);
1360 spin_lock_init(&smmu_domain->pgtbl_lock);
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06001361 smmu_domain->cfg.cbndx = INVALID_CBNDX;
Joerg Roedel1d672632015-03-26 13:43:10 +01001362
1363 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001364}
1365
Joerg Roedel1d672632015-03-26 13:43:10 +01001366static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001367{
Joerg Roedel1d672632015-03-26 13:43:10 +01001368 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01001369
1370 /*
1371 * Free the domain resources. We assume that all devices have
1372 * already been detached.
1373 */
Robin Murphy9adb9592016-01-26 18:06:36 +00001374 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001375 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001376 kfree(smmu_domain);
1377}
1378
1379static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001380 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001381{
1382 int i;
1383 struct arm_smmu_smr *smrs;
1384 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1385
1386 if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
1387 return 0;
1388
Will Deacona9a1b0b2014-05-01 18:05:08 +01001389 if (cfg->smrs)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001390 return -EEXIST;
1391
Mitchel Humpherys29073202014-07-08 09:52:18 -07001392 smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001393 if (!smrs) {
Will Deacona9a1b0b2014-05-01 18:05:08 +01001394 dev_err(smmu->dev, "failed to allocate %d SMRs\n",
1395 cfg->num_streamids);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001396 return -ENOMEM;
1397 }
1398
Will Deacon44680ee2014-06-25 11:29:12 +01001399 /* Allocate the SMRs on the SMMU */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001400 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001401 int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
1402 smmu->num_mapping_groups);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001403 if (idx < 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001404 dev_err(smmu->dev, "failed to allocate a free SMR\n");
1405 goto err_free_smrs;
1406 }
1407
1408 smrs[i] = (struct arm_smmu_smr) {
1409 .idx = idx,
1410 .mask = 0, /* We don't currently share SMRs */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001411 .id = cfg->streamids[i],
Will Deacon45ae7cf2013-06-24 18:31:25 +01001412 };
1413 }
1414
1415 /* It worked! Now, poke the actual hardware */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001416 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001417 u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
1418 smrs[i].mask << SMR_MASK_SHIFT;
1419 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
1420 }
1421
Will Deacona9a1b0b2014-05-01 18:05:08 +01001422 cfg->smrs = smrs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001423 return 0;
1424
1425err_free_smrs:
1426 while (--i >= 0)
1427 __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
1428 kfree(smrs);
1429 return -ENOSPC;
1430}
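/*
 * A worked example of the SMR encoding above. With SMR_VALID set, an
 * incoming stream ID matches the register when the ID bits that are not
 * masked out agree; since we program mask = 0 here, the match is exact.
 * Hypothetical values, for illustration only:
 *
 *	smr.id = 0x42, smr.mask = 0x0
 *	reg = SMR_VALID | 0x42 << SMR_ID_SHIFT | 0x0 << SMR_MASK_SHIFT
 *
 * so only stream ID 0x42 is steered by this SMR entry.
 */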
1431
1432static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001433 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001434{
1435 int i;
1436 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001437 struct arm_smmu_smr *smrs = cfg->smrs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001438
Will Deacon43b412b2014-07-15 11:22:24 +01001439 if (!smrs)
1440 return;
1441
Will Deacon45ae7cf2013-06-24 18:31:25 +01001442 /* Invalidate the SMRs before freeing back to the allocator */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001443 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001444 u8 idx = smrs[i].idx;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001445
Will Deacon45ae7cf2013-06-24 18:31:25 +01001446 writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
1447 __arm_smmu_free_bitmap(smmu->smr_map, idx);
1448 }
1449
Will Deacona9a1b0b2014-05-01 18:05:08 +01001450 cfg->smrs = NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001451 kfree(smrs);
1452}
1453
Will Deacon45ae7cf2013-06-24 18:31:25 +01001454static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001455 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001456{
1457 int i, ret;
Will Deacon44680ee2014-06-25 11:29:12 +01001458 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001459 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1460
Will Deacon5f634952016-04-20 14:53:32 +01001461 /*
1462 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
1463 * for all devices behind the SMMU. Note that we need to take
 1464	 * care configuring SMRs for devices that are both a platform_device
 1465	 * and a PCI device (i.e. a PCI host controller).
1466 */
1467 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
1468 return 0;
1469
Will Deacon8f68f8e2014-07-15 11:27:08 +01001470 /* Devices in an IOMMU group may already be configured */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001471 ret = arm_smmu_master_configure_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001472 if (ret)
Will Deacon8f68f8e2014-07-15 11:27:08 +01001473 return ret == -EEXIST ? 0 : ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001474
Will Deacona9a1b0b2014-05-01 18:05:08 +01001475 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001476 u32 idx, s2cr;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001477
Will Deacona9a1b0b2014-05-01 18:05:08 +01001478 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Patrick Dalyf4930442016-06-27 20:50:14 -07001479 s2cr = S2CR_TYPE_TRANS |
Will Deacon44680ee2014-06-25 11:29:12 +01001480 (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001481 writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
1482 }
1483
1484 return 0;
1485}
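/*
 * For reference, the S2CR written above steers every stream matched by
 * the corresponding SMR (or, without stream matching, the stream ID
 * itself) into the domain's context bank:
 *
 *	s2cr = S2CR_TYPE_TRANS | cbndx << S2CR_CBNDX_SHIFT
 *
 * The other S2CR types used by this driver are S2CR_TYPE_BYPASS and
 * S2CR_TYPE_FAULT, written on detach depending on disable_bypass.
 */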
1486
1487static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001488 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001489{
Will Deacon43b412b2014-07-15 11:22:24 +01001490 int i;
Will Deacon44680ee2014-06-25 11:29:12 +01001491 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon43b412b2014-07-15 11:22:24 +01001492 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001493
Will Deacon8f68f8e2014-07-15 11:27:08 +01001494 /* An IOMMU group is torn down by the first device to be removed */
1495 if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
1496 return;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001497
1498 /*
1499 * We *must* clear the S2CR first, because freeing the SMR means
1500 * that it can be re-allocated immediately.
1501 */
Will Deacon43b412b2014-07-15 11:22:24 +01001502 for (i = 0; i < cfg->num_streamids; ++i) {
1503 u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Robin Murphy25a1c962016-02-10 14:25:33 +00001504 u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
Will Deacon43b412b2014-07-15 11:22:24 +01001505
Robin Murphy25a1c962016-02-10 14:25:33 +00001506 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
Will Deacon43b412b2014-07-15 11:22:24 +01001507 }
1508
Will Deacona9a1b0b2014-05-01 18:05:08 +01001509 arm_smmu_master_free_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001510}
1511
Patrick Daly09801312016-08-29 17:02:52 -07001512static void arm_smmu_detach_dev(struct iommu_domain *domain,
1513 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001514{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001515 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07001516 struct arm_smmu_device *smmu = smmu_domain->smmu;
1517 struct arm_smmu_master_cfg *cfg;
1518 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
1519
1520 if (dynamic)
1521 return;
1522
1523 cfg = find_smmu_master_cfg(dev);
1524 if (!cfg)
1525 return;
1526
1527 if (!smmu) {
1528 dev_err(dev, "Domain not attached; cannot detach!\n");
1529 return;
1530 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001531
1532 dev->archdata.iommu = NULL;
1533 arm_smmu_domain_remove_master(smmu_domain, cfg);
1534}
1535
Will Deacon45ae7cf2013-06-24 18:31:25 +01001536static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1537{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001538 int ret;
Joerg Roedel1d672632015-03-26 13:43:10 +01001539 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001540 struct arm_smmu_device *smmu;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001541 struct arm_smmu_master_cfg *cfg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001542
Will Deacon8f68f8e2014-07-15 11:27:08 +01001543 smmu = find_smmu_for_device(dev);
Will Deacon44680ee2014-06-25 11:29:12 +01001544 if (!smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001545 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1546 return -ENXIO;
1547 }
1548
Will Deacon518f7132014-11-14 17:17:54 +00001549 /* Ensure that the domain is finalised */
1550 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001551 if (ret < 0)
Will Deacon518f7132014-11-14 17:17:54 +00001552 return ret;
1553
Patrick Dalyc190d932016-08-30 17:23:28 -07001554 /* Do not modify the SIDs, HW is still running */
1555 if (is_dynamic_domain(domain))
1556 return ret;
1557
Will Deacon45ae7cf2013-06-24 18:31:25 +01001558 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001559 * Sanity check the domain. We don't support domains across
1560 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001561 */
Will Deacon518f7132014-11-14 17:17:54 +00001562 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001563 dev_err(dev,
1564 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001565 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
1566 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001567 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001568
1569 /* Looks ok, so add the device to the domain */
Will Deacon8f68f8e2014-07-15 11:27:08 +01001570 cfg = find_smmu_master_cfg(dev);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001571 if (!cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001572 return -ENODEV;
1573
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001574 /* Detach the dev from its current domain */
1575 if (dev->archdata.iommu)
Patrick Daly09801312016-08-29 17:02:52 -07001576 arm_smmu_detach_dev(dev->archdata.iommu, dev);
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001577
Will Deacon844e35b2014-07-17 11:23:51 +01001578 ret = arm_smmu_domain_add_master(smmu_domain, cfg);
1579 if (!ret)
1580 dev->archdata.iommu = domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001581 return ret;
1582}
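/*
 * A minimal sketch of how a caller reaches arm_smmu_attach_dev() through
 * the generic IOMMU API (error handling elided; "dev" is assumed to be a
 * master behind this SMMU):
 *
 *	struct iommu_domain *domain;
 *
 *	domain = iommu_domain_alloc(&platform_bus_type);
 *	if (iommu_attach_device(domain, dev))
 *		...
 *
 * The first attach finalises the domain context; any further masters
 * attached to the same domain must live behind the same SMMU.
 */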
1583
Will Deacon45ae7cf2013-06-24 18:31:25 +01001584static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001585 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001586{
Will Deacon518f7132014-11-14 17:17:54 +00001587 int ret;
1588 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001589 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001590 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001591
Will Deacon518f7132014-11-14 17:17:54 +00001592 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001593 return -ENODEV;
1594
Will Deacon518f7132014-11-14 17:17:54 +00001595 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1596 ret = ops->map(ops, iova, paddr, size, prot);
1597 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1598 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001599}
1600
1601static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1602 size_t size)
1603{
Will Deacon518f7132014-11-14 17:17:54 +00001604 size_t ret;
1605 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001606 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001607 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001608
Will Deacon518f7132014-11-14 17:17:54 +00001609 if (!ops)
1610 return 0;
1611
1612 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1613 ret = ops->unmap(ops, iova, size);
1614 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1615 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001616}
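/*
 * A minimal usage sketch for the map/unmap paths above via the core API
 * (hypothetical addresses; the domain must already be attached so that
 * pgtbl_ops exist):
 *
 *	iommu_map(domain, 0x10000000, page_to_phys(pg), SZ_4K,
 *		  IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(domain, 0x10000000, SZ_4K);
 *
 * Both paths take pgtbl_lock with interrupts disabled, so they are safe
 * to call from atomic context.
 */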
1617
Mitchel Humpherys622bc042015-04-23 16:29:23 -07001618static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
1619 struct scatterlist *sg, unsigned int nents, int prot)
1620{
1621 int ret;
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07001622 size_t size;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07001623 unsigned long flags;
1624 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1625 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1626
1627 if (!ops)
1628 return -ENODEV;
1629
1630 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07001631 ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
Mitchel Humpherys622bc042015-04-23 16:29:23 -07001632 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07001633
	/*
	 * ops->map_sg() returns 0 on failure with "size" holding the
	 * bytes already mapped, so roll back the partial mapping.
	 */
 1634 if (!ret)
 1635 arm_smmu_unmap(domain, iova, size);
1636
Mitchel Humpherys622bc042015-04-23 16:29:23 -07001637 return ret;
1638}
1639
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07001640static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001641 dma_addr_t iova, bool do_halt)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001642{
Joerg Roedel1d672632015-03-26 13:43:10 +01001643 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001644 struct arm_smmu_device *smmu = smmu_domain->smmu;
1645 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 1646 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1647 struct device *dev = smmu->dev;
1648 void __iomem *cb_base;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08001649 unsigned long flags;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001650 u32 tmp;
1651 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01001652 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001653
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08001654 spin_lock_irqsave(&smmu->atos_lock, flags);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001655 if (do_halt && arm_smmu_halt(smmu)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001656 phys = 0;
1657 goto out_unlock;
1658 }
1659
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001660 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1661
Robin Murphy661d9622015-05-27 17:09:34 +01001662 /* ATS1 registers can only be written atomically */
1663 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01001664 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01001665 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
1666 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01001667 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001668
1669 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1670 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001671 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08001672 dev_err(dev,
1673 "iova to phys timed out on %pad. software table walk result=%pa.\n",
1674 &iova, &phys);
1675 phys = 0;
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001676 goto out_resume;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001677 }
1678
Robin Murphyf9a05f02016-04-13 18:13:01 +01001679 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001680 if (phys & CB_PAR_F) {
1681 dev_err(dev, "translation fault!\n");
1682 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001683 phys = 0;
1684 } else {
1685 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001686 }
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001687out_resume:
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001688 if (do_halt)
1689 arm_smmu_resume(smmu);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001690out_unlock:
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08001691 spin_unlock_irqrestore(&smmu->atos_lock, flags);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08001692 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001693}
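/*
 * For reference, the hardware translation sequence implemented above:
 *
 *	1. optionally halt the SMMU so the walk cannot race with traffic;
 *	2. write the page-aligned VA to ATS1PR to start a stage-1 walk;
 *	3. poll ATSR until ACTIVE clears, failing (and logging the
 *	   software table-walk result) on timeout;
 *	4. read PAR: if PAR.F is set the walk faulted, otherwise combine
 *	   the PA bits with the page offset of the original IOVA.
 */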
1694
Will Deacon45ae7cf2013-06-24 18:31:25 +01001695static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001696 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001697{
Will Deacon518f7132014-11-14 17:17:54 +00001698 phys_addr_t ret;
1699 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001700 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001701 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001702
Will Deacon518f7132014-11-14 17:17:54 +00001703 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00001704 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001705
Will Deacon518f7132014-11-14 17:17:54 +00001706 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07001707 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00001708 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001709
Will Deacon518f7132014-11-14 17:17:54 +00001710 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001711}
1712
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07001713/*
 1714 * This function can sleep, so it cannot be called from atomic context. It
 1715 * will power on the register block if required. This restriction does not
 1716 * apply to the original iova_to_phys() op.
1717 */
1718static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1719 dma_addr_t iova)
1720{
1721 phys_addr_t ret = 0;
1722 unsigned long flags;
1723 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1724
1725 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1726 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1727 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001728 ret = __arm_smmu_iova_to_phys_hard(domain, iova, true);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07001729
1730 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1731
1732 return ret;
1733}
1734
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001735static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
1736 struct iommu_domain *domain, dma_addr_t iova)
1737{
1738 return __arm_smmu_iova_to_phys_hard(domain, iova, false);
1739}
1740
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001741static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001742{
Will Deacond0948942014-06-24 17:30:10 +01001743 switch (cap) {
1744 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001745 /*
1746 * Return true here as the SMMU can always send out coherent
1747 * requests.
1748 */
1749 return true;
Will Deacond0948942014-06-24 17:30:10 +01001750 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001751 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001752 case IOMMU_CAP_NOEXEC:
1753 return true;
Will Deacond0948942014-06-24 17:30:10 +01001754 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001755 return false;
Will Deacond0948942014-06-24 17:30:10 +01001756 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001757}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001758
Will Deacona9a1b0b2014-05-01 18:05:08 +01001759static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
1760{
1761 *((u16 *)data) = alias;
1762 return 0; /* Continue walking */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001763}
1764
Will Deacon8f68f8e2014-07-15 11:27:08 +01001765static void __arm_smmu_release_pci_iommudata(void *data)
1766{
1767 kfree(data);
1768}
1769
Joerg Roedelaf659932015-10-21 23:51:41 +02001770static int arm_smmu_init_pci_device(struct pci_dev *pdev,
1771 struct iommu_group *group)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001772{
Will Deacon03edb222015-01-19 14:27:33 +00001773 struct arm_smmu_master_cfg *cfg;
Joerg Roedelaf659932015-10-21 23:51:41 +02001774 u16 sid;
1775 int i;
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001776
Will Deacon03edb222015-01-19 14:27:33 +00001777 cfg = iommu_group_get_iommudata(group);
1778 if (!cfg) {
Will Deacona9a1b0b2014-05-01 18:05:08 +01001779 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
Joerg Roedelaf659932015-10-21 23:51:41 +02001780 if (!cfg)
1781 return -ENOMEM;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001782
Will Deacon03edb222015-01-19 14:27:33 +00001783 iommu_group_set_iommudata(group, cfg,
1784 __arm_smmu_release_pci_iommudata);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001785 }
1786
Joerg Roedelaf659932015-10-21 23:51:41 +02001787 if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
1788 return -ENOSPC;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001789
Will Deacon03edb222015-01-19 14:27:33 +00001790 /*
1791 * Assume Stream ID == Requester ID for now.
1792 * We need a way to describe the ID mappings in FDT.
1793 */
1794 pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
1795 for (i = 0; i < cfg->num_streamids; ++i)
1796 if (cfg->streamids[i] == sid)
1797 break;
1798
1799 /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
1800 if (i == cfg->num_streamids)
1801 cfg->streamids[cfg->num_streamids++] = sid;
1802
1803 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001804}
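/*
 * Under the Stream ID == Requester ID assumption above, the SID carries
 * the PCI bus number in bits [15:8] and devfn in bits [7:0]. For example
 * (hypothetical topology), a function at 01:00.0 would yield sid = 0x0100,
 * subject to any DMA aliases reported by pci_for_each_dma_alias().
 */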
1805
Joerg Roedelaf659932015-10-21 23:51:41 +02001806static int arm_smmu_init_platform_device(struct device *dev,
1807 struct iommu_group *group)
Will Deacon03edb222015-01-19 14:27:33 +00001808{
Will Deacon03edb222015-01-19 14:27:33 +00001809 struct arm_smmu_device *smmu = find_smmu_for_device(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001810 struct arm_smmu_master *master;
Will Deacon03edb222015-01-19 14:27:33 +00001811
1812 if (!smmu)
1813 return -ENODEV;
1814
1815 master = find_smmu_master(smmu, dev->of_node);
1816 if (!master)
1817 return -ENODEV;
1818
Will Deacon03edb222015-01-19 14:27:33 +00001819 iommu_group_set_iommudata(group, &master->cfg, NULL);
Joerg Roedelaf659932015-10-21 23:51:41 +02001820
1821 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00001822}
1823
1824static int arm_smmu_add_device(struct device *dev)
1825{
Joerg Roedelaf659932015-10-21 23:51:41 +02001826 struct iommu_group *group;
Will Deacon03edb222015-01-19 14:27:33 +00001827
Joerg Roedelaf659932015-10-21 23:51:41 +02001828 group = iommu_group_get_for_dev(dev);
1829 if (IS_ERR(group))
1830 return PTR_ERR(group);
1831
Peng Fan9a4a9d82015-11-20 16:56:18 +08001832 iommu_group_put(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001833 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00001834}
1835
Will Deacon45ae7cf2013-06-24 18:31:25 +01001836static void arm_smmu_remove_device(struct device *dev)
1837{
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001838 iommu_group_remove_device(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001839}
1840
Joerg Roedelaf659932015-10-21 23:51:41 +02001841static struct iommu_group *arm_smmu_device_group(struct device *dev)
1842{
1843 struct iommu_group *group;
1844 int ret;
1845
1846 if (dev_is_pci(dev))
1847 group = pci_device_group(dev);
1848 else
1849 group = generic_device_group(dev);
1850
1851 if (IS_ERR(group))
1852 return group;
1853
1854 if (dev_is_pci(dev))
1855 ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
1856 else
1857 ret = arm_smmu_init_platform_device(dev, group);
1858
1859 if (ret) {
1860 iommu_group_put(group);
1861 group = ERR_PTR(ret);
1862 }
1863
1864 return group;
1865}
1866
Will Deaconc752ce42014-06-25 22:46:31 +01001867static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1868 enum iommu_attr attr, void *data)
1869{
Joerg Roedel1d672632015-03-26 13:43:10 +01001870 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06001871 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01001872
1873 switch (attr) {
1874 case DOMAIN_ATTR_NESTING:
1875 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1876 return 0;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08001877 case DOMAIN_ATTR_PT_BASE_ADDR:
1878 *((phys_addr_t *)data) =
1879 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
1880 return 0;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06001881 case DOMAIN_ATTR_CONTEXT_BANK:
1882 /* context bank index isn't valid until we are attached */
1883 if (smmu_domain->smmu == NULL)
1884 return -ENODEV;
1885
1886 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
1887 ret = 0;
1888 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06001889 case DOMAIN_ATTR_TTBR0: {
1890 u64 val;
1891 struct arm_smmu_device *smmu = smmu_domain->smmu;
1892 /* not valid until we are attached */
1893 if (smmu == NULL)
1894 return -ENODEV;
1895
1896 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
1897 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
1898 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
1899 << (TTBRn_ASID_SHIFT);
1900 *((u64 *)data) = val;
1901 ret = 0;
1902 break;
1903 }
1904 case DOMAIN_ATTR_CONTEXTIDR:
1905 /* not valid until attached */
1906 if (smmu_domain->smmu == NULL)
1907 return -ENODEV;
1908 *((u32 *)data) = smmu_domain->cfg.procid;
1909 ret = 0;
1910 break;
1911 case DOMAIN_ATTR_PROCID:
1912 *((u32 *)data) = smmu_domain->cfg.procid;
1913 ret = 0;
1914 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07001915 case DOMAIN_ATTR_DYNAMIC:
1916 *((int *)data) = !!(smmu_domain->attributes
1917 & (1 << DOMAIN_ATTR_DYNAMIC));
1918 ret = 0;
1919 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001920 case DOMAIN_ATTR_NON_FATAL_FAULTS:
1921 *((int *)data) = !!(smmu_domain->attributes
1922 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
1923 ret = 0;
1924 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07001925 case DOMAIN_ATTR_S1_BYPASS:
1926 *((int *)data) = !!(smmu_domain->attributes
1927 & (1 << DOMAIN_ATTR_S1_BYPASS));
1928 ret = 0;
1929 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001930 default:
1931 return -ENODEV;
1932 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06001933 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001934}
1935
1936static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1937 enum iommu_attr attr, void *data)
1938{
Will Deacon518f7132014-11-14 17:17:54 +00001939 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001940 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001941
Will Deacon518f7132014-11-14 17:17:54 +00001942 mutex_lock(&smmu_domain->init_mutex);
1943
Will Deaconc752ce42014-06-25 22:46:31 +01001944 switch (attr) {
1945 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00001946 if (smmu_domain->smmu) {
1947 ret = -EPERM;
1948 goto out_unlock;
1949 }
1950
Will Deaconc752ce42014-06-25 22:46:31 +01001951 if (*(int *)data)
1952 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1953 else
1954 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1955
Will Deacon518f7132014-11-14 17:17:54 +00001956 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06001957 case DOMAIN_ATTR_PROCID:
1958 if (smmu_domain->smmu != NULL) {
1959 dev_err(smmu_domain->smmu->dev,
1960 "cannot change procid attribute while attached\n");
1961 ret = -EBUSY;
1962 break;
1963 }
1964 smmu_domain->cfg.procid = *((u32 *)data);
1965 ret = 0;
1966 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07001967 case DOMAIN_ATTR_DYNAMIC: {
1968 int dynamic = *((int *)data);
1969
1970 if (smmu_domain->smmu != NULL) {
1971 dev_err(smmu_domain->smmu->dev,
1972 "cannot change dynamic attribute while attached\n");
1973 ret = -EBUSY;
1974 break;
1975 }
1976
1977 if (dynamic)
1978 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
1979 else
1980 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
1981 ret = 0;
1982 break;
1983 }
1984 case DOMAIN_ATTR_CONTEXT_BANK:
1985 /* context bank can't be set while attached */
1986 if (smmu_domain->smmu != NULL) {
1987 ret = -EBUSY;
1988 break;
1989 }
1990 /* ... and it can only be set for dynamic contexts. */
1991 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
1992 ret = -EINVAL;
1993 break;
1994 }
1995
1996 /* this will be validated during attach */
1997 smmu_domain->cfg.cbndx = *((unsigned int *)data);
1998 ret = 0;
1999 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002000 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
2001 u32 non_fatal_faults = *((int *)data);
2002
2003 if (non_fatal_faults)
2004 smmu_domain->attributes |=
2005 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
2006 else
2007 smmu_domain->attributes &=
2008 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
2009 ret = 0;
2010 break;
2011 }
Patrick Dalye62d3362016-03-15 18:58:28 -07002012 case DOMAIN_ATTR_S1_BYPASS: {
2013 int bypass = *((int *)data);
2014
2015 /* bypass can't be changed while attached */
2016 if (smmu_domain->smmu != NULL) {
2017 ret = -EBUSY;
2018 break;
2019 }
2020 if (bypass)
2021 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
2022 else
2023 smmu_domain->attributes &=
2024 ~(1 << DOMAIN_ATTR_S1_BYPASS);
2025
2026 ret = 0;
2027 break;
2028 }
Will Deaconc752ce42014-06-25 22:46:31 +01002029 default:
Will Deacon518f7132014-11-14 17:17:54 +00002030 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01002031 }
Will Deacon518f7132014-11-14 17:17:54 +00002032
2033out_unlock:
2034 mutex_unlock(&smmu_domain->init_mutex);
2035 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002036}
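/*
 * A minimal sketch of configuring a dynamic domain with the attributes
 * handled above (values hypothetical; attributes must be set before the
 * domain is attached):
 *
 *	int dynamic = 1;
 *	unsigned int cb = parent_cbndx;	/* hypothetical parent's bank */
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_DYNAMIC, &dynamic);
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_CONTEXT_BANK, &cb);
 *	iommu_attach_device(domain, dev);
 *
 * Attach then validates cbndx instead of allocating a context bank, and
 * the stream-to-context mappings of the live hardware are left untouched.
 */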
2037
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002038static void arm_smmu_trigger_fault(struct iommu_domain *domain,
2039 unsigned long flags)
2040{
2041 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2042 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2043 struct arm_smmu_device *smmu;
2044 void __iomem *cb_base;
2045
2046 if (!smmu_domain->smmu) {
2047 pr_err("Can't trigger faults on non-attached domains\n");
2048 return;
2049 }
2050
2051 smmu = smmu_domain->smmu;
2052
2053 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2054 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
2055 flags, cfg->cbndx);
2056 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07002057 /* give the interrupt time to fire... */
2058 msleep(1000);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002059}
2060
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002061static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
2062 unsigned long offset)
2063{
2064 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2065 struct arm_smmu_device *smmu;
2066 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2067 void __iomem *cb_base;
2068 unsigned long val;
2069
2070 if (offset >= SZ_4K) {
2071 pr_err("Invalid offset: 0x%lx\n", offset);
2072 return 0;
2073 }
2074
2075 smmu = smmu_domain->smmu;
2076 if (!smmu) {
2077 WARN(1, "Can't read registers of a detached domain\n");
2078 val = 0;
2079 return val;
2080 }
2081
2082 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2083 val = readl_relaxed(cb_base + offset);
2084
2085 return val;
2086}
2087
2088static void arm_smmu_reg_write(struct iommu_domain *domain,
2089 unsigned long offset, unsigned long val)
2090{
2091 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2092 struct arm_smmu_device *smmu;
2093 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2094 void __iomem *cb_base;
2095
2096 if (offset >= SZ_4K) {
2097 pr_err("Invalid offset: 0x%lx\n", offset);
2098 return;
2099 }
2100
2101 smmu = smmu_domain->smmu;
2102 if (!smmu) {
 2103 WARN(1, "Can't write registers of a detached domain\n");
2104 return;
2105 }
2106
2107 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2108 writel_relaxed(val, cb_base + offset);
2109}
2110
Will Deacon518f7132014-11-14 17:17:54 +00002111static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01002112 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01002113 .domain_alloc = arm_smmu_domain_alloc,
2114 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01002115 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07002116 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01002117 .map = arm_smmu_map,
2118 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002119 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01002120 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002121 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01002122 .add_device = arm_smmu_add_device,
2123 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02002124 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01002125 .domain_get_attr = arm_smmu_domain_get_attr,
2126 .domain_set_attr = arm_smmu_domain_set_attr,
Will Deacon518f7132014-11-14 17:17:54 +00002127 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002128 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002129 .reg_read = arm_smmu_reg_read,
2130 .reg_write = arm_smmu_reg_write,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002131};
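/*
 * These ops are published to the IOMMU core at init time via
 * bus_set_iommu(), e.g. for platform-bus masters:
 *
 *	bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
 *
 * after which iommu_domain_alloc()/iommu_attach_device() on that bus are
 * routed to the callbacks above.
 */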
2132
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002133static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002134{
2135 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002136 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002137
2138 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
2139 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
2140 0, 30000)) {
2141 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
2142 return -EBUSY;
2143 }
2144
2145 return 0;
2146}
2147
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002148static int __arm_smmu_halt(struct arm_smmu_device *smmu, bool wait)
2149{
2150 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
2151 u32 reg;
2152
2153 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2154 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
2155 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2156
2157 return wait ? arm_smmu_wait_for_halt(smmu) : 0;
2158}
2159
2160static int arm_smmu_halt(struct arm_smmu_device *smmu)
2161{
2162 return __arm_smmu_halt(smmu, true);
2163}
2164
2165static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu)
2166{
2167 return __arm_smmu_halt(smmu, false);
2168}
2169
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002170static void arm_smmu_resume(struct arm_smmu_device *smmu)
2171{
2172 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
2173 u32 reg;
2174
2175 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2176 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
2177 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2178}
2179
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002180static void arm_smmu_impl_def_programming(struct arm_smmu_device *smmu)
2181{
2182 int i;
2183 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
2184
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002185 arm_smmu_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002186 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
2187 writel_relaxed(regs[i].value,
2188 ARM_SMMU_GR0(smmu) + regs[i].offset);
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002189 arm_smmu_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002190}
2191
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08002192static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002193{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08002194 int i;
2195 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002196 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002197 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002198
Peng Fan3ca37122016-05-03 21:50:30 +08002199 /*
 2200	 * Before clearing ARM_MMU500_ACTLR_CPRE, the CACHE_LOCK bit
 2201	 * of ACR must be cleared first; note that the CACHE_LOCK bit
 2202	 * is only present in MMU-500r2 onwards.
2203 */
2204 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
2205 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
2206 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
2207 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
2208 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
2209 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
2210 }
2211
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002212 /* Make sure all context banks are disabled and clear CB_FSR */
2213 for (i = 0; i < smmu->num_context_banks; ++i) {
2214 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
2215 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
2216 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002217 /*
2218 * Disable MMU-500's not-particularly-beneficial next-page
2219 * prefetcher for the sake of errata #841119 and #826419.
2220 */
2221 if (smmu->model == ARM_MMU500) {
2222 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
2223 reg &= ~ARM_MMU500_ACTLR_CPRE;
2224 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
2225 }
Patrick Dalyf0d4e212016-06-20 15:50:14 -07002226
2227 if (smmu->model == QCOM_SMMUV2) {
2228 reg = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
2229 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
2230 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
2231 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
2232 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002233 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08002234}
2235
2236static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
2237{
2238 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
2239 int i = 0;
2240 u32 reg;
2241
2242 /* clear global FSR */
2243 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
2244 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
2245
2246 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
2247 /*
2248 * Mark all SMRn as invalid and all S2CRn as bypass unless
2249 * overridden
2250 */
2251 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
2252 for (i = 0; i < smmu->num_mapping_groups; ++i) {
2253 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
2254 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
2255 }
2256
2257 arm_smmu_context_bank_reset(smmu);
2258 }
Will Deacon1463fe42013-07-31 19:21:27 +01002259
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002260 /* Program implementation defined registers */
2261 arm_smmu_impl_def_programming(smmu);
2262
Will Deacon45ae7cf2013-06-24 18:31:25 +01002263 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01002264 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
2265 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
2266
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00002267 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002268
Will Deacon45ae7cf2013-06-24 18:31:25 +01002269 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002270 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002271
2272 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002273 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002274
Robin Murphy25a1c962016-02-10 14:25:33 +00002275 /* Enable client access, handling unmatched streams as appropriate */
2276 reg &= ~sCR0_CLIENTPD;
2277 if (disable_bypass)
2278 reg |= sCR0_USFCFG;
2279 else
2280 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002281
2282 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002283 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002284
2285 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002286 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002287
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08002288 if (smmu->features & ARM_SMMU_FEAT_VMID16)
2289 reg |= sCR0_VMID16EN;
2290
Will Deacon45ae7cf2013-06-24 18:31:25 +01002291 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00002292 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00002293 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002294}
2295
2296static int arm_smmu_id_size_to_bits(int size)
2297{
2298 switch (size) {
2299 case 0:
2300 return 32;
2301 case 1:
2302 return 36;
2303 case 2:
2304 return 40;
2305 case 3:
2306 return 42;
2307 case 4:
2308 return 44;
2309 case 5:
2310 default:
2311 return 48;
2312 }
2313}
2314
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002315static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
2316{
2317 struct device *dev = smmu->dev;
2318 int i, ntuples, ret;
2319 u32 *tuples;
2320 struct arm_smmu_impl_def_reg *regs, *regit;
2321
2322 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
2323 return 0;
2324
2325 ntuples /= sizeof(u32);
2326 if (ntuples % 2) {
2327 dev_err(dev,
2328 "Invalid number of attach-impl-defs registers: %d\n",
2329 ntuples);
2330 return -EINVAL;
2331 }
2332
2333 regs = devm_kmalloc(
2334 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
2335 GFP_KERNEL);
2336 if (!regs)
2337 return -ENOMEM;
2338
2339 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
2340 if (!tuples)
2341 return -ENOMEM;
2342
2343 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
2344 tuples, ntuples);
2345 if (ret)
2346 return ret;
2347
2348 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
2349 regit->offset = tuples[i];
2350 regit->value = tuples[i + 1];
2351 }
2352
2353 devm_kfree(dev, tuples);
2354
2355 smmu->impl_def_attach_registers = regs;
2356 smmu->num_impl_def_attach_registers = ntuples / 2;
2357
2358 return 0;
2359}
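/*
 * A hypothetical device-tree fragment consumed by the parser above; each
 * <offset value> pair is written verbatim into the global register space
 * while the SMMU is halted (register offsets/values invented for
 * illustration):
 *
 *	smmu@d00000 {
 *		...
 *		attach-impl-defs = <0x6000 0x270>,
 *				   <0x6060 0x1055>;
 *	};
 *
 * An odd number of cells is rejected with -EINVAL.
 */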
2360
Will Deacon45ae7cf2013-06-24 18:31:25 +01002361static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
2362{
2363 unsigned long size;
2364 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
2365 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01002366 bool cttw_dt, cttw_reg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002367
2368 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01002369 dev_notice(smmu->dev, "SMMUv%d with:\n",
2370 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002371
2372 /* ID0 */
2373 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01002374
2375 /* Restrict available stages based on module parameter */
2376 if (force_stage == 1)
2377 id &= ~(ID0_S2TS | ID0_NTS);
2378 else if (force_stage == 2)
2379 id &= ~(ID0_S1TS | ID0_NTS);
2380
Will Deacon45ae7cf2013-06-24 18:31:25 +01002381 if (id & ID0_S1TS) {
2382 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
2383 dev_notice(smmu->dev, "\tstage 1 translation\n");
2384 }
2385
2386 if (id & ID0_S2TS) {
2387 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
2388 dev_notice(smmu->dev, "\tstage 2 translation\n");
2389 }
2390
2391 if (id & ID0_NTS) {
2392 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
2393 dev_notice(smmu->dev, "\tnested translation\n");
2394 }
2395
2396 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01002397 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002398 dev_err(smmu->dev, "\tno translation support!\n");
2399 return -ENODEV;
2400 }
2401
Robin Murphyb7862e32016-04-13 18:13:03 +01002402 if ((id & ID0_S1TS) &&
2403 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002404 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
2405 dev_notice(smmu->dev, "\taddress translation ops\n");
2406 }
2407
Robin Murphybae2c2d2015-07-29 19:46:05 +01002408 /*
2409 * In order for DMA API calls to work properly, we must defer to what
2410 * the DT says about coherency, regardless of what the hardware claims.
2411 * Fortunately, this also opens up a workaround for systems where the
2412 * ID register value has ended up configured incorrectly.
2413 */
2414 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
2415 cttw_reg = !!(id & ID0_CTTW);
2416 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002417 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01002418 if (cttw_dt || cttw_reg)
2419 dev_notice(smmu->dev, "\t%scoherent table walk\n",
2420 cttw_dt ? "" : "non-");
2421 if (cttw_dt != cttw_reg)
2422 dev_notice(smmu->dev,
2423 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01002424
2425 if (id & ID0_SMS) {
2426 u32 smr, sid, mask;
2427
2428 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
2429 smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
2430 ID0_NUMSMRG_MASK;
2431 if (smmu->num_mapping_groups == 0) {
2432 dev_err(smmu->dev,
2433 "stream-matching supported, but no SMRs present!\n");
2434 return -ENODEV;
2435 }
2436
Dhaval Patel031d7462015-05-09 14:47:29 -07002437 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
2438 smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
2439 smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
2440 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
2441 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
Will Deacon45ae7cf2013-06-24 18:31:25 +01002442
Dhaval Patel031d7462015-05-09 14:47:29 -07002443 mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
2444 sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
2445 if ((mask & sid) != sid) {
2446 dev_err(smmu->dev,
2447 "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
2448 mask, sid);
2449 return -ENODEV;
2450 }
2451
2452 dev_notice(smmu->dev,
2453 "\tstream matching with %u register groups, mask 0x%x",
2454 smmu->num_mapping_groups, mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002455 }
Olav Haugan3c8766d2014-08-22 17:12:32 -07002456 } else {
2457 smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
2458 ID0_NUMSIDB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002459 }
2460
Robin Murphy7602b872016-04-28 17:12:09 +01002461 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
2462 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
2463 if (!(id & ID0_PTFS_NO_AARCH32S))
2464 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
2465 }
2466
Will Deacon45ae7cf2013-06-24 18:31:25 +01002467 /* ID1 */
2468 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01002469 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002470
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01002471 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00002472 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01002473 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01002474 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07002475 dev_warn(smmu->dev,
2476 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
2477 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002478
Will Deacon518f7132014-11-14 17:17:54 +00002479 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002480 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
2481 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
2482 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
2483 return -ENODEV;
2484 }
2485 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
2486 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01002487 /*
2488 * Cavium CN88xx erratum #27704.
2489 * Ensure ASID and VMID allocation is unique across all SMMUs in
2490 * the system.
2491 */
2492 if (smmu->model == CAVIUM_SMMUV2) {
2493 smmu->cavium_id_base =
2494 atomic_add_return(smmu->num_context_banks,
2495 &cavium_smmu_context_count);
2496 smmu->cavium_id_base -= smmu->num_context_banks;
2497 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002498
2499 /* ID2 */
2500 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
2501 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00002502 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002503
Will Deacon518f7132014-11-14 17:17:54 +00002504 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01002505 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00002506 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002507
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08002508 if (id & ID2_VMID16)
2509 smmu->features |= ARM_SMMU_FEAT_VMID16;
2510
Robin Murphyf1d84542015-03-04 16:41:05 +00002511 /*
2512 * What the page table walker can address actually depends on which
2513 * descriptor format is in use, but since a) we don't know that yet,
2514 * and b) it can vary per context bank, this will have to do...
2515 */
2516 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
2517 dev_warn(smmu->dev,
2518 "failed to set DMA mask for table walker\n");
2519
	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

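	/*
	 * Fold this SMMU's page sizes into the shared ops bitmap; -1UL is
	 * the unset value from the static arm_smmu_ops definition.
	 */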
	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_iterator it;
	struct arm_smmu_phandle_args *masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;
	spin_lock_init(&smmu->atos_lock);
	idr_init(&smmu->asid_idr);
	mutex_init(&smmu->idr_mutex);

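	/* Version and implementation quirks come from the matched compatible */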
	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	data = of_id->data;
	smmu->version = data->version;
	smmu->model = data->model;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

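	/*
	 * Count every IRQ resource; anything beyond the DT-declared global
	 * interrupts is treated as a context interrupt.
	 */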
	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	parse_driver_options(smmu);

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

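	/*
	 * Walk the "mmu-masters" phandle list, registering each master and
	 * its stream IDs in the rb-tree; a single masterspec buffer is
	 * reused for every iteration.
	 */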
	i = 0;
	smmu->masters = RB_ROOT;

	err = -ENOMEM;
	/* No need to zero the memory for masterspec */
	masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
	if (!masterspec)
		goto out_put_masters;

	of_for_each_phandle(&it, err, dev->of_node,
			    "mmu-masters", "#stream-id-cells", 0) {
		int count = of_phandle_iterator_args(&it, masterspec->args,
						     MAX_MASTER_STREAMIDS);
		masterspec->np		= of_node_get(it.node);
		masterspec->args_count	= count;

		err = register_smmu_master(smmu, dev, masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec->np->name);
			kfree(masterspec);
			goto out_put_masters;
		}

		i++;
	}

	dev_notice(dev, "registered %d master devices\n", i);

	kfree(masterspec);

	err = arm_smmu_parse_impl_def_registers(smmu);
	if (err)
		goto out_put_masters;

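	/* SMMUv2 requires a dedicated interrupt per context bank */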
	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

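	/* Global fault interrupts are threaded and may be shared lines */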
	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
					NULL, arm_smmu_global_fault,
					IRQF_ONESHOT | IRQF_SHARED,
					"arm-smmu global fault", smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_put_masters;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

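	/* Find this device on the global SMMU list and unlink it */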
	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		devm_free_irq(smmu->dev, smmu->irqs[i], smmu);

	idr_destroy(&smmu->asid_idr);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
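		/* Request ACS so PCIe devices land in well-isolated IOMMU groups */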
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");