/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

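/*
 * For example, with the Calxeda secure-config-access workaround enabled,
 * the global fault handler below reads sGFSR (normally at GR0 + 0x48)
 * through its secure alias at 0x448:
 *
 *	gfsr = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
 */
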
/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif
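/*
 * smmu_write_atomic_lq(val, reg) therefore issues one atomic 64-bit write
 * on 64-bit kernels, and a single 32-bit write of the low word (all that
 * an AArch32 format cares about) on 32-bit ones.
 */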

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_UNPRIV		(2 << S2CR_PRIVCFG_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

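/*
 * Context banks thus occupy the upper half of the SMMU's register space,
 * one translation-unit-sized page per bank: the registers for bank n start
 * at ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, n).
 */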
#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

/* Definitions for implementation-defined registers */
#define ACTLR_QCOM_OSH_SHIFT		28
#define ACTLR_QCOM_OSH			1

#define ACTLR_QCOM_ISH_SHIFT		29
#define ACTLR_QCOM_ISH			1

#define ACTLR_QCOM_NSH_SHIFT		30
#define ACTLR_QCOM_NSH			1

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;

	u32				cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
	unsigned int			num_impl_def_attach_registers;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

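/*
 * ASIDs and VMIDs are derived directly from the context bank index. The
 * cavium_id_base offset is zero everywhere except on Cavium SMMUs, where
 * each instance is given a distinct range (the ASID/VMID namespaces there
 * are evidently shared across SMMU instances); the extra +1 for VMIDs
 * presumably keeps VMID 0 free.
 */
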
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

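/*
 * PCI devices carry no of_node of their own, so dev_get_dev_node() walks up
 * to the root bus and takes the host bridge's parent: the PCI host
 * controller's device node, which is what the "mmu-masters" property was
 * registered against.
 */
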
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

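/*
 * Note that both lookup and insertion order the rbtree by the raw of_node
 * pointer value: any consistent total order works as a search key, and
 * pointer comparison is the cheapest one available.
 */
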
static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct arm_smmu_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

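/*
 * The allocator above is lock-free: find_next_zero_bit() is only a hint,
 * and the atomic test_and_set_bit() both claims the bit and detects a
 * concurrent winner, in which case the search simply restarts.
 */
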
/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			/* Clear the page-offset bits to make room for the ASID */
			iova &= ~0xfffUL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

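/*
 * To summarise: stage 1 contexts are invalidated by VA (tagged with the
 * ASID), stage 2 contexts on SMMUv2 by IPA, and anything older falls back
 * to flushing the whole TLB for the VMID.
 */
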
static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

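/*
 * In short, a context bank is brought up by programming CBA2R (register
 * width and 16-bit VMID), CBAR (type, interrupt index, 8-bit VMID), the
 * TTBRs, TTBCR(2) and the MAIRs, and only then setting SCTLR.M to enable
 * translation.
 */
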
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/* We're bypassing these SIDs, so don't allocate an actual context */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (idx < 0) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}

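/*
 * Each stream match register thus ends up as VALID | mask << 16 | id, and
 * since the mask is always zero here, every SMR matches exactly one
 * stream ID.
 */
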
static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/*
	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
	 * for all devices behind the SMMU. Note that we need to take
	 * care configuring SMRs for devices that are both a platform_device
	 * and a PCI device (i.e. a PCI host controller).
	 */
	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
		return 0;

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx, s2cr;

		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}

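/*
 * At this point every stream ID of the master is steered, via its S2CR,
 * into the domain's context bank for translation, with the PRIVCFG field
 * overriding transactions to unprivileged.
 */
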
static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;

		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}

static void arm_smmu_detach_dev(struct device *dev,
				struct arm_smmu_master_cfg *cfg)
{
	struct iommu_domain *domain = dev->archdata.iommu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	dev->archdata.iommu = NULL;
	arm_smmu_domain_remove_master(smmu_domain, cfg);
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return -ENODEV;

	/* Detach the dev from its current domain */
	if (dev->archdata.iommu)
		arm_smmu_detach_dev(dev, cfg);

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;
	return ret;
}

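/*
 * In short, attach is: find the SMMU this master sits behind, finalise
 * the domain on first attach, refuse cross-SMMU domains, then move the
 * master's stream IDs over, detaching them from any previous domain
 * first (an S2CR entry can only point at a single context bank).
 */
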
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

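/*
 * Both callbacks above are reached via the generic IOMMU API; a minimal
 * sketch of a consumer (variable names are illustrative only):
 *
 *	ret = iommu_map(domain, iova, paddr, SZ_4K,
 *			IOMMU_READ | IOMMU_WRITE);
 *	...
 *	unmapped = iommu_unmap(domain, iova, SZ_4K);
 *
 * Note that pgtbl_lock only serialises updates to the io-pgtable; the
 * associated TLB maintenance is driven by the io-pgtable code itself via
 * the TLB ops supplied when the domain was initialised.
 */
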
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

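/*
 * The "hard" path above drives the hardware translation port: write the
 * page-aligned VA to ATS1PR, poll ATSR.ACTIVE until the walk finishes,
 * then read the result (or a fault indication via CB_PAR_F) back from
 * the PAR. Only PAR bits [39:12] are consumed here, so the low 12 bits
 * of the original iova are stitched back onto the returned address.
 */
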
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids)
		cfg->streamids[cfg->num_streamids++] = sid;

	return 0;
}

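/*
 * Illustrative example of the alias walk above (addresses made up): for
 * a device at 01:00.0 sitting behind a PCIe-to-PCI bridge, DMA reaches
 * the SMMU with the bridge's requester ID. pci_for_each_dma_alias()
 * visits the device and then each alias on the way to the root, and
 * since __arm_smmu_get_pci_sid() simply overwrites 'sid' each time, the
 * value that survives is the topmost alias, i.e. the stream ID the SMMU
 * actually observes.
 */
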
static int arm_smmu_init_platform_device(struct device *dev,
					 struct iommu_group *group)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master *master;

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	iommu_group_set_iommudata(group, &master->cfg, NULL);

	return 0;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}

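/*
 * Once the group exists, arm_smmu_init_*_device() hangs the master's
 * stream ID configuration off it as iommudata: platform masters reuse
 * the cfg parsed from the "mmu-masters" DT property, while PCI masters
 * get a kzalloc'd cfg (freed together with the group) that accumulates
 * one stream ID per requester ID alias.
 */
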
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

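/*
 * DOMAIN_ATTR_NESTING must be set while the domain is still detached;
 * once smmu_domain->smmu is non-NULL the stage is fixed, hence -EPERM.
 * A caller such as VFIO would therefore do, before the first attach:
 *
 *	int nesting = 1;
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting);
 */
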
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

static void arm_smmu_impl_def_programming(struct arm_smmu_device *smmu)
{
	int i;
	struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;

	for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
		writel_relaxed(regs[i].value,
			       ARM_SMMU_GR0(smmu) + regs[i].offset);
}

static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i = 0;
	u32 reg, major;

	/* Clear the global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
	reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/*
	 * Before clearing ARM_MMU500_ACTLR_CPRE, we need to clear the
	 * CACHE_LOCK bit of sACR first. Note that the CACHE_LOCK bit is
	 * only present in MMU-500 r2 onwards.
	 */
	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
	major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
	if ((smmu->model == ARM_MMU500) && (major >= 2)) {
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}

		if (smmu->model == QCOM_SMMUV2) {
			reg = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
			      ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
			      ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Program implementation defined registers */
	arm_smmu_impl_def_programming(smmu);

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

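/*
 * A note on ordering in arm_smmu_device_reset(): every configuration
 * write above is _relaxed, so the final non-relaxed writel() to sCR0,
 * issued after the TLB maintenance has been synchronised, is what
 * guarantees the SMMU observes the whole new configuration before
 * client transactions are let back in.
 */
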
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

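/*
 * This decode matches the size fields of the ID registers parsed below
 * (ID2.IAS/OAS/UBS): 0 => 32 bits through 5 => 48 bits, with any larger
 * (reserved) encoding treated as 48 bits.
 */
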
static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;
	int i, ntuples, ret;
	u32 *tuples;
	struct arm_smmu_impl_def_reg *regs, *regit;

	if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
		return 0;

	ntuples /= sizeof(u32);
	if (ntuples % 2) {
		dev_err(dev,
			"Invalid number of attach-impl-defs registers: %d\n",
			ntuples);
		return -EINVAL;
	}

	regs = devm_kmalloc(dev,
			    sizeof(*smmu->impl_def_attach_registers) * ntuples,
			    GFP_KERNEL);
	if (!regs)
		return -ENOMEM;

	tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
	if (!tuples)
		return -ENOMEM;

	ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
					 tuples, ntuples);
	if (ret)
		return ret;

	for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
		regit->offset = tuples[i];
		regit->value = tuples[i + 1];
	}

	devm_kfree(dev, tuples);

	smmu->impl_def_attach_registers = regs;
	smmu->num_impl_def_attach_registers = ntuples / 2;

	return 0;
}

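/*
 * An illustrative (entirely made-up) DT fragment for the parser above;
 * each cell pair is <offset value> relative to the SMMU's global
 * register space:
 *
 *	smmu@d00000 {
 *		...
 *		attach-impl-defs = <0x6000 0x270>,
 *				   <0x6060 0x1055>;
 *	};
 *
 * The parsed pairs are later written back verbatim by
 * arm_smmu_impl_def_programming() during reset.
 */
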
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
		   smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
	      (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
	    ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
		if ((mask & sid) != sid) {
			dev_err(smmu->dev,
				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
				mask, sid);
			return -ENODEV;
		}

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups, mask 0x%x",
			   smmu->num_mapping_groups, mask);
	} else {
		smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
					   ID0_NUMSIDB_MASK;
	}

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			 size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

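/*
 * For reference, a minimal (hypothetical) device-tree node that the
 * probe routine below can consume; "mmu-masters" pairs each master node
 * with its stream IDs (assuming dma0 sets #stream-id-cells = <2>), and
 * #global-interrupts says how many entries at the start of "interrupts"
 * are global (the rest are per-context-bank):
 *
 *	smmu: iommu@2b400000 {
 *		compatible = "arm,mmu-500", "arm,smmu-v2";
 *		reg = <0x2b400000 0x10000>;
 *		#global-interrupts = <1>;
 *		interrupts = <0 110 4>, <0 111 4>, <0 112 4>;
 *		mmu-masters = <&dma0 0x20 0x21>;
 *	};
 */
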
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_iterator it;
	struct arm_smmu_phandle_args *masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	data = of_id->data;
	smmu->version = data->version;
	smmu->model = data->model;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	i = 0;
	smmu->masters = RB_ROOT;

	err = -ENOMEM;
	/* No need to zero the memory for masterspec */
	masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
	if (!masterspec)
		goto out_put_masters;

	of_for_each_phandle(&it, err, dev->of_node,
			    "mmu-masters", "#stream-id-cells", 0) {
		int count = of_phandle_iterator_args(&it, masterspec->args,
						     MAX_MASTER_STREAMIDS);
		masterspec->np		= of_node_get(it.node);
		masterspec->args_count	= count;

		err = register_smmu_master(smmu, dev, masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec->np->name);
			kfree(masterspec);
			goto out_put_masters;
		}

		i++;
	}

	dev_notice(dev, "registered %d master devices\n", i);

	kfree(masterspec);

	err = arm_smmu_parse_impl_def_registers(smmu);
	if (err)
		goto out_put_masters;

	parse_driver_options(smmu);

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_put_masters;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		devm_free_irq(smmu->dev, smmu->irqs[i], smmu);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");