/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_UNPRIV		(2 << S2CR_PRIVCFG_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;

	u32				cavium_id_base; /* Specific to Cavium */
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

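/*
 * Translate a context bank index into the ASID/VMID that actually gets
 * programmed. cavium_id_base keeps the ID spaces of multiple Cavium SMMUs
 * disjoint, on the assumption (per the ThunderX erratum this exists to
 * work around) that their ASIDs/VMIDs are global across the system; other
 * implementations are presumed to use a base of zero.
 */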
#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

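/* The masters rbtree is keyed on the raw device_node pointer value */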
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct arm_smmu_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

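/*
 * Lock-free bitmap allocator: if another CPU claims the bit between
 * find_next_zero_bit() and the atomic test_and_set_bit(), just retry
 * the search from the same starting point.
 */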
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

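/*
 * Invalidate by VA/IPA, one granule at a time, leaving the final sync to
 * the ->tlb_sync callback. The AArch32 format carries VA[31:12] with the
 * ASID in the low bits; the AArch64 format takes VA >> 12 with the ASID
 * in bits [63:48]. Stage-2 invalidation is by IPA on SMMUv2 and falls
 * back to nuking the whole VMID on SMMUv1, which lacks TLBIIPAS2.
 */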
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			/* Clear the page offset so the ASID fits in the low bits */
			iova &= ~0xfffUL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret;
	u32 fsr, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fsr & FSR_IGN)
		dev_err_ratelimited(smmu->dev,
				    "Unexpected context fault (fsr 0x%x)\n",
				    fsr);

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		dev_err_ratelimited(smmu->dev,
		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
		    iova, fsynr, cfg->cbndx);
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/* Clear the faulting FSR */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* Retry or terminate any stalled transactions */
	if (fsr & FSR_SS)
		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

	return ret;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

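/*
 * Finalise a domain against a particular SMMU instance: pick the
 * translation stage and context format, claim a context bank, allocate
 * the page tables, and only then request the context fault IRQ so the
 * handler can never observe a half-initialised domain.
 */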
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/* We're bypassing these SIDs, so don't allocate an actual context */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *	S1               N              S1
	 *	S1             S1+S2            S1
	 *	S1               S2             S2
	 *	S1               S1             S1
	 *	N                N              N
	 *	N              S1+S2            S2
	 *	N                S2             S2
	 *	N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

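/*
 * On a stream-matching SMMU, install one Stream Match Register per
 * StreamID. Each SMR can match an (id, mask) pair, but since SMRs are
 * not currently shared, the mask is always zero: an exact-match entry
 * per ID. Stream-indexing SMMUs have nothing to configure here.
 */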
static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (idx < 0) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/*
	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
	 * for all devices behind the SMMU. Note that we need to take
	 * care configuring SMRs for devices that are both a platform_device
	 * and a PCI device (i.e. a PCI host controller).
	 */
	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
		return 0;

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx, s2cr;

		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}

1186static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001187 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001188{
Will Deacon43b412b2014-07-15 11:22:24 +01001189 int i;
Will Deacon44680ee2014-06-25 11:29:12 +01001190 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon43b412b2014-07-15 11:22:24 +01001191 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001192
Will Deacon8f68f8e2014-07-15 11:27:08 +01001193 /* An IOMMU group is torn down by the first device to be removed */
1194 if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
1195 return;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001196
1197 /*
1198 * We *must* clear the S2CR first, because freeing the SMR means
1199 * that it can be re-allocated immediately.
1200 */
Will Deacon43b412b2014-07-15 11:22:24 +01001201 for (i = 0; i < cfg->num_streamids; ++i) {
1202 u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Robin Murphy25a1c962016-02-10 14:25:33 +00001203 u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
Will Deacon43b412b2014-07-15 11:22:24 +01001204
Robin Murphy25a1c962016-02-10 14:25:33 +00001205 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
Will Deacon43b412b2014-07-15 11:22:24 +01001206 }
1207
Will Deacona9a1b0b2014-05-01 18:05:08 +01001208 arm_smmu_master_free_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001209}
1210
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001211static void arm_smmu_detach_dev(struct device *dev,
1212 struct arm_smmu_master_cfg *cfg)
1213{
1214 struct iommu_domain *domain = dev->archdata.iommu;
1215 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1216
1217 dev->archdata.iommu = NULL;
1218 arm_smmu_domain_remove_master(smmu_domain, cfg);
1219}
1220
Will Deacon45ae7cf2013-06-24 18:31:25 +01001221static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1222{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001223 int ret;
Joerg Roedel1d672632015-03-26 13:43:10 +01001224 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001225 struct arm_smmu_device *smmu;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001226 struct arm_smmu_master_cfg *cfg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001227
Will Deacon8f68f8e2014-07-15 11:27:08 +01001228 smmu = find_smmu_for_device(dev);
Will Deacon44680ee2014-06-25 11:29:12 +01001229 if (!smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001230 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1231 return -ENXIO;
1232 }
1233
Will Deacon518f7132014-11-14 17:17:54 +00001234 /* Ensure that the domain is finalised */
1235 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001236 if (ret < 0)
Will Deacon518f7132014-11-14 17:17:54 +00001237 return ret;
1238
Will Deacon45ae7cf2013-06-24 18:31:25 +01001239 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001240 * Sanity check the domain. We don't support domains across
1241 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001242 */
Will Deacon518f7132014-11-14 17:17:54 +00001243 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001244 dev_err(dev,
1245 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001246 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
1247 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001248 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001249
1250 /* Looks ok, so add the device to the domain */
Will Deacon8f68f8e2014-07-15 11:27:08 +01001251 cfg = find_smmu_master_cfg(dev);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001252 if (!cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001253 return -ENODEV;
1254
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001255 /* Detach the dev from its current domain */
1256 if (dev->archdata.iommu)
1257 arm_smmu_detach_dev(dev, cfg);
1258
Will Deacon844e35b2014-07-17 11:23:51 +01001259 ret = arm_smmu_domain_add_master(smmu_domain, cfg);
1260 if (!ret)
1261 dev->archdata.iommu = domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001262 return ret;
1263}
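
/*
 * For reference, a minimal sketch of how a consumer would exercise the
 * attach path above through the generic IOMMU API (assuming a platform
 * device 'dev' that is a master of this SMMU; error handling elided):
 *
 *	struct iommu_domain *domain;
 *	int err;
 *
 *	domain = iommu_domain_alloc(&platform_bus_type);
 *	if (!domain)
 *		return -ENOMEM;
 *	err = iommu_attach_device(domain, dev);	// ends up in arm_smmu_attach_dev()
 *
 * iommu_domain_alloc() invokes our domain_alloc callback, and the first
 * attach finalises the domain against this particular SMMU instance.
 */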

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
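
/*
 * A hedged usage sketch for the two callbacks above: mappings are
 * created and torn down via the core API, which forwards to ->map and
 * ->unmap under our pgtbl_lock. The addresses below are assumptions
 * for illustration only.
 *
 *	dma_addr_t iova  = 0x10000000;		// example IOVA
 *	phys_addr_t phys = 0x80000000;		// example PA
 *	size_t unmapped;
 *	int err;
 *
 *	err = iommu_map(domain, iova, phys, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	unmapped = iommu_unmap(domain, iova, SZ_4K);
 *
 * Sizes must be built from bits set in arm_smmu_ops.pgsize_bitmap,
 * which is restricted once the hardware has been probed (see below).
 */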

static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}
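
/*
 * Illustrative arithmetic for the return statement above, assuming a
 * successful ATS1PR translation: the PAR supplies PA[39:12] and the
 * low 12 bits are carried over unchanged from the input address. With
 * hypothetical values:
 *
 *	u64 par  = 0x0000008040001000;	// made-up PAR contents
 *	u64 iova = 0x0000000010002abc;	// made-up input IOVA
 *	u64 pa   = (par & GENMASK_ULL(39, 12)) | (iova & 0xfff);
 *					// -> 0x8040001abc
 */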

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids)
		cfg->streamids[cfg->num_streamids++] = sid;

	return 0;
}
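
/*
 * A worked example of the Requester ID assumption above (illustrative
 * numbers only): for a device at bus 1, slot 0, function 0 with no
 * aliasing bridges in between, pci_for_each_dma_alias() finishes on
 * the device's own RID, so
 *
 *	sid = PCI_DEVID(1, PCI_DEVFN(0, 0));	// 0x0100
 *
 * If a PCIe-to-PCI bridge sits upstream, the walk ends on the bridge's
 * alias instead, and that aliased RID becomes the stream ID programmed
 * into the SMRs.
 */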

static int arm_smmu_init_platform_device(struct device *dev,
					 struct iommu_group *group)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master *master;

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	iommu_group_set_iommudata(group, &master->cfg, NULL);

	return 0;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}
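
/*
 * For context: iommu_group_get_for_dev() (called from
 * arm_smmu_add_device() above) is what invokes this ->device_group
 * callback, roughly along these lines (a simplified sketch of the core
 * code, not a verbatim copy):
 *
 *	group = ops->device_group(dev);		// this function
 *	if (IS_ERR(group))
 *		return group;
 *	iommu_group_add_device(group, dev);
 *
 * so the PCI/platform split here decides both how groups are shared
 * and which init path populates the per-group master cfg.
 */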

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
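
/*
 * Usage sketch for the attribute pair above: nesting must be requested
 * before the domain is attached (and hence finalised), e.g. from a
 * hypothetical VFIO-style caller:
 *
 *	int nesting = 1;
 *	int err;
 *
 *	domain = iommu_domain_alloc(&platform_bus_type);
 *	err = iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting);
 *	err = iommu_attach_device(domain, dev);	// stage is fixed from here on
 *
 * Once smmu_domain->smmu is set, the -EPERM above forbids changing it.
 */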

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i = 0;
	u32 reg, major;

	/* Clear the global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
	reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/*
	 * Before clearing ARM_MMU500_ACTLR_CPRE, we must first clear the
	 * CACHE_LOCK bit of the ACR. Note that the CACHE_LOCK bit is only
	 * present in MMU-500 r2 onwards.
	 */
	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
	major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
	if ((smmu->model == ARM_MMU500) && (major >= 2)) {
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
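
/*
 * To summarise the sCR0 assembly above as a sketch (with disable_bypass
 * assumed true), the value finally written is roughly:
 *
 *	reg = old_sCR0					// hypothetical name
 *	    | sCR0_GFRE | sCR0_GFIE			// global fault reporting
 *	    | sCR0_GCFGFRE | sCR0_GCFGFIE		// config fault reporting
 *	    | sCR0_VMIDPNE | sCR0_PTM			// keep TLB ops private
 *	    | sCR0_USFCFG;				// fault unmatched streams
 *	reg &= ~(sCR0_CLIENTPD | sCR0_FB |
 *		 (sCR0_BSU_MASK << sCR0_BSU_SHIFT));
 *
 * with sCR0_VMID16EN OR'd in when the hardware supports 16-bit VMIDs.
 */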

static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}
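
/*
 * Example of the encoding this helper decodes (values taken straight
 * from the switch above, as consumed by the ID2 parsing below): an
 * ID2.IAS field of 2 means 40-bit intermediate physical addresses, so
 *
 *	arm_smmu_id_size_to_bits(2) == 40
 *
 * and any field value from 5 upwards is clamped to the 48-bit maximum.
 */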

static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
			smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
		((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
		if ((mask & sid) != sid) {
			dev_err(smmu->dev,
				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
				mask, sid);
			return -ENODEV;
		}

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups, mask 0x%x\n",
			   smmu->num_mapping_groups, mask);
	} else {
		smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
					   ID0_NUMSIDB_MASK;
	}

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			 size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}
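
/*
 * Worked example for the page size handling above (hypothetical
 * hardware): a v2 SMMU reporting only the AArch64 4K granule ends up
 * with
 *
 *	smmu->pgsize_bitmap == (SZ_4K | SZ_2M | SZ_1G);	// 0x40201000
 *
 * The first SMMU probed seeds arm_smmu_ops.pgsize_bitmap; any further
 * SMMUs OR their sizes in, so the advertised set is the union across
 * instances rather than the intersection.
 */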

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
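
/*
 * For reference, an illustrative device-tree fragment matching this
 * table (made-up addresses and interrupt numbers; the authoritative
 * binding lives in Documentation/devicetree/bindings/iommu/arm,smmu.txt):
 *
 *	smmu: iommu@2b500000 {
 *		compatible = "arm,mmu-500", "arm,smmu-v2";
 *		reg = <0x2b500000 0x10000>;
 *		#global-interrupts = <1>;
 *		interrupts = <0 85 4>, <0 86 4>;
 *		mmu-masters = <&dma0 0xd01d 0xd01e>;
 *	};
 *
 *	dma0: dma@10000000 {
 *		...
 *		#stream-id-cells = <2>;
 *	};
 *
 * The probe routine below walks exactly these properties.
 */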

static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_iterator it;
	struct arm_smmu_phandle_args *masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	data = of_id->data;
	smmu->version = data->version;
	smmu->model = data->model;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	i = 0;
	smmu->masters = RB_ROOT;

	err = -ENOMEM;
	/* No need to zero the memory for masterspec */
	masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
	if (!masterspec)
		goto out_put_masters;

	of_for_each_phandle(&it, err, dev->of_node,
			    "mmu-masters", "#stream-id-cells", 0) {
		int count = of_phandle_iterator_args(&it, masterspec->args,
						     MAX_MASTER_STREAMIDS);
		masterspec->np		= of_node_get(it.node);
		masterspec->args_count	= count;

		err = register_smmu_master(smmu, dev, masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec->np->name);
			kfree(masterspec);
			goto out_put_masters;
		}

		i++;
	}

	dev_notice(dev, "registered %d master devices\n", i);

	kfree(masterspec);

	parse_driver_options(smmu);

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_put_masters;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		devm_free_irq(smmu->dev, smmu->irqs[i], smmu);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");