/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

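/* Reset value for an S2CR: bypass, or fault if the disable_bypass parameter is set */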
#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	u32				cavium_id_base; /* Specific to Cavium */
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

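/*
 * ASIDs and VMIDs are derived from the context bank index, offset by a
 * per-SMMU base so that values stay unique system-wide on implementations
 * (such as Cavium ThunderX) whose SMMUs share a single TLB namespace; the
 * base is zero everywhere else.
 */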
#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

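/*
 * For PCI masters the legacy DT binding describes the host bridge rather
 * than the endpoint, so walk up to the root bus and return the bridge's
 * parent OF node; platform devices simply use their own node.
 */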
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

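/*
 * driver_for_each_device() callback for the legacy "mmu-masters" binding:
 * scan a candidate SMMU's "mmu-masters" phandle list for the master node
 * stashed in the iterator, and on a match hand the SMMU device back via
 * *data and stop the walk.
 */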
static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					    struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

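/*
 * Lock-free context bank allocator: find_next_zero_bit() can race with a
 * concurrent allocation, so loop until test_and_set_bit() actually wins
 * the bit we found.
 */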
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

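/*
 * Invalidate a range of TLB entries without waiting for completion: by VA
 * (tagged with the ASID) for stage 1, by IPA for stage 2 on SMMUv2, and
 * falling back to a whole-VMID invalidation on SMMUv1, which has no
 * by-IPA operation.
 */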
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

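/* TLB maintenance callbacks hooked up to the io-pgtable framework */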
static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

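/*
 * Per-context fault handler: report the faulting address and syndrome,
 * then clear the fault by writing the FSR value back to itself.
 */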
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

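/*
 * Program a context bank from the io-pgtable configuration: CBA2R/CBAR
 * attributes, the TTBRs, TTBCR(2) and MAIRs, then enable translation via
 * SCTLR.
 */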
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg, reg2;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);

		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
			writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		} else {
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
		}
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.tcr;
			reg2 = 0;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg2 |= TTBCR2_SEP_UPSTREAM;
		}
		if (smmu->version > ARM_SMMU_V1)
			writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.prrr;
			reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

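/*
 * Finalise a domain on its first attach: pick a translation stage and
 * context format, claim a context bank, allocate the page tables and
 * request the context fault IRQ.
 */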
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 * S1               N                S1
	 * S1               S1+S2            S1
	 * S1               S2               S2
	 * S1               S1               S1
	 * N                N                N
	 * N                S1+S2            S2
	 * N                S2               S2
	 * N                S1               S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

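/*
 * Helpers to push the software state of a stream mapping entry out to the
 * hardware SMR and S2CR registers.
 */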
static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;

	if (smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;

	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

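/*
 * Reserve stream map entries for every stream ID in the master's fwspec,
 * sharing any entry that an equivalent SMR already covers, and only commit
 * them to hardware once group setup has succeeded.
 */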
static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(fwspec, i, idx) {
		u16 sid = fwspec->ids[i];
		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, sid, mask);
		if (ret < 0)
			goto out_err;

		idx = ret;
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = sid;
			smrs[idx].mask = mask;
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	group = iommu_group_get_for_dev(dev);
	if (!group)
		group = ERR_PTR(-ENOMEM);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_err;
	}
	iommu_group_put(group);

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(fwspec, i, idx) {
		arm_smmu_write_sme(smmu, idx);
		smmu->s2crs[idx].group = group;
	}

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}

static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}

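/*
 * Point every stream mapping entry for this master at the domain's context
 * bank, skipping any S2CR that is already configured appropriately.
 */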
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
	u8 cbndx = smmu_domain->cfg.cbndx;
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx) {
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			continue;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/*
	 * FIXME: The arch/arm DMA API code tries to attach devices to its own
	 * domains between of_xlate() and add_device() - we have no way to cope
	 * with that, so until ARM gets converted to rely on groups and default
	 * domains, just say no (but more politely than by dereferencing NULL).
	 * This should be at least a WARN_ON once that's sorted.
	 */
	if (!fwspec->iommu_priv)
		return -ENODEV;

	smmu = fwspec_smmu(fwspec);
	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	return arm_smmu_domain_add_master(smmu_domain, fwspec);
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

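	/*
	 * The actual page-table manipulation is delegated to the io-pgtable
	 * library; pgtbl_lock serialises concurrent updates to the tables.
	 * arm_smmu_unmap() below follows the same pattern.
	 */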
	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

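	/*
	 * Use the hardware address translation operation: writing a VA to
	 * ATS1PR kicks off a stage 1 privileged-read translation, ATSR.ACTIVE
	 * indicates completion, and the result (or fault) lands in PAR.
	 */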
	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int arm_smmu_match_node(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
{
	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
						np, arm_smmu_match_node);
	put_device(dev);
	return dev ? dev_get_drvdata(dev) : NULL;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	int i, ret;

	if (using_legacy_binding) {
		ret = arm_smmu_register_legacy_master(dev, &smmu);
		fwspec = dev->iommu_fwspec;
		if (ret)
			goto out_free;
	} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
		smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
	} else {
		return -ENODEV;
	}

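	/* Reject stream IDs and SMR masks wider than this SMMU can match */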
	ret = -EINVAL;
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = fwspec->ids[i];
		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

		if (sid & ~smmu->streamid_mask) {
			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
				sid, smmu->streamid_mask);
			goto out_free;
		}
		if (mask & ~smmu->smr_mask_mask) {
			dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
				mask, smmu->smr_mask_mask);
			goto out_free;
		}
	}

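	/*
	 * cfg ends in a flexible smendx[] array, so size the allocation by
	 * the number of stream IDs (i == fwspec->num_ids after the loop).
	 */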
	ret = -ENOMEM;
	cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
		      GFP_KERNEL);
	if (!cfg)
		goto out_free;

	cfg->smmu = smmu;
	fwspec->iommu_priv = cfg;
	while (i--)
		cfg->smendx[i] = INVALID_SMENDX;

	ret = arm_smmu_master_alloc_smes(dev);
	if (ret)
		goto out_free;

	return 0;

out_free:
	if (fwspec)
		kfree(fwspec->iommu_priv);
	iommu_fwspec_free(dev);
	return ret;
}

static void arm_smmu_remove_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return;

	arm_smmu_master_free_smes(fwspec);
	iommu_group_remove_device(dev);
	kfree(fwspec->iommu_priv);
	iommu_fwspec_free(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct iommu_group *group = NULL;
	int i, idx;

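	/*
	 * If any of this master's stream map entries is already owned by a
	 * group (e.g. shared with an aliasing master), all of them must
	 * agree on which group that is.
	 */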
	for_each_cfg_sme(fwspec, i, idx) {
		if (group && smmu->s2crs[idx].group &&
		    group != smmu->s2crs[idx].group)
			return ERR_PTR(-EINVAL);

		group = smmu->s2crs[idx].group;
	}

	if (group)
		return group;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

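/*
 * The nesting attribute can only be changed while the domain is not yet
 * backed by hardware state, i.e. before the first attach finalises it.
 */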
static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

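/*
 * A hypothetical example of the generic binding this parses, with the
 * optional second cell supplying an SMR mask:
 *
 *	iommus = <&smmu 0x400 0x7f>;
 *
 * which packs into fwid as (0x7f << SMR_MASK_SHIFT) | 0x400.
 */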
static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	u32 fwid = 0;

	if (args->args_count > 0)
		fwid |= (u16)args->args[0];

	if (args->args_count > 1)
		fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;

	return iommu_fwspec_add_ids(dev, &fwid, 1);
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.of_xlate		= arm_smmu_of_xlate,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i;
	u32 reg, major;

	/* Clear the global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/*
	 * Reset stream mapping groups: Initial values mark all SMRn as
	 * invalid and all S2CRn as bypass unless overridden.
	 */
	for (i = 0; i < smmu->num_mapping_groups; ++i)
		arm_smmu_write_sme(smmu, i);

	/*
	 * Before clearing ARM_MMU500_ACTLR_CPRE, we need to clear the
	 * CACHE_LOCK bit of ACR first; note that CACHE_LOCK is only
	 * present in MMU-500r2 onwards.
	 */
	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
	major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
	if ((smmu->model == ARM_MMU500) && (major >= 2)) {
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;
	int i;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
			smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
		((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	/* Max. number of entries we have for stream matching/indexing */
	size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
	smmu->streamid_mask = size - 1;
	if (id & ID0_SMS) {
		u32 smr;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
		if (size == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		/*
		 * SMR.ID bits may not be preserved if the corresponding MASK
		 * bits are set, so check each one separately. We can reject
		 * masters later if they try to claim IDs outside these masks.
		 */
		smr = smmu->streamid_mask << SMR_ID_SHIFT;
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
		smmu->streamid_mask = smr >> SMR_ID_SHIFT;

		smr = smmu->streamid_mask << SMR_MASK_SHIFT;
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
		smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;

		/* Zero-initialised to mark as invalid */
		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
					  GFP_KERNEL);
		if (!smmu->smrs)
			return -ENOMEM;

		dev_notice(smmu->dev,
			   "\tstream matching with %lu register groups, mask 0x%x\n",
			   size, smmu->smr_mask_mask);
	}
	/* s2cr->type == 0 means translation, so initialise explicitly */
	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
					 GFP_KERNEL);
	if (!smmu->s2crs)
		return -ENOMEM;
	for (i = 0; i < size; i++)
		smmu->s2crs[i] = s2cr_init_val;

	smmu->num_mapping_groups = size;
	mutex_init(&smmu->stream_map_mutex);

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			"SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int num_irqs, i, err;
	bool legacy_binding;

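	/*
	 * The legacy "mmu-masters" binding and the generic "iommus" binding
	 * are mutually exclusive system-wide: whichever is seen first wins,
	 * and any later SMMU described with the other scheme is refused.
	 */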
	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
	if (legacy_binding && !using_generic_binding) {
		if (!using_legacy_binding)
			pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
		using_legacy_binding = true;
	} else if (!legacy_binding && !using_legacy_binding) {
		using_generic_binding = true;
	} else {
		dev_err(dev, "not probing due to mismatched DT properties\n");
		return -ENODEV;
	}

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	data = of_device_get_match_data(dev);
	smmu->version = data->version;
	smmu->model = data->model;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	parse_driver_options(smmu);

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		return -ENODEV;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			return err;
		}
	}

	of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
	platform_set_drvdata(pdev, smmu);
	arm_smmu_device_reset(smmu);

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif
#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif
	return 0;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return -ENODEV;

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(&pdev->dev, "removing device with active domains!\n");

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	static bool registered;
	int ret = 0;

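	/*
	 * This can be reached more than once: via subsys_initcall() and via
	 * the IOMMU_OF_DECLARE() early-init path in arm_smmu_of_init() below,
	 * so only register the platform driver on the first call.
	 */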
	if (!registered) {
		ret = platform_driver_register(&arm_smmu_driver);
		registered = !ret;
	}
	return ret;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

static int __init arm_smmu_of_init(struct device_node *np)
{
	int ret = arm_smmu_init();

	if (ret)
		return ret;

	if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
		return -ENODEV;

	return 0;
}
IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");