/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/of_platform.h>
#include <linux/msm-bus.h>
#include <dt-bindings/msm/msm-bus-ids.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		500000	/* 500ms */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FSRRESTORE		0x5c
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define TLBSTATUS_SACTIVE		(1 << 0)
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
	QCOM_SMMUV500,
};

struct arm_smmu_device;
struct arm_smmu_arch_ops {
	int (*init)(struct arm_smmu_device *smmu);
	void (*device_reset)(struct arm_smmu_device *smmu);
	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
					 dma_addr_t iova);
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

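/*
 * Stream-mapping bookkeeping for a master device, stashed in its
 * iommu_fwspec: smendx[] records which SMR/S2CR index each of the device's
 * stream IDs occupies.  Indexes beyond fw->num_ids read back as
 * INVALID_SMENDX.
 */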
struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

/*
 * Describes resources required for on/off power operation.
 * Separate reference count is provided for atomic/nonatomic
 * operations.
 */
struct arm_smmu_power_resources {
	struct platform_device		*pdev;
	struct device			*dev;

	struct clk			**clocks;
	int				num_clocks;

	struct regulator_bulk_data	*gdscs;
	int				num_gdscs;

	uint32_t			bus_client;
	struct msm_bus_scale_pdata	*bus_dt_data;

	/* Protects power_count */
	struct mutex			power_lock;
	int				power_count;

	/* Protects clock_refs_count */
	spinlock_t			clock_refs_lock;
	int				clock_refs_count;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
#define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;

	u32				cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
	unsigned int			num_impl_def_attach_registers;

	struct arm_smmu_power_resources *pwr;

	spinlock_t			atos_lock;

	/* protects idr */
	struct mutex			idr_mutex;
	struct idr			asid_idr;

	struct arm_smmu_arch_ops	*arch_ops;
	void				*archdata;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	u32				procid;
	u16				asid;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff
#define INVALID_CBNDX			0xff
#define INVALID_ASID			0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg)	((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg)	((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_pte_info {
	void *virt_addr;
	size_t size;
	struct list_head entry;
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct device			*dev;
	struct io_pgtable_ops		*pgtbl_ops;
	struct io_pgtable_cfg		pgtbl_cfg;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	u32				attributes;
	u32				secure_vmid;
	struct list_head		pte_info_list;
	struct list_head		unassign_list;
	struct mutex			assign_lock;
	struct list_head		secure_pool_list;
	struct iommu_domain		domain;
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ 0, NULL},
};

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);

static int arm_smmu_arch_init(struct arm_smmu_device *smmu);
static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu);

static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_dbg(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
{
	if (smmu_domain->attributes &
			(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT))
		return true;
	else if (smmu_domain->smmu && smmu_domain->smmu->dev)
		return smmu_domain->smmu->dev->archdata.dma_coherent;
	else
		return false;
}

static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
{
	return (smmu_domain->secure_vmid != VMID_INVAL);
}

static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_lock(&smmu_domain->assign_lock);
}

static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_unlock(&smmu_domain->assign_lock);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

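/*
 * Legacy DT support: resolve the old "mmu-masters" binding by scanning every
 * registered SMMU for a phandle back to this master, then record the stream
 * IDs found there in an iommu_fwspec on the device.
 */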
static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err = 0;

	memset(&it, 0, sizeof(it));
	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

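/* Atomically claim the first free index in [start, end) of an allocation bitmap */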
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

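/*
 * Power management is split into a sleepable "slow" path (GDSC regulators,
 * bus votes and clk_prepare, guarded by power_lock) and an atomic path
 * (clk_enable/clk_disable refcounted under clock_refs_lock).
 * arm_smmu_power_on()/arm_smmu_power_off() pair the two for callers that may
 * sleep; atomic callers use only the *_atomic variants and rely on the slow
 * state already being up.
 */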
static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_prepare(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
			while (i--)
				clk_unprepare(pwr->clocks[i]);
			break;
		}
	}
	return ret;
}

static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_unprepare(pwr->clocks[i - 1]);
}

static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_enable(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
			while (i--)
				clk_disable(pwr->clocks[i]);
			break;
		}
	}

	return ret;
}

static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_disable(pwr->clocks[i - 1]);
}

static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return 0;
	return msm_bus_scale_client_update_request(pwr->bus_client, 1);
}

static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return;
	WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
}

/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count > 0) {
		pwr->clock_refs_count++;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return 0;
	}

	ret = arm_smmu_enable_clocks(pwr);
	if (!ret)
		pwr->clock_refs_count = 1;

	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
	return ret;
}

/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
{
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count == 0) {
		WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;

	} else if (pwr->clock_refs_count > 1) {
		pwr->clock_refs_count--;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;
	}

	arm_smmu_disable_clocks(pwr);

	pwr->clock_refs_count = 0;
	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
}

static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
{
	int ret;

	mutex_lock(&pwr->power_lock);
	if (pwr->power_count > 0) {
		pwr->power_count += 1;
		mutex_unlock(&pwr->power_lock);
		return 0;
	}

	ret = regulator_bulk_enable(pwr->num_gdscs, pwr->gdscs);
	if (ret)
		goto out_unlock;

	ret = arm_smmu_request_bus(pwr);
	if (ret)
		goto out_disable_regulators;

	ret = arm_smmu_prepare_clocks(pwr);
	if (ret)
		goto out_disable_bus;

	pwr->power_count = 1;
	mutex_unlock(&pwr->power_lock);
	return 0;

out_disable_bus:
	arm_smmu_unrequest_bus(pwr);
out_disable_regulators:
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
out_unlock:
	mutex_unlock(&pwr->power_lock);
	return ret;
}

static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
{
	mutex_lock(&pwr->power_lock);
	if (pwr->power_count == 0) {
		WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
		mutex_unlock(&pwr->power_lock);
		return;

	} else if (pwr->power_count > 1) {
		pwr->power_count--;
		mutex_unlock(&pwr->power_lock);
		return;
	}

	arm_smmu_unprepare_clocks(pwr);
	arm_smmu_unrequest_bus(pwr);
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);

	mutex_unlock(&pwr->power_lock);
}

static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
{
	int ret;

	ret = arm_smmu_power_on_slow(pwr);
	if (ret)
		return ret;

	ret = arm_smmu_power_on_atomic(pwr);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	arm_smmu_power_off_slow(pwr);
	return ret;
}

static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
{
	arm_smmu_power_off_atomic(pwr);
	arm_smmu_power_off_slow(pwr);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static int arm_smmu_domain_power_on(struct iommu_domain *domain,
				    struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain)
		return arm_smmu_power_on_atomic(smmu->pwr);

	return arm_smmu_power_on(smmu->pwr);
}

/*
 * Must be used instead of arm_smmu_power_off if it may be called from
 * atomic context
 */
static void arm_smmu_domain_power_off(struct iommu_domain *domain,
				      struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain) {
		arm_smmu_power_off_atomic(smmu->pwr);
		return;
	}

	arm_smmu_power_off(smmu->pwr);
}

/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
				 int cbndx)
{
	void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
	u32 val;

	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
				      !(val & TLBSTATUS_SACTIVE),
				      0, TLB_LOOP_TIMEOUT))
		dev_err(smmu->dev, "TLBSYNC timeout!\n");
}

static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
}

/* Must be called with clocks/regulators enabled */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
		__arm_smmu_tlb_sync(smmu);
	}
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

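/*
 * Secure-domain page-table pages must be assigned to the secure VM before
 * use (arm_smmu_prepare_pgtable) and unassigned on release.  Freed pages are
 * parked on a per-domain "secure pool" so they can be reused without another
 * assign round trip; they are only handed back when the pool is destroyed.
 */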
struct arm_smmu_secure_pool_chunk {
	void *addr;
	size_t size;
	struct list_head list;
};

static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
					 size_t size)
{
	struct arm_smmu_secure_pool_chunk *it;

	list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
		if (it->size == size) {
			void *addr = it->addr;

			list_del(&it->list);
			kfree(it);
			return addr;
		}
	}

	return NULL;
}

static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
				    void *addr, size_t size)
{
	struct arm_smmu_secure_pool_chunk *chunk;

	chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
	if (!chunk)
		return -ENOMEM;

	chunk->addr = addr;
	chunk->size = size;
	memset(addr, 0, size);
	list_add(&chunk->list, &smmu_domain->secure_pool_list);

	return 0;
}

static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_secure_pool_chunk *it, *i;

	list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
		arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
		/* pages will be freed later (after being unassigned) */
		kfree(it);
	}
}

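/* io-pgtable memory hooks: route secure-domain allocations through the pool */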
static void *arm_smmu_alloc_pages_exact(void *cookie,
					size_t size, gfp_t gfp_mask)
{
	int ret;
	void *page;
	struct arm_smmu_domain *smmu_domain = cookie;

	if (!arm_smmu_is_domain_secure(smmu_domain))
		return alloc_pages_exact(size, gfp_mask);

	page = arm_smmu_secure_pool_remove(smmu_domain, size);
	if (page)
		return page;

	page = alloc_pages_exact(size, gfp_mask);
	if (page) {
		ret = arm_smmu_prepare_pgtable(page, cookie);
		if (ret) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return page;
}

static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	if (!arm_smmu_is_domain_secure(smmu_domain)) {
		free_pages_exact(virt, size);
		return;
	}

	if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
		arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
	.alloc_pages_exact = arm_smmu_alloc_pages_exact,
	.free_pages_exact = arm_smmu_free_pages_exact,
};

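/*
 * Translate the faulting IOVA with the hardware ATOS engine both before and
 * after a full TLB invalidation; a result that changes across the TLBIALL
 * points at a stale TLB entry rather than a bad page table.
 */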
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001138static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
1139 dma_addr_t iova, u32 fsr)
1140{
1141 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001142 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001143 phys_addr_t phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07001144 phys_addr_t phys_post_tlbiall;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001145
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001146 phys = arm_smmu_iova_to_phys_hard(domain, iova);
1147 arm_smmu_tlb_inv_context(smmu_domain);
1148 phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001149
Patrick Dalyad441dd2016-09-15 15:50:46 -07001150 if (phys != phys_post_tlbiall) {
1151 dev_err(smmu->dev,
1152 "ATOS results differed across TLBIALL...\n"
1153 "Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
1154 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001155
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001156 return (phys == 0 ? phys_post_tlbiall : phys);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001157}
1158
Will Deacon45ae7cf2013-06-24 18:31:25 +01001159static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
1160{
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001161 int flags, ret, tmp;
Patrick Daly5ba28112016-08-30 19:18:52 -07001162 u32 fsr, fsynr, resume;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001163 unsigned long iova;
1164 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +01001165 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001166 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1167 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001168 void __iomem *cb_base;
Shalaj Jain04059c52015-03-03 13:34:59 -08001169 void __iomem *gr1_base;
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001170 bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001171 phys_addr_t phys_soft;
Shalaj Jain04059c52015-03-03 13:34:59 -08001172 u32 frsynra;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001173 bool non_fatal_fault = !!(smmu_domain->attributes &
1174 DOMAIN_ATTR_NON_FATAL_FAULTS);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001175
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001176 static DEFINE_RATELIMIT_STATE(_rs,
1177 DEFAULT_RATELIMIT_INTERVAL,
1178 DEFAULT_RATELIMIT_BURST);
1179
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001180 ret = arm_smmu_power_on(smmu->pwr);
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001181 if (ret)
1182 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001183
Shalaj Jain04059c52015-03-03 13:34:59 -08001184 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001185 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001186 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
1187
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001188 if (!(fsr & FSR_FAULT)) {
1189 ret = IRQ_NONE;
1190 goto out_power_off;
1191 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001192
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001193 if (fatal_asf && (fsr & FSR_ASF)) {
1194 dev_err(smmu->dev,
1195 "Took an address size fault. Refusing to recover.\n");
1196 BUG();
1197 }
1198
Will Deacon45ae7cf2013-06-24 18:31:25 +01001199 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
Patrick Daly5ba28112016-08-30 19:18:52 -07001200 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001201 if (fsr & FSR_TF)
1202 flags |= IOMMU_FAULT_TRANSLATION;
1203 if (fsr & FSR_PF)
1204 flags |= IOMMU_FAULT_PERMISSION;
Mitchel Humpherysdd75e332015-08-19 11:02:33 -07001205 if (fsr & FSR_EF)
1206 flags |= IOMMU_FAULT_EXTERNAL;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001207 if (fsr & FSR_SS)
1208 flags |= IOMMU_FAULT_TRANSACTION_STALLED;
Patrick Daly5ba28112016-08-30 19:18:52 -07001209
Robin Murphyf9a05f02016-04-13 18:13:01 +01001210 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001211 phys_soft = arm_smmu_iova_to_phys(domain, iova);
Shalaj Jain04059c52015-03-03 13:34:59 -08001212 frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
1213 frsynra &= CBFRSYNRA_SID_MASK;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001214 tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
1215 if (!tmp || (tmp == -EBUSY)) {
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001216 dev_dbg(smmu->dev,
1217 "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1218 iova, fsr, fsynr, cfg->cbndx);
1219 dev_dbg(smmu->dev,
1220 "soft iova-to-phys=%pa\n", &phys_soft);
Patrick Daly5ba28112016-08-30 19:18:52 -07001221 ret = IRQ_HANDLED;
Shrenuj Bansald5083c02015-09-18 14:59:09 -07001222 resume = RESUME_TERMINATE;
Patrick Daly5ba28112016-08-30 19:18:52 -07001223 } else {
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001224 phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
1225 fsr);
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001226 if (__ratelimit(&_rs)) {
1227 dev_err(smmu->dev,
1228 "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1229 iova, fsr, fsynr, cfg->cbndx);
1230 dev_err(smmu->dev, "FAR = %016lx\n",
1231 (unsigned long)iova);
1232 dev_err(smmu->dev,
1233 "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
1234 fsr,
1235 (fsr & 0x02) ? "TF " : "",
1236 (fsr & 0x04) ? "AFF " : "",
1237 (fsr & 0x08) ? "PF " : "",
1238 (fsr & 0x10) ? "EF " : "",
1239 (fsr & 0x20) ? "TLBMCF " : "",
1240 (fsr & 0x40) ? "TLBLKF " : "",
1241 (fsr & 0x80) ? "MHF " : "",
1242 (fsr & 0x40000000) ? "SS " : "",
1243 (fsr & 0x80000000) ? "MULTI " : "");
1244 dev_err(smmu->dev,
1245 "soft iova-to-phys=%pa\n", &phys_soft);
Mitchel Humpherysd03b65d2015-11-05 11:50:29 -08001246 if (!phys_soft)
1247 dev_err(smmu->dev,
1248 "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
1249 dev_name(smmu->dev));
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001250 if (phys_atos)
1251 dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
1252 &phys_atos);
1253 else
1254 dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001255 dev_err(smmu->dev, "SID=0x%x\n", frsynra);
1256 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001257 ret = IRQ_NONE;
1258 resume = RESUME_TERMINATE;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001259 if (!non_fatal_fault) {
1260 dev_err(smmu->dev,
1261 "Unhandled arm-smmu context fault!\n");
1262 BUG();
1263 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001264 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001265
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001266 /*
1267 * If the client returns -EBUSY, do not clear FSR and do not RESUME
1268 * if stalled. This is required to keep the IOMMU client stalled on
1269 * the outstanding fault. This gives the client a chance to take any
1270 * debug action and then terminate the stalled transaction.
1271 * So, the sequence in case of stall on fault should be:
1272 * 1) Do not clear FSR or write to RESUME here
1273 * 2) Client takes any debug action
1274 * 3) Client terminates the stalled transaction and resumes the IOMMU
1275 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1276 * not before so that the fault remains outstanding. This ensures
1277 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1278 * need to be terminated.
1279 */
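	/*
	 * Illustrative sketch (not code from this driver): a client that
	 * wants to inspect a stalled transaction before terminating it can
	 * register a handler with iommu_set_fault_handler() and return -EBUSY
	 * from it to keep the fault outstanding, then carry out steps 2)-4)
	 * above itself. "my_dump_debug_state" is a hypothetical helper:
	 *
	 *	static int my_fault_handler(struct iommu_domain *dom,
	 *				    struct device *dev,
	 *				    unsigned long iova, int flags,
	 *				    void *token)
	 *	{
	 *		my_dump_debug_state(token);
	 *		return -EBUSY;
	 *	}
	 */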
1280 if (tmp != -EBUSY) {
1281 /* Clear the faulting FSR */
1282 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001283
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001284 /*
1285 * Barrier required to ensure that the FSR is cleared
1286 * before resuming SMMU operation
1287 */
1288 wmb();
1289
1290 /* Retry or terminate any stalled transactions */
1291 if (fsr & FSR_SS)
1292 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1293 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001294
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001295out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001296 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001297
Patrick Daly5ba28112016-08-30 19:18:52 -07001298 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001299}
1300
1301static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1302{
1303 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1304 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001305 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001306
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001307 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001308 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001309
Will Deacon45ae7cf2013-06-24 18:31:25 +01001310 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1311 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1312 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1313 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1314
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001315 if (!gfsr) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001316 arm_smmu_power_off(smmu->pwr);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001317 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001318 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001319
Will Deacon45ae7cf2013-06-24 18:31:25 +01001320 dev_err_ratelimited(smmu->dev,
1321 "Unexpected global fault, this could be serious\n");
1322 dev_err_ratelimited(smmu->dev,
1323 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1324 gfsr, gfsynr0, gfsynr1, gfsynr2);
1325
1326 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001327 arm_smmu_power_off(smmu->pwr);
Will Deaconadaba322013-07-31 19:21:26 +01001328 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001329}
1330
Will Deacon518f7132014-11-14 17:17:54 +00001331static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1332 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001333{
Robin Murphyb94df6f2016-08-11 17:44:06 +01001334 u32 reg, reg2;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001335 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001336 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +01001337 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1338 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001339 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001340
Will Deacon45ae7cf2013-06-24 18:31:25 +01001341 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001342 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1343 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001344
Will Deacon4a1c93c2015-03-04 12:21:03 +00001345 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001346 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1347 reg = CBA2R_RW64_64BIT;
1348 else
1349 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001350 /* 16-bit VMIDs live in CBA2R */
1351 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001352 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001353
Will Deacon4a1c93c2015-03-04 12:21:03 +00001354 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
1355 }
1356
Will Deacon45ae7cf2013-06-24 18:31:25 +01001357 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001358 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001359 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001360 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001361
Will Deacon57ca90f2014-02-06 14:59:05 +00001362 /*
1363 * Use the weakest shareability/memory types, so they are
1364 * overridden by the ttbcr/pte.
1365 */
1366 if (stage1) {
1367 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1368 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001369 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1370 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001371 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001372 }
Will Deacon44680ee2014-06-25 11:29:12 +01001373 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001374
Will Deacon518f7132014-11-14 17:17:54 +00001375 /* TTBRs */
1376 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001377 u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001378
Robin Murphyb94df6f2016-08-11 17:44:06 +01001379 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1380 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
1381 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
1382 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
1383 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
1384 writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
1385 } else {
1386 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1387 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1388 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
1389 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
1390 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1391 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
1392 }
Will Deacon518f7132014-11-14 17:17:54 +00001393 } else {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001394 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001395 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +00001396 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001397
Will Deacon518f7132014-11-14 17:17:54 +00001398 /* TTBCR */
1399 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001400 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1401 reg = pgtbl_cfg->arm_v7s_cfg.tcr;
1402 reg2 = 0;
1403 } else {
1404 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1405 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
1406 reg2 |= TTBCR2_SEP_UPSTREAM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001407 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001408 if (smmu->version > ARM_SMMU_V1)
1409 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001410 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001411 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001412 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001413 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001414
Will Deacon518f7132014-11-14 17:17:54 +00001415 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001416 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001417 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1418 reg = pgtbl_cfg->arm_v7s_cfg.prrr;
1419 reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
1420 } else {
1421 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1422 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1423 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001424 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001425 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001426 }
1427
Will Deacon45ae7cf2013-06-24 18:31:25 +01001428 /* SCTLR */
Robin Murphyb94df6f2016-08-11 17:44:06 +01001429 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001430
1431 if ((!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
1432 !(smmu_domain->attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) ||
1433 !stage1)
Patrick Dalye62d3362016-03-15 18:58:28 -07001434 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001435 if (stage1)
1436 reg |= SCTLR_S1_ASIDPNE;
1437#ifdef __BIG_ENDIAN
1438 reg |= SCTLR_E;
1439#endif
Will Deacon25724842013-08-21 13:49:53 +01001440 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001441}
1442
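/*
 * Assign an ASID to a newly initialised context. For regular domains the
 * ASID is derived directly from the context bank index; for dynamic domains
 * a unique value is allocated from the shared IDR so that several dynamic
 * domains can share one context bank while keeping their TLB entries
 * distinct.
 */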
Patrick Dalyc190d932016-08-30 17:23:28 -07001443static int arm_smmu_init_asid(struct iommu_domain *domain,
1444 struct arm_smmu_device *smmu)
1445{
1446 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1447 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1448 bool dynamic = is_dynamic_domain(domain);
1449 int ret;
1450
1451 if (!dynamic) {
1452 cfg->asid = cfg->cbndx + 1;
1453 } else {
1454 mutex_lock(&smmu->idr_mutex);
1455 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1456 smmu->num_context_banks + 2,
1457 MAX_ASID + 1, GFP_KERNEL);
1458
1459 mutex_unlock(&smmu->idr_mutex);
1460 if (ret < 0) {
1461 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1462 ret);
1463 return ret;
1464 }
1465 cfg->asid = ret;
1466 }
1467 return 0;
1468}
1469
1470static void arm_smmu_free_asid(struct iommu_domain *domain)
1471{
1472 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1473 struct arm_smmu_device *smmu = smmu_domain->smmu;
1474 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1475 bool dynamic = is_dynamic_domain(domain);
1476
1477 if (cfg->asid == INVALID_ASID || !dynamic)
1478 return;
1479
1480 mutex_lock(&smmu->idr_mutex);
1481 idr_remove(&smmu->asid_idr, cfg->asid);
1482 mutex_unlock(&smmu->idr_mutex);
1483}
1484
Will Deacon45ae7cf2013-06-24 18:31:25 +01001485static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Patrick Dalyea63baa2017-02-13 17:11:33 -08001486 struct arm_smmu_device *smmu,
1487 struct device *dev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001488{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001489 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001490 unsigned long ias, oas;
1491 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001492 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001493 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001494 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001495 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyce6786f2016-11-09 14:19:23 -08001496 unsigned long quirks = 0;
Patrick Dalyc190d932016-08-30 17:23:28 -07001497 bool dynamic;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001498
Will Deacon518f7132014-11-14 17:17:54 +00001499 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001500 if (smmu_domain->smmu)
1501 goto out_unlock;
1502
Patrick Dalyc190d932016-08-30 17:23:28 -07001503 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1504 smmu_domain->cfg.asid = INVALID_ASID;
1505
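	/*
	 * Dynamic domains (gated by ARM_SMMU_OPT_DYNAMIC) do not own a
	 * context bank or stream mapping of their own: the context bank
	 * index is supplied through a domain attribute and only a private
	 * ASID and page table are set up here, leaving the hardware state
	 * of the underlying bank untouched.
	 */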
Patrick Dalyc190d932016-08-30 17:23:28 -07001506 dynamic = is_dynamic_domain(domain);
1507 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1508 dev_err(smmu->dev, "dynamic domains not supported\n");
1509 ret = -EPERM;
1510 goto out_unlock;
1511 }
1512
Will Deaconc752ce42014-06-25 22:46:31 +01001513 /*
1514 * Mapping the requested stage onto what we support is surprisingly
1515 * complicated, mainly because the spec allows S1+S2 SMMUs without
1516 * support for nested translation. That means we end up with the
1517 * following table:
1518 *
1519 * Requested Supported Actual
1520 * S1 N S1
1521 * S1 S1+S2 S1
1522 * S1 S2 S2
1523 * S1 S1 S1
1524 * N N N
1525 * N S1+S2 S2
1526 * N S2 S2
1527 * N S1 S1
1528 *
1529 * Note that you can't actually request stage-2 mappings.
1530 */
1531 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1532 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1533 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1534 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1535
Robin Murphy7602b872016-04-28 17:12:09 +01001536 /*
1537 * Choosing a suitable context format is even more fiddly. Until we
1538 * grow some way for the caller to express a preference, and/or move
1539 * the decision into the io-pgtable code where it arguably belongs,
1540 * just aim for the closest thing to the rest of the system, and hope
1541 * that the hardware isn't esoteric enough that we can't assume AArch64
1542 * support to be a superset of AArch32 support...
1543 */
1544 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1545 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001546 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
1547 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
1548 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
1549 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
1550 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +01001551 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1552 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1553 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1554 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1555 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1556
1557 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1558 ret = -EINVAL;
1559 goto out_unlock;
1560 }
1561
Will Deaconc752ce42014-06-25 22:46:31 +01001562 switch (smmu_domain->stage) {
1563 case ARM_SMMU_DOMAIN_S1:
1564 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1565 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001566 ias = smmu->va_size;
1567 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001568 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001569 fmt = ARM_64_LPAE_S1;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001570 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +00001571 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001572 ias = min(ias, 32UL);
1573 oas = min(oas, 40UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001574 } else {
1575 fmt = ARM_V7S;
1576 ias = min(ias, 32UL);
1577 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +01001578 }
Will Deaconc752ce42014-06-25 22:46:31 +01001579 break;
1580 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001581 /*
1582 * We will likely want to change this if/when KVM gets
1583 * involved.
1584 */
Will Deaconc752ce42014-06-25 22:46:31 +01001585 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001586 cfg->cbar = CBAR_TYPE_S2_TRANS;
1587 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001588 ias = smmu->ipa_size;
1589 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001590 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001591 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001592 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001593 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001594 ias = min(ias, 40UL);
1595 oas = min(oas, 40UL);
1596 }
Will Deaconc752ce42014-06-25 22:46:31 +01001597 break;
1598 default:
1599 ret = -EINVAL;
1600 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001601 }
1602
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001603 if (is_fast)
1604 fmt = ARM_V8L_FAST;
1605
Patrick Dalyce6786f2016-11-09 14:19:23 -08001606 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
1607 quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
Liam Mark53cf2342016-12-20 11:36:07 -08001608 if (is_iommu_pt_coherent(smmu_domain))
1609 quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001610
Patrick Dalyc190d932016-08-30 17:23:28 -07001611 /* Dynamic domains must set cbndx through domain attribute */
1612 if (!dynamic) {
1613 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
Will Deacon45ae7cf2013-06-24 18:31:25 +01001614 smmu->num_context_banks);
Patrick Dalyc190d932016-08-30 17:23:28 -07001615 if (ret < 0)
1616 goto out_unlock;
1617 cfg->cbndx = ret;
1618 }
Robin Murphyb7862e32016-04-13 18:13:03 +01001619 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001620 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1621 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001622 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001623 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001624 }
1625
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001626 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
Patrick Dalyce6786f2016-11-09 14:19:23 -08001627 .quirks = quirks,
Robin Murphyd5466352016-05-09 17:20:09 +01001628 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +00001629 .ias = ias,
1630 .oas = oas,
1631 .tlb = &arm_smmu_gather_ops,
Robin Murphy2df7a252015-07-29 19:46:06 +01001632 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +00001633 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001634
Will Deacon518f7132014-11-14 17:17:54 +00001635 smmu_domain->smmu = smmu;
Patrick Dalyea63baa2017-02-13 17:11:33 -08001636 smmu_domain->dev = dev;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001637 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1638 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001639 if (!pgtbl_ops) {
1640 ret = -ENOMEM;
1641 goto out_clear_smmu;
1642 }
1643
Patrick Dalyc11d1082016-09-01 15:52:44 -07001644 /*
1645 * assign any page table memory that might have been allocated
1646 * during alloc_io_pgtable_ops
1647 */
Patrick Dalye271f212016-10-04 13:24:49 -07001648 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001649 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001650 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001651
Robin Murphyd5466352016-05-09 17:20:09 +01001652 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001653 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Robin Murphyd7a8d042016-09-12 17:13:58 +01001654 domain->geometry.aperture_end = (1UL << ias) - 1;
1655 domain->geometry.force_aperture = true;
Will Deacon518f7132014-11-14 17:17:54 +00001656
Patrick Dalyc190d932016-08-30 17:23:28 -07001657 /* Assign an asid */
1658 ret = arm_smmu_init_asid(domain, smmu);
1659 if (ret)
1660 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001661
Patrick Dalyc190d932016-08-30 17:23:28 -07001662 if (!dynamic) {
1663 /* Initialise the context bank with our page table cfg */
1664 arm_smmu_init_context_bank(smmu_domain,
1665 &smmu_domain->pgtbl_cfg);
1666
1667 /*
1668 * Request context fault interrupt. Do this last to avoid the
1669 * handler seeing a half-initialised domain state.
1670 */
1671 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1672 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001673 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1674 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001675 if (ret < 0) {
1676 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1677 cfg->irptndx, irq);
1678 cfg->irptndx = INVALID_IRPTNDX;
1679 goto out_clear_smmu;
1680 }
1681 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001682 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001683 }
Will Deacon518f7132014-11-14 17:17:54 +00001684 mutex_unlock(&smmu_domain->init_mutex);
1685
1686 /* Publish page table ops for map/unmap */
1687 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001688 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001689
Will Deacon518f7132014-11-14 17:17:54 +00001690out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001691 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001692 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001693out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001694 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001695 return ret;
1696}
1697
Patrick Daly77db4f92016-10-14 15:34:10 -07001698static void arm_smmu_domain_reinit(struct arm_smmu_domain *smmu_domain)
1699{
1700 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1701 smmu_domain->cfg.cbndx = INVALID_CBNDX;
1702 smmu_domain->secure_vmid = VMID_INVAL;
1703}
1704
Will Deacon45ae7cf2013-06-24 18:31:25 +01001705static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1706{
Joerg Roedel1d672632015-03-26 13:43:10 +01001707 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001708 struct arm_smmu_device *smmu = smmu_domain->smmu;
1709 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01001710 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001711 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07001712 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001713 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001714
Robin Murphy7e96c742016-09-14 15:26:46 +01001715 if (!smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001716 return;
1717
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001718 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001719 if (ret) {
1720		WARN_ONCE(ret, "Whoops, powering on smmu %p failed. Leaking context bank\n",
1721 smmu);
1722 return;
1723 }
1724
Patrick Dalyc190d932016-08-30 17:23:28 -07001725 dynamic = is_dynamic_domain(domain);
1726 if (dynamic) {
1727 arm_smmu_free_asid(domain);
1728 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001729 arm_smmu_power_off(smmu->pwr);
Patrick Dalye271f212016-10-04 13:24:49 -07001730 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001731 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001732 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001733 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly77db4f92016-10-14 15:34:10 -07001734 arm_smmu_domain_reinit(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001735 return;
1736 }
1737
Will Deacon518f7132014-11-14 17:17:54 +00001738 /*
1739 * Disable the context bank and free the page tables before freeing
1740 * it.
1741 */
Will Deacon44680ee2014-06-25 11:29:12 +01001742 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001743 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01001744
Will Deacon44680ee2014-06-25 11:29:12 +01001745 if (cfg->irptndx != INVALID_IRPTNDX) {
1746 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08001747 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001748 }
1749
Markus Elfring44830b02015-11-06 18:32:41 +01001750 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07001751 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001752 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001753 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001754 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001755 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001756
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001757 arm_smmu_power_off(smmu->pwr);
Patrick Daly77db4f92016-10-14 15:34:10 -07001758 arm_smmu_domain_reinit(smmu_domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001759}
1760
Joerg Roedel1d672632015-03-26 13:43:10 +01001761static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001762{
1763 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001764
Patrick Daly09801312016-08-29 17:02:52 -07001765 /* Do not support DOMAIN_DMA for now */
1766 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01001767 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001768 /*
1769 * Allocate the domain and initialise some of its data structures.
1770 * We can't really do anything meaningful until we've added a
1771 * master.
1772 */
1773 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1774 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001775 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001776
Robin Murphy7e96c742016-09-14 15:26:46 +01001777 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
1778 iommu_get_dma_cookie(&smmu_domain->domain))) {
Robin Murphy9adb9592016-01-26 18:06:36 +00001779 kfree(smmu_domain);
1780 return NULL;
1781 }
1782
Will Deacon518f7132014-11-14 17:17:54 +00001783 mutex_init(&smmu_domain->init_mutex);
1784 spin_lock_init(&smmu_domain->pgtbl_lock);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001785 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
1786 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07001787 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001788 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Patrick Daly77db4f92016-10-14 15:34:10 -07001789 arm_smmu_domain_reinit(smmu_domain);
Joerg Roedel1d672632015-03-26 13:43:10 +01001790
1791 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001792}
1793
Joerg Roedel1d672632015-03-26 13:43:10 +01001794static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001795{
Joerg Roedel1d672632015-03-26 13:43:10 +01001796 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01001797
1798 /*
1799 * Free the domain resources. We assume that all devices have
1800 * already been detached.
1801 */
Robin Murphy9adb9592016-01-26 18:06:36 +00001802 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001803 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001804 kfree(smmu_domain);
1805}
1806
Robin Murphy468f4942016-09-12 17:13:49 +01001807static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
1808{
1809 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphyd5b41782016-09-14 15:21:39 +01001810 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
Robin Murphy468f4942016-09-12 17:13:49 +01001811
1812 if (smr->valid)
1813 reg |= SMR_VALID;
1814 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
1815}
1816
Robin Murphya754fd12016-09-12 17:13:50 +01001817static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
1818{
1819 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
1820 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
1821 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
1822 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
1823
1824 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
1825}
1826
1827static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
1828{
1829 arm_smmu_write_s2cr(smmu, idx);
1830 if (smmu->smrs)
1831 arm_smmu_write_smr(smmu, idx);
1832}
1833
Robin Murphy6668f692016-09-12 17:13:54 +01001834static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy468f4942016-09-12 17:13:49 +01001835{
1836 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy6668f692016-09-12 17:13:54 +01001837 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001838
Robin Murphy6668f692016-09-12 17:13:54 +01001839 /* Stream indexing is blissfully easy */
1840 if (!smrs)
1841 return id;
Robin Murphy468f4942016-09-12 17:13:49 +01001842
Robin Murphy6668f692016-09-12 17:13:54 +01001843 /* Validating SMRs is... less so */
1844 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1845 if (!smrs[i].valid) {
1846 /*
1847 * Note the first free entry we come across, which
1848 * we'll claim in the end if nothing else matches.
1849 */
1850 if (free_idx < 0)
1851 free_idx = i;
Robin Murphy468f4942016-09-12 17:13:49 +01001852 continue;
1853 }
Robin Murphy6668f692016-09-12 17:13:54 +01001854 /*
1855 * If the new entry is _entirely_ matched by an existing entry,
1856 * then reuse that, with the guarantee that there also cannot
1857 * be any subsequent conflicting entries. In normal use we'd
1858 * expect simply identical entries for this case, but there's
1859 * no harm in accommodating the generalisation.
1860 */
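		/*
		 * For example (hypothetical values): an existing SMR of
		 * id=0x400, mask=0x7f covers stream IDs 0x400-0x47f, so a
		 * new entry of id=0x410, mask=0x0f (0x410-0x41f) is entirely
		 * contained within it and can simply reuse its index.
		 */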
1861 if ((mask & smrs[i].mask) == mask &&
1862 !((id ^ smrs[i].id) & ~smrs[i].mask))
1863 return i;
1864 /*
1865 * If the new entry has any other overlap with an existing one,
1866 * though, then there always exists at least one stream ID
1867 * which would cause a conflict, and we can't allow that risk.
1868 */
1869 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
1870 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001871 }
1872
Robin Murphy6668f692016-09-12 17:13:54 +01001873 return free_idx;
1874}
1875
1876static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1877{
1878 if (--smmu->s2crs[idx].count)
1879 return false;
1880
1881 smmu->s2crs[idx] = s2cr_init_val;
1882 if (smmu->smrs)
1883 smmu->smrs[idx].valid = false;
1884
1885 return true;
1886}
1887
1888static int arm_smmu_master_alloc_smes(struct device *dev)
1889{
Robin Murphy06e393e2016-09-12 17:13:55 +01001890 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1891 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy6668f692016-09-12 17:13:54 +01001892 struct arm_smmu_device *smmu = cfg->smmu;
1893 struct arm_smmu_smr *smrs = smmu->smrs;
1894 struct iommu_group *group;
1895 int i, idx, ret;
1896
1897 mutex_lock(&smmu->stream_map_mutex);
1898 /* Figure out a viable stream map entry allocation */
Robin Murphy06e393e2016-09-12 17:13:55 +01001899 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy7e96c742016-09-14 15:26:46 +01001900 u16 sid = fwspec->ids[i];
1901 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1902
Robin Murphy6668f692016-09-12 17:13:54 +01001903 if (idx != INVALID_SMENDX) {
1904 ret = -EEXIST;
1905 goto out_err;
1906 }
1907
Robin Murphy7e96c742016-09-14 15:26:46 +01001908 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy6668f692016-09-12 17:13:54 +01001909 if (ret < 0)
1910 goto out_err;
1911
1912 idx = ret;
1913 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy7e96c742016-09-14 15:26:46 +01001914 smrs[idx].id = sid;
1915 smrs[idx].mask = mask;
Robin Murphy6668f692016-09-12 17:13:54 +01001916 smrs[idx].valid = true;
1917 }
1918 smmu->s2crs[idx].count++;
1919 cfg->smendx[i] = (s16)idx;
1920 }
1921
1922 group = iommu_group_get_for_dev(dev);
1923 if (!group)
1924 group = ERR_PTR(-ENOMEM);
1925 if (IS_ERR(group)) {
1926 ret = PTR_ERR(group);
1927 goto out_err;
1928 }
1929 iommu_group_put(group);
Robin Murphy468f4942016-09-12 17:13:49 +01001930
Will Deacon45ae7cf2013-06-24 18:31:25 +01001931 /* It worked! Now, poke the actual hardware */
Robin Murphy06e393e2016-09-12 17:13:55 +01001932 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01001933 arm_smmu_write_sme(smmu, idx);
1934 smmu->s2crs[idx].group = group;
1935 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001936
Robin Murphy6668f692016-09-12 17:13:54 +01001937 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001938 return 0;
1939
Robin Murphy6668f692016-09-12 17:13:54 +01001940out_err:
Robin Murphy468f4942016-09-12 17:13:49 +01001941 while (i--) {
Robin Murphy6668f692016-09-12 17:13:54 +01001942 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy468f4942016-09-12 17:13:49 +01001943 cfg->smendx[i] = INVALID_SMENDX;
1944 }
Robin Murphy6668f692016-09-12 17:13:54 +01001945 mutex_unlock(&smmu->stream_map_mutex);
1946 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001947}
1948
Robin Murphy06e393e2016-09-12 17:13:55 +01001949static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001950{
Robin Murphy06e393e2016-09-12 17:13:55 +01001951 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1952 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy1fb519a2016-09-12 17:13:53 +01001953 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01001954
Robin Murphy6668f692016-09-12 17:13:54 +01001955 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01001956 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01001957 if (arm_smmu_free_sme(smmu, idx))
1958 arm_smmu_write_sme(smmu, idx);
Robin Murphy468f4942016-09-12 17:13:49 +01001959 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001960 }
Robin Murphy6668f692016-09-12 17:13:54 +01001961 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001962}
1963
Will Deacon45ae7cf2013-06-24 18:31:25 +01001964static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphy06e393e2016-09-12 17:13:55 +01001965 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001966{
Will Deacon44680ee2014-06-25 11:29:12 +01001967 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01001968 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
1969 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
1970 u8 cbndx = smmu_domain->cfg.cbndx;
Robin Murphy6668f692016-09-12 17:13:54 +01001971 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001972
Robin Murphy06e393e2016-09-12 17:13:55 +01001973 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphya754fd12016-09-12 17:13:50 +01001974 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy6668f692016-09-12 17:13:54 +01001975 continue;
Robin Murphya754fd12016-09-12 17:13:50 +01001976
1977 s2cr[idx].type = type;
1978 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
1979 s2cr[idx].cbndx = cbndx;
1980 arm_smmu_write_s2cr(smmu, idx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001981 }
1982
1983 return 0;
1984}
1985
Patrick Daly09801312016-08-29 17:02:52 -07001986static void arm_smmu_detach_dev(struct iommu_domain *domain,
1987 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001988{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001989 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07001990 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Daly09801312016-08-29 17:02:52 -07001991 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07001992 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07001993
1994 if (dynamic)
1995 return;
1996
Patrick Daly09801312016-08-29 17:02:52 -07001997 if (!smmu) {
1998 dev_err(dev, "Domain not attached; cannot detach!\n");
1999 return;
2000 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002001
Patrick Daly8befb662016-08-17 20:03:28 -07002002 /* Remove additional vote for atomic power */
2003 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002004 WARN_ON(arm_smmu_power_on_atomic(smmu->pwr));
2005 arm_smmu_power_off(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07002006 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002007}
2008
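/*
 * For secure domains, hand any newly allocated page-table pages over to the
 * secure VMID (in addition to VMID_HLOS) via hyp_assign_phys() so the secure
 * world can read them. Entries queued on pte_info_list by
 * arm_smmu_prepare_pgtable() are consumed and freed here.
 */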
Patrick Dalye271f212016-10-04 13:24:49 -07002009static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07002010{
Patrick Dalye271f212016-10-04 13:24:49 -07002011 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002012 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2013 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
2014 int source_vmid = VMID_HLOS;
2015 struct arm_smmu_pte_info *pte_info, *temp;
2016
Patrick Dalye271f212016-10-04 13:24:49 -07002017 if (!arm_smmu_is_domain_secure(smmu_domain))
2018 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002019
Patrick Dalye271f212016-10-04 13:24:49 -07002020 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002021 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2022 PAGE_SIZE, &source_vmid, 1,
2023 dest_vmids, dest_perms, 2);
2024 if (WARN_ON(ret))
2025 break;
2026 }
2027
2028 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2029 entry) {
2030 list_del(&pte_info->entry);
2031 kfree(pte_info);
2032 }
Patrick Dalye271f212016-10-04 13:24:49 -07002033 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002034}
2035
2036static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2037{
2038 int ret;
2039 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002040 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002041 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2042 struct arm_smmu_pte_info *pte_info, *temp;
2043
Patrick Dalye271f212016-10-04 13:24:49 -07002044 if (!arm_smmu_is_domain_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002045 return;
2046
2047 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2048 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2049 PAGE_SIZE, source_vmlist, 2,
2050 &dest_vmids, &dest_perms, 1);
2051 if (WARN_ON(ret))
2052 break;
2053 free_pages_exact(pte_info->virt_addr, pte_info->size);
2054 }
2055
2056 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2057 entry) {
2058 list_del(&pte_info->entry);
2059 kfree(pte_info);
2060 }
2061}
2062
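/*
 * io-pgtable callbacks used by secure domains: arm_smmu_prepare_pgtable()
 * queues a freshly allocated table page for assignment to the secure VMID,
 * while arm_smmu_unprepare_pgtable() queues a page that is being torn down
 * so it can be handed back to VMID_HLOS before the memory is released. Both
 * may run in atomic context, hence the GFP_ATOMIC allocations.
 */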
2063static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2064{
2065 struct arm_smmu_domain *smmu_domain = cookie;
2066 struct arm_smmu_pte_info *pte_info;
2067
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002068 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002069
2070 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2071 if (!pte_info)
2072 return;
2073
2074 pte_info->virt_addr = addr;
2075 pte_info->size = size;
2076 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2077}
2078
2079static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2080{
2081 struct arm_smmu_domain *smmu_domain = cookie;
2082 struct arm_smmu_pte_info *pte_info;
2083
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002084 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002085
2086 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2087 if (!pte_info)
2088 return -ENOMEM;
2089 pte_info->virt_addr = addr;
2090 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2091 return 0;
2092}
2093
Will Deacon45ae7cf2013-06-24 18:31:25 +01002094static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2095{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002096 int ret;
Robin Murphy06e393e2016-09-12 17:13:55 +01002097 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Will Deacon518f7132014-11-14 17:17:54 +00002098 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +01002099 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly8befb662016-08-17 20:03:28 -07002100 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002101
Robin Murphy06e393e2016-09-12 17:13:55 +01002102 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002103 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2104 return -ENXIO;
2105 }
Robin Murphy06e393e2016-09-12 17:13:55 +01002106
Robin Murphy4f79b142016-10-17 12:06:21 +01002107 /*
2108 * FIXME: The arch/arm DMA API code tries to attach devices to its own
2109 * domains between of_xlate() and add_device() - we have no way to cope
2110 * with that, so until ARM gets converted to rely on groups and default
2111 * domains, just say no (but more politely than by dereferencing NULL).
2112 * This should be at least a WARN_ON once that's sorted.
2113 */
2114 if (!fwspec->iommu_priv)
2115 return -ENODEV;
2116
Robin Murphy06e393e2016-09-12 17:13:55 +01002117 smmu = fwspec_smmu(fwspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002118
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002119 /* Enable Clocks and Power */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002120 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002121 if (ret)
2122 return ret;
2123
Will Deacon518f7132014-11-14 17:17:54 +00002124 /* Ensure that the domain is finalised */
Patrick Dalyea63baa2017-02-13 17:11:33 -08002125 ret = arm_smmu_init_domain_context(domain, smmu, dev);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002126 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002127 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002128
Patrick Dalyc190d932016-08-30 17:23:28 -07002129 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002130 if (is_dynamic_domain(domain)) {
2131 ret = 0;
2132 goto out_power_off;
2133 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002134
Will Deacon45ae7cf2013-06-24 18:31:25 +01002135 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002136 * Sanity check the domain. We don't support domains across
2137 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002138 */
Robin Murphy06e393e2016-09-12 17:13:55 +01002139 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002140 dev_err(dev,
2141 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002142 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002143 ret = -EINVAL;
2144 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002145 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002146
2147 /* Looks ok, so add the device to the domain */
Robin Murphy06e393e2016-09-12 17:13:55 +01002148 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002149
2150out_power_off:
Patrick Daly466237d2016-10-14 15:59:14 -07002151 /*
2152 * Keep an additional vote for non-atomic power until domain is
2153 * detached
2154 */
2155 if (!ret && atomic_domain) {
2156 WARN_ON(arm_smmu_power_on(smmu->pwr));
2157 arm_smmu_power_off_atomic(smmu->pwr);
2158 }
2159
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002160 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002161
Will Deacon45ae7cf2013-06-24 18:31:25 +01002162 return ret;
2163}
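/*
 * Typical client usage, as a minimal sketch against the generic IOMMU API
 * rather than code from this file ("my_dev", "my_page", the IOVA and the
 * protection flags below are placeholders): allocate an unmanaged domain,
 * attach the master, then map.
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (dom && !iommu_attach_device(dom, my_dev))
 *		iommu_map(dom, 0x10000000, page_to_phys(my_page),
 *			  SZ_4K, IOMMU_READ | IOMMU_WRITE);
 */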
2164
Will Deacon45ae7cf2013-06-24 18:31:25 +01002165static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002166 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002167{
Will Deacon518f7132014-11-14 17:17:54 +00002168 int ret;
2169 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002170 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002171	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002172
Will Deacon518f7132014-11-14 17:17:54 +00002173 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002174 return -ENODEV;
2175
Patrick Dalye271f212016-10-04 13:24:49 -07002176 arm_smmu_secure_domain_lock(smmu_domain);
2177
Will Deacon518f7132014-11-14 17:17:54 +00002178 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2179 ret = ops->map(ops, iova, paddr, size, prot);
2180 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002181
2182 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002183 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002184
Will Deacon518f7132014-11-14 17:17:54 +00002185 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002186}
2187
2188static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2189 size_t size)
2190{
Will Deacon518f7132014-11-14 17:17:54 +00002191 size_t ret;
2192 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002193 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002194	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002195
Will Deacon518f7132014-11-14 17:17:54 +00002196 if (!ops)
2197 return 0;
2198
Patrick Daly8befb662016-08-17 20:03:28 -07002199 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002200 if (ret)
2201 return ret;
2202
Patrick Dalye271f212016-10-04 13:24:49 -07002203 arm_smmu_secure_domain_lock(smmu_domain);
2204
Will Deacon518f7132014-11-14 17:17:54 +00002205 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2206 ret = ops->unmap(ops, iova, size);
2207 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002208
Patrick Daly8befb662016-08-17 20:03:28 -07002209 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002210 /*
2211 * While splitting up block mappings, we might allocate page table
2212	 * memory during unmap, so the VMIDs need to be assigned to the
2213 * memory here as well.
2214 */
2215 arm_smmu_assign_table(smmu_domain);
2216	/* Also unassign any pages that were freed during unmap */
2217 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002218 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002219 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002220}
2221
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002222static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2223 struct scatterlist *sg, unsigned int nents, int prot)
2224{
2225 int ret;
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002226 size_t size;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002227 unsigned long flags;
2228 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2229 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2230
2231 if (!ops)
2232 return -ENODEV;
2233
Patrick Daly8befb662016-08-17 20:03:28 -07002234 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002235 if (ret)
2236 return ret;
2237
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002238 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002239 ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002240 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002241
2242 if (!ret)
2243 arm_smmu_unmap(domain, iova, size);
2244
Patrick Daly8befb662016-08-17 20:03:28 -07002245 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002246 arm_smmu_assign_table(smmu_domain);
2247
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002248 return ret;
2249}
2250
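/*
 * Translate an IOVA through the hardware ATOS machinery: write the page-
 * aligned address to ATS1PR, poll ATSR until the walk completes, then read
 * the result from PAR. On timeout the software table-walk result is logged
 * and 0 is returned. The caller holds the page-table lock across the call.
 */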
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002251static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyad441dd2016-09-15 15:50:46 -07002252 dma_addr_t iova)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002253{
Joerg Roedel1d672632015-03-26 13:43:10 +01002254 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002255 struct arm_smmu_device *smmu = smmu_domain->smmu;
2256 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2257	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2258 struct device *dev = smmu->dev;
2259 void __iomem *cb_base;
2260 u32 tmp;
2261 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002262 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002263
2264 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2265
Robin Murphy661d9622015-05-27 17:09:34 +01002266 /* ATS1 registers can only be written atomically */
2267 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002268 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002269 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2270 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002271 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002272
2273 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2274 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002275 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002276 dev_err(dev,
2277			"iova-to-phys timed out on %pad. Software table walk result=%pa.\n",
2278 &iova, &phys);
2279 phys = 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002280 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002281 }
2282
Robin Murphyf9a05f02016-04-13 18:13:01 +01002283 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002284 if (phys & CB_PAR_F) {
2285 dev_err(dev, "translation fault!\n");
2286 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002287 phys = 0;
2288 } else {
2289 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002290 }
Patrick Dalyad441dd2016-09-15 15:50:46 -07002291
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002292 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002293}
2294
Will Deacon45ae7cf2013-06-24 18:31:25 +01002295static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002296 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002297{
Will Deacon518f7132014-11-14 17:17:54 +00002298 phys_addr_t ret;
2299 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002300 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002301	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002302
Will Deacon518f7132014-11-14 17:17:54 +00002303 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002304 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002305
Will Deacon518f7132014-11-14 17:17:54 +00002306 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002307 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002308 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002309
Will Deacon518f7132014-11-14 17:17:54 +00002310 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002311}
2312
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002313/*
2314 * This function can sleep, and cannot be called from atomic context. Will
2315 * power on register block if required. This restriction does not apply to the
2316 * original iova_to_phys() op.
2317 */
2318static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2319 dma_addr_t iova)
2320{
2321 phys_addr_t ret = 0;
2322 unsigned long flags;
2323 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002324
Patrick Dalyad441dd2016-09-15 15:50:46 -07002325 if (smmu_domain->smmu->arch_ops &&
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002326 smmu_domain->smmu->arch_ops->iova_to_phys_hard) {
2327 ret = smmu_domain->smmu->arch_ops->iova_to_phys_hard(
Patrick Dalyad441dd2016-09-15 15:50:46 -07002328 domain, iova);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002329 return ret;
2330 }
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002331
2332 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2333 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2334 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyad441dd2016-09-15 15:50:46 -07002335 ret = __arm_smmu_iova_to_phys_hard(domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002336
2337 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2338
2339 return ret;
2340}
2341
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002342static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002343{
Will Deacond0948942014-06-24 17:30:10 +01002344 switch (cap) {
2345 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002346 /*
2347 * Return true here as the SMMU can always send out coherent
2348 * requests.
2349 */
2350 return true;
Will Deacond0948942014-06-24 17:30:10 +01002351 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002352 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002353 case IOMMU_CAP_NOEXEC:
2354 return true;
Will Deacond0948942014-06-24 17:30:10 +01002355 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002356 return false;
Will Deacond0948942014-06-24 17:30:10 +01002357 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002358}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002359
Patrick Daly8e3371a2017-02-13 22:14:53 -08002360static struct arm_smmu_device *arm_smmu_get_by_list(struct device_node *np)
2361{
2362 struct arm_smmu_device *smmu;
2363 unsigned long flags;
2364
2365 spin_lock_irqsave(&arm_smmu_devices_lock, flags);
2366 list_for_each_entry(smmu, &arm_smmu_devices, list) {
2367 if (smmu->dev->of_node == np) {
2368 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2369 return smmu;
2370 }
2371 }
2372 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2373 return NULL;
2374}
2375
Robin Murphy7e96c742016-09-14 15:26:46 +01002376static int arm_smmu_match_node(struct device *dev, void *data)
2377{
2378 return dev->of_node == data;
2379}
2380
2381static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
2382{
2383 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
2384 np, arm_smmu_match_node);
2385 put_device(dev);
Patrick Daly8e3371a2017-02-13 22:14:53 -08002386 return dev ? dev_get_drvdata(dev) : arm_smmu_get_by_list(np);
Robin Murphy7e96c742016-09-14 15:26:46 +01002387}
2388
Will Deacon03edb222015-01-19 14:27:33 +00002389static int arm_smmu_add_device(struct device *dev)
2390{
Robin Murphy06e393e2016-09-12 17:13:55 +01002391 struct arm_smmu_device *smmu;
Robin Murphyd5b41782016-09-14 15:21:39 +01002392 struct arm_smmu_master_cfg *cfg;
Robin Murphy7e96c742016-09-14 15:26:46 +01002393 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyd5b41782016-09-14 15:21:39 +01002394 int i, ret;
2395
Robin Murphy7e96c742016-09-14 15:26:46 +01002396 if (using_legacy_binding) {
2397 ret = arm_smmu_register_legacy_master(dev, &smmu);
2398 fwspec = dev->iommu_fwspec;
2399 if (ret)
2400 goto out_free;
Robin Murphy22e6f6c2016-11-02 17:31:32 +00002401 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002402 smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
2403 if (!smmu)
2404 return -ENODEV;
2405 } else {
2406 return -ENODEV;
2407 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002408
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002409 ret = arm_smmu_power_on(smmu->pwr);
2410 if (ret)
2411 goto out_free;
2412
Robin Murphyd5b41782016-09-14 15:21:39 +01002413 ret = -EINVAL;
Robin Murphy06e393e2016-09-12 17:13:55 +01002414 for (i = 0; i < fwspec->num_ids; i++) {
2415 u16 sid = fwspec->ids[i];
Robin Murphy7e96c742016-09-14 15:26:46 +01002416 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyd5b41782016-09-14 15:21:39 +01002417
Robin Murphy06e393e2016-09-12 17:13:55 +01002418 if (sid & ~smmu->streamid_mask) {
Robin Murphyd5b41782016-09-14 15:21:39 +01002419 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002420 sid, smmu->streamid_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002421 goto out_pwr_off;
Robin Murphyd5b41782016-09-14 15:21:39 +01002422 }
Robin Murphy7e96c742016-09-14 15:26:46 +01002423 if (mask & ~smmu->smr_mask_mask) {
2424 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
2425				mask, smmu->smr_mask_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002426 goto out_pwr_off;
Robin Murphy7e96c742016-09-14 15:26:46 +01002427 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002428 }
Will Deacon03edb222015-01-19 14:27:33 +00002429
Robin Murphy06e393e2016-09-12 17:13:55 +01002430 ret = -ENOMEM;
2431 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
2432 GFP_KERNEL);
2433 if (!cfg)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002434 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002435
2436 cfg->smmu = smmu;
2437 fwspec->iommu_priv = cfg;
2438 while (i--)
2439 cfg->smendx[i] = INVALID_SMENDX;
2440
Robin Murphy6668f692016-09-12 17:13:54 +01002441 ret = arm_smmu_master_alloc_smes(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002442 if (ret)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002443 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002444
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002445 arm_smmu_power_off(smmu->pwr);
Robin Murphy06e393e2016-09-12 17:13:55 +01002446 return 0;
Robin Murphyd5b41782016-09-14 15:21:39 +01002447
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002448out_pwr_off:
2449 arm_smmu_power_off(smmu->pwr);
Robin Murphyd5b41782016-09-14 15:21:39 +01002450out_free:
Robin Murphy06e393e2016-09-12 17:13:55 +01002451 if (fwspec)
2452 kfree(fwspec->iommu_priv);
2453 iommu_fwspec_free(dev);
Robin Murphyd5b41782016-09-14 15:21:39 +01002454 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00002455}
2456
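/*
 * iommu_ops->remove_device: undo add_device - release the stream mapping
 * entries, drop the device from its group and free the fwspec, keeping the
 * SMMU powered on for the register writes.
 */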
Will Deacon45ae7cf2013-06-24 18:31:25 +01002457static void arm_smmu_remove_device(struct device *dev)
2458{
Robin Murphy06e393e2016-09-12 17:13:55 +01002459 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002460 struct arm_smmu_device *smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002461
Robin Murphy06e393e2016-09-12 17:13:55 +01002462 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyd5b41782016-09-14 15:21:39 +01002463 return;
Robin Murphya754fd12016-09-12 17:13:50 +01002464
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002465 smmu = fwspec_smmu(fwspec);
2466 if (arm_smmu_power_on(smmu->pwr)) {
2467 WARN_ON(1);
2468 return;
2469 }
2470
Robin Murphy06e393e2016-09-12 17:13:55 +01002471 arm_smmu_master_free_smes(fwspec);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002472 iommu_group_remove_device(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002473 kfree(fwspec->iommu_priv);
2474 iommu_fwspec_free(dev);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002475 arm_smmu_power_off(smmu->pwr);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002476}
2477
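/*
 * iommu_ops->device_group: masters sharing a stream mapping entry must also
 * share an IOMMU group.  Reuse an existing group if all of this master's
 * SMEs agree on one, otherwise fall back to PCI/generic grouping.
 */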
Joerg Roedelaf659932015-10-21 23:51:41 +02002478static struct iommu_group *arm_smmu_device_group(struct device *dev)
2479{
Robin Murphy06e393e2016-09-12 17:13:55 +01002480 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2481 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy6668f692016-09-12 17:13:54 +01002482 struct iommu_group *group = NULL;
2483 int i, idx;
2484
Robin Murphy06e393e2016-09-12 17:13:55 +01002485 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002486 if (group && smmu->s2crs[idx].group &&
2487 group != smmu->s2crs[idx].group)
2488 return ERR_PTR(-EINVAL);
2489
2490 group = smmu->s2crs[idx].group;
2491 }
2492
2493 if (group)
2494 return group;
Joerg Roedelaf659932015-10-21 23:51:41 +02002495
2496 if (dev_is_pci(dev))
2497 group = pci_device_group(dev);
2498 else
2499 group = generic_device_group(dev);
2500
Joerg Roedelaf659932015-10-21 23:51:41 +02002501 return group;
2502}
2503
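/*
 * Report domain attributes.  Several of them (context bank index, TTBR0,
 * CONTEXTIDR, page-table coherency) are only meaningful once the domain is
 * attached to an SMMU, hence the -ENODEV checks below.
 */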
Will Deaconc752ce42014-06-25 22:46:31 +01002504static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
2505 enum iommu_attr attr, void *data)
2506{
Joerg Roedel1d672632015-03-26 13:43:10 +01002507 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002508 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01002509
2510 switch (attr) {
2511 case DOMAIN_ATTR_NESTING:
2512 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
2513 return 0;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08002514 case DOMAIN_ATTR_PT_BASE_ADDR:
2515 *((phys_addr_t *)data) =
2516 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2517 return 0;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002518 case DOMAIN_ATTR_CONTEXT_BANK:
2519 /* context bank index isn't valid until we are attached */
2520 if (smmu_domain->smmu == NULL)
2521 return -ENODEV;
2522
2523 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
2524 ret = 0;
2525 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002526 case DOMAIN_ATTR_TTBR0: {
2527 u64 val;
2528 struct arm_smmu_device *smmu = smmu_domain->smmu;
2529 /* not valid until we are attached */
2530 if (smmu == NULL)
2531 return -ENODEV;
2532
2533 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2534 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
2535 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
2536 << (TTBRn_ASID_SHIFT);
2537 *((u64 *)data) = val;
2538 ret = 0;
2539 break;
2540 }
2541 case DOMAIN_ATTR_CONTEXTIDR:
2542 /* not valid until attached */
2543 if (smmu_domain->smmu == NULL)
2544 return -ENODEV;
2545 *((u32 *)data) = smmu_domain->cfg.procid;
2546 ret = 0;
2547 break;
2548 case DOMAIN_ATTR_PROCID:
2549 *((u32 *)data) = smmu_domain->cfg.procid;
2550 ret = 0;
2551 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002552 case DOMAIN_ATTR_DYNAMIC:
2553 *((int *)data) = !!(smmu_domain->attributes
2554 & (1 << DOMAIN_ATTR_DYNAMIC));
2555 ret = 0;
2556 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002557 case DOMAIN_ATTR_NON_FATAL_FAULTS:
2558 *((int *)data) = !!(smmu_domain->attributes
2559 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
2560 ret = 0;
2561 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07002562 case DOMAIN_ATTR_S1_BYPASS:
2563 *((int *)data) = !!(smmu_domain->attributes
2564 & (1 << DOMAIN_ATTR_S1_BYPASS));
2565 ret = 0;
2566 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002567 case DOMAIN_ATTR_SECURE_VMID:
2568 *((int *)data) = smmu_domain->secure_vmid;
2569 ret = 0;
2570 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08002571 case DOMAIN_ATTR_PGTBL_INFO: {
2572 struct iommu_pgtbl_info *info = data;
2573
2574 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
2575 ret = -ENODEV;
2576 break;
2577 }
2578 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
2579 ret = 0;
2580 break;
2581 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002582 case DOMAIN_ATTR_FAST:
2583 *((int *)data) = !!(smmu_domain->attributes
2584 & (1 << DOMAIN_ATTR_FAST));
2585 ret = 0;
2586 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002587 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2588 *((int *)data) = !!(smmu_domain->attributes &
2589 (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
2590 ret = 0;
2591 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08002592 case DOMAIN_ATTR_EARLY_MAP:
2593 *((int *)data) = !!(smmu_domain->attributes
2594 & (1 << DOMAIN_ATTR_EARLY_MAP));
2595 ret = 0;
2596 break;
Mitchel Humpherys05314f32016-06-07 16:04:40 -07002597 case DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
2598 if (!smmu_domain->smmu)
2599 return -ENODEV;
Liam Mark53cf2342016-12-20 11:36:07 -08002600 *((int *)data) = is_iommu_pt_coherent(smmu_domain);
2601 ret = 0;
2602 break;
2603 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
2604 *((int *)data) = !!(smmu_domain->attributes
2605 & (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
Mitchel Humpherys05314f32016-06-07 16:04:40 -07002606 ret = 0;
2607 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002608 default:
2609 return -ENODEV;
2610 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002611 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002612}
2613
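/*
 * Set domain attributes.  Most of these (S1_BYPASS, ATOMIC, DYNAMIC, FAST,
 * USE_UPSTREAM_HINT, ...) may only be changed while the domain is detached.
 * A client would typically do something like (illustrative only):
 *
 *	int val = 1;
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_S1_BYPASS, &val);
 *
 * before attaching its device to the domain.
 */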
2614static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
2615 enum iommu_attr attr, void *data)
2616{
Will Deacon518f7132014-11-14 17:17:54 +00002617 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01002618 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01002619
Will Deacon518f7132014-11-14 17:17:54 +00002620 mutex_lock(&smmu_domain->init_mutex);
2621
Will Deaconc752ce42014-06-25 22:46:31 +01002622 switch (attr) {
2623 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00002624 if (smmu_domain->smmu) {
2625 ret = -EPERM;
2626 goto out_unlock;
2627 }
2628
Will Deaconc752ce42014-06-25 22:46:31 +01002629 if (*(int *)data)
2630 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2631 else
2632 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2633
Will Deacon518f7132014-11-14 17:17:54 +00002634 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002635 case DOMAIN_ATTR_PROCID:
2636 if (smmu_domain->smmu != NULL) {
2637 dev_err(smmu_domain->smmu->dev,
2638 "cannot change procid attribute while attached\n");
2639 ret = -EBUSY;
2640 break;
2641 }
2642 smmu_domain->cfg.procid = *((u32 *)data);
2643 ret = 0;
2644 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002645 case DOMAIN_ATTR_DYNAMIC: {
2646 int dynamic = *((int *)data);
2647
2648 if (smmu_domain->smmu != NULL) {
2649 dev_err(smmu_domain->smmu->dev,
2650 "cannot change dynamic attribute while attached\n");
2651 ret = -EBUSY;
2652 break;
2653 }
2654
2655 if (dynamic)
2656 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
2657 else
2658 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
2659 ret = 0;
2660 break;
2661 }
2662 case DOMAIN_ATTR_CONTEXT_BANK:
2663 /* context bank can't be set while attached */
2664 if (smmu_domain->smmu != NULL) {
2665 ret = -EBUSY;
2666 break;
2667 }
2668 /* ... and it can only be set for dynamic contexts. */
2669 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
2670 ret = -EINVAL;
2671 break;
2672 }
2673
2674 /* this will be validated during attach */
2675 smmu_domain->cfg.cbndx = *((unsigned int *)data);
2676 ret = 0;
2677 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002678 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
2679 u32 non_fatal_faults = *((int *)data);
2680
2681 if (non_fatal_faults)
2682 smmu_domain->attributes |=
2683 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
2684 else
2685 smmu_domain->attributes &=
2686 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
2687 ret = 0;
2688 break;
2689 }
Patrick Dalye62d3362016-03-15 18:58:28 -07002690 case DOMAIN_ATTR_S1_BYPASS: {
2691 int bypass = *((int *)data);
2692
2693 /* bypass can't be changed while attached */
2694 if (smmu_domain->smmu != NULL) {
2695 ret = -EBUSY;
2696 break;
2697 }
2698 if (bypass)
2699 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
2700 else
2701 smmu_domain->attributes &=
2702 ~(1 << DOMAIN_ATTR_S1_BYPASS);
2703
2704 ret = 0;
2705 break;
2706 }
Patrick Daly8befb662016-08-17 20:03:28 -07002707 case DOMAIN_ATTR_ATOMIC:
2708 {
2709 int atomic_ctx = *((int *)data);
2710
2711 /* can't be changed while attached */
2712 if (smmu_domain->smmu != NULL) {
2713 ret = -EBUSY;
2714 break;
2715 }
2716 if (atomic_ctx)
2717 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
2718 else
2719 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
2720 break;
2721 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002722 case DOMAIN_ATTR_SECURE_VMID:
2723 if (smmu_domain->secure_vmid != VMID_INVAL) {
2724 ret = -ENODEV;
2725 WARN(1, "secure vmid already set!");
2726 break;
2727 }
2728 smmu_domain->secure_vmid = *((int *)data);
2729 break;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002730 case DOMAIN_ATTR_FAST:
2731 if (*((int *)data))
2732 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
2733 ret = 0;
2734 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002735 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2736 /* can't be changed while attached */
2737 if (smmu_domain->smmu != NULL) {
2738 ret = -EBUSY;
2739 break;
2740 }
2741 if (*((int *)data))
2742 smmu_domain->attributes |=
2743 1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
2744 ret = 0;
2745 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08002746 case DOMAIN_ATTR_EARLY_MAP: {
2747 int early_map = *((int *)data);
2748
2749 ret = 0;
2750 if (early_map) {
2751 smmu_domain->attributes |=
2752 1 << DOMAIN_ATTR_EARLY_MAP;
2753 } else {
2754 if (smmu_domain->smmu)
2755 ret = arm_smmu_enable_s1_translations(
2756 smmu_domain);
2757
2758 if (!ret)
2759 smmu_domain->attributes &=
2760 ~(1 << DOMAIN_ATTR_EARLY_MAP);
2761 }
2762 break;
2763 }
Liam Mark53cf2342016-12-20 11:36:07 -08002764 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT: {
2765 int force_coherent = *((int *)data);
2766
2767 if (smmu_domain->smmu != NULL) {
2768 dev_err(smmu_domain->smmu->dev,
2769 "cannot change force coherent attribute while attached\n");
2770 ret = -EBUSY;
2771 break;
2772 }
2773
2774 if (force_coherent)
2775 smmu_domain->attributes |=
2776 1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
2777 else
2778 smmu_domain->attributes &=
2779 ~(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT);
2780
2781 ret = 0;
2782 break;
2783 }
2784
Will Deaconc752ce42014-06-25 22:46:31 +01002785 default:
Will Deacon518f7132014-11-14 17:17:54 +00002786 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01002787 }
Will Deacon518f7132014-11-14 17:17:54 +00002788
2789out_unlock:
2790 mutex_unlock(&smmu_domain->init_mutex);
2791 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002792}
2793
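/*
 * Translate a generic "iommus" specifier into fwspec stream IDs: cell 0 is
 * the stream ID and the optional cell 1 is the SMR mask, packed above
 * SMR_MASK_SHIFT.  A master node would reference the SMMU roughly as
 * (illustrative values only):
 *
 *	iommus = <&smmu 0x420 0x0>;
 */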
Robin Murphy7e96c742016-09-14 15:26:46 +01002794static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
2795{
2796 u32 fwid = 0;
2797
2798 if (args->args_count > 0)
2799 fwid |= (u16)args->args[0];
2800
2801 if (args->args_count > 1)
2802 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
2803
2804 return iommu_fwspec_add_ids(dev, &fwid, 1);
2805}
2806
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08002807static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain)
2808{
2809 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2810 struct arm_smmu_device *smmu = smmu_domain->smmu;
2811 void __iomem *cb_base;
2812 u32 reg;
2813 int ret;
2814
2815 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2816 ret = arm_smmu_power_on(smmu->pwr);
2817 if (ret)
2818 return ret;
2819
2820 reg = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
2821 reg |= SCTLR_M;
2822
2823 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
2824 arm_smmu_power_off(smmu->pwr);
2825 return ret;
2826}
2827
Liam Mark3ba41cf2016-12-09 14:39:04 -08002828static bool arm_smmu_is_iova_coherent(struct iommu_domain *domain,
2829 dma_addr_t iova)
2830{
2831 bool ret;
2832 unsigned long flags;
2833 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2834 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2835
2836 if (!ops)
2837 return false;
2838
2839 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2840 ret = ops->is_iova_coherent(ops, iova);
2841 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2842 return ret;
2843}
2844
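/*
 * Debug helper: write the requested fault bits into FSRRESTORE for the
 * domain's context bank to synthesise a context fault, then give the fault
 * interrupt a second to fire before powering back down.
 */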
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002845static void arm_smmu_trigger_fault(struct iommu_domain *domain,
2846 unsigned long flags)
2847{
2848 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2849 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2850 struct arm_smmu_device *smmu;
2851 void __iomem *cb_base;
2852
2853 if (!smmu_domain->smmu) {
2854 pr_err("Can't trigger faults on non-attached domains\n");
2855 return;
2856 }
2857
2858 smmu = smmu_domain->smmu;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002859 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002860 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002861
2862 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2863 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
2864 flags, cfg->cbndx);
2865 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07002866 /* give the interrupt time to fire... */
2867 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002868
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002869 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002870}
2871
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002872static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
2873 unsigned long offset)
2874{
2875 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2876 struct arm_smmu_device *smmu;
2877 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2878 void __iomem *cb_base;
2879 unsigned long val;
2880
2881 if (offset >= SZ_4K) {
2882 pr_err("Invalid offset: 0x%lx\n", offset);
2883 return 0;
2884 }
2885
2886 smmu = smmu_domain->smmu;
2887 if (!smmu) {
2888 WARN(1, "Can't read registers of a detached domain\n");
2889 val = 0;
2890 return val;
2891 }
2892
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002893 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002894 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002895
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002896 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2897 val = readl_relaxed(cb_base + offset);
2898
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002899 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002900 return val;
2901}
2902
2903static void arm_smmu_reg_write(struct iommu_domain *domain,
2904 unsigned long offset, unsigned long val)
2905{
2906 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2907 struct arm_smmu_device *smmu;
2908 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2909 void __iomem *cb_base;
2910
2911 if (offset >= SZ_4K) {
2912 pr_err("Invalid offset: 0x%lx\n", offset);
2913 return;
2914 }
2915
2916 smmu = smmu_domain->smmu;
2917 if (!smmu) {
2918		WARN(1, "Can't write registers of a detached domain\n");
2919 return;
2920 }
2921
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002922 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002923 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002924
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002925 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2926 writel_relaxed(val, cb_base + offset);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002927
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002928 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002929}
2930
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08002931static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
2932{
2933 arm_smmu_tlb_inv_context(to_smmu_domain(domain));
2934}
2935
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002936static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
2937{
2938 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2939
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002940 return arm_smmu_power_on(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002941}
2942
2943static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
2944{
2945 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2946
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002947 arm_smmu_power_off(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002948}
2949
Will Deacon518f7132014-11-14 17:17:54 +00002950static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01002951 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01002952 .domain_alloc = arm_smmu_domain_alloc,
2953 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01002954 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07002955 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01002956 .map = arm_smmu_map,
2957 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002958 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01002959 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002960 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01002961 .add_device = arm_smmu_add_device,
2962 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02002963 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01002964 .domain_get_attr = arm_smmu_domain_get_attr,
2965 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy7e96c742016-09-14 15:26:46 +01002966 .of_xlate = arm_smmu_of_xlate,
Will Deacon518f7132014-11-14 17:17:54 +00002967 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002968 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002969 .reg_read = arm_smmu_reg_read,
2970 .reg_write = arm_smmu_reg_write,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08002971 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002972 .enable_config_clocks = arm_smmu_enable_config_clocks,
2973 .disable_config_clocks = arm_smmu_disable_config_clocks,
Liam Mark3ba41cf2016-12-09 14:39:04 -08002974 .is_iova_coherent = arm_smmu_is_iova_coherent,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002975};
2976
Patrick Dalyad441dd2016-09-15 15:50:46 -07002977#define IMPL_DEF1_MICRO_MMU_CTRL 0
2978#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
2979#define MICRO_MMU_CTRL_IDLE (1 << 3)
2980
2981/* Definitions for implementation-defined registers */
2982#define ACTLR_QCOM_OSH_SHIFT 28
2983#define ACTLR_QCOM_OSH 1
2984
2985#define ACTLR_QCOM_ISH_SHIFT 29
2986#define ACTLR_QCOM_ISH 1
2987
2988#define ACTLR_QCOM_NSH_SHIFT 30
2989#define ACTLR_QCOM_NSH 1
2990
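/*
 * QSMMUv2 halt/resume: request a local halt through the implementation-
 * defined MICRO_MMU_CTRL register and, when asked to, poll for the IDLE bit.
 * Used to quiesce the SMMU around ATOS translations and while programming
 * the implementation-defined attach registers.
 */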
2991static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002992{
2993 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002994 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002995
2996 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
2997 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
2998 0, 30000)) {
2999 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
3000 return -EBUSY;
3001 }
3002
3003 return 0;
3004}
3005
Patrick Dalyad441dd2016-09-15 15:50:46 -07003006static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003007{
3008 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3009 u32 reg;
3010
3011 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3012 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
3013 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3014
Patrick Dalyad441dd2016-09-15 15:50:46 -07003015 return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003016}
3017
Patrick Dalyad441dd2016-09-15 15:50:46 -07003018static int qsmmuv2_halt(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003019{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003020 return __qsmmuv2_halt(smmu, true);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003021}
3022
Patrick Dalyad441dd2016-09-15 15:50:46 -07003023static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003024{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003025 return __qsmmuv2_halt(smmu, false);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003026}
3027
Patrick Dalyad441dd2016-09-15 15:50:46 -07003028static void qsmmuv2_resume(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003029{
3030 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3031 u32 reg;
3032
3033 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3034 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
3035 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3036}
3037
Patrick Dalyad441dd2016-09-15 15:50:46 -07003038static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003039{
3040 int i;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003041 u32 val;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003042 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003043 void __iomem *cb_base;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003044
Patrick Dalyad441dd2016-09-15 15:50:46 -07003045 /*
3046	 * ACTLR may only be programmed while SCTLR.M is clear, per the ARM
3047	 * SMMUv2 spec, to prevent table walks with an inconsistent state.
3048 */
3049 for (i = 0; i < smmu->num_context_banks; ++i) {
3050 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3051 val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
3052 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
3053 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
3054 writel_relaxed(val, cb_base + ARM_SMMU_CB_ACTLR);
3055 }
3056
3057 /* Program implementation defined registers */
3058 qsmmuv2_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003059 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
3060 writel_relaxed(regs[i].value,
3061 ARM_SMMU_GR0(smmu) + regs[i].offset);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003062 qsmmuv2_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003063}
3064
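/*
 * Hardware (ATOS) translation for QSMMUv2: halt the SMMU, terminate any
 * stalled transaction, clear the FSR and momentarily disable stall mode
 * (SCTLR.CFCFG) so the walk either completes or faults cleanly, then resume.
 * The whole sequence is serialised by atos_lock.
 */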
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003065static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
3066 dma_addr_t iova)
Patrick Dalyad441dd2016-09-15 15:50:46 -07003067{
3068 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3069 struct arm_smmu_device *smmu = smmu_domain->smmu;
3070 int ret;
3071 phys_addr_t phys = 0;
3072 unsigned long flags;
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003073 u32 sctlr, sctlr_orig, fsr;
3074 void __iomem *cb_base;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003075
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003076 ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003077 if (ret)
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003078 return ret;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003079
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003080 spin_lock_irqsave(&smmu->atos_lock, flags);
3081 cb_base = ARM_SMMU_CB_BASE(smmu) +
3082 ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003083
3084 qsmmuv2_halt_nowait(smmu);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003085 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003086 qsmmuv2_wait_for_halt(smmu);
3087
3088 /* clear FSR to allow ATOS to log any faults */
3089 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
3090 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
3091
3092 /* disable stall mode momentarily */
3093 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3094 sctlr = sctlr_orig & ~SCTLR_CFCFG;
3095 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
3096
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003097 phys = __arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003098
3099 /* restore SCTLR */
3100 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
3101
3102 qsmmuv2_resume(smmu);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003103 spin_unlock_irqrestore(&smmu->atos_lock, flags);
3104
3105 arm_smmu_power_off(smmu_domain->smmu->pwr);
3106 return phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003107}
3108
3109struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
3110 .device_reset = qsmmuv2_device_reset,
3111 .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
Patrick Dalyad441dd2016-09-15 15:50:46 -07003112};
3113
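/*
 * Put every context bank into a clean state: drop the ACR CACHE_LOCK bit on
 * MMU-500r2+ so ACTLR becomes writable, disable SCTLR, clear any pending
 * context faults and turn off the MMU-500 next-page prefetcher (errata
 * #841119 and #826419).
 */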
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003114static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003115{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003116 int i;
3117 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003118 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003119 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003120
Peng Fan3ca37122016-05-03 21:50:30 +08003121 /*
3122 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
3123 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
3124 * bit is only present in MMU-500r2 onwards.
3125 */
3126 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
3127 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
3128 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
3129 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
3130 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
3131 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
3132 }
3133
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003134 /* Make sure all context banks are disabled and clear CB_FSR */
3135 for (i = 0; i < smmu->num_context_banks; ++i) {
3136 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3137 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
3138 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003139 /*
3140 * Disable MMU-500's not-particularly-beneficial next-page
3141 * prefetcher for the sake of errata #841119 and #826419.
3142 */
3143 if (smmu->model == ARM_MMU500) {
3144 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
3145 reg &= ~ARM_MMU500_ACTLR_CPRE;
3146 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
3147 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003148 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003149}
3150
3151static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
3152{
3153 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy468f4942016-09-12 17:13:49 +01003154 int i;
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003155 u32 reg;
3156
3157 /* clear global FSR */
3158 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3159 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3160
Robin Murphy468f4942016-09-12 17:13:49 +01003161 /*
3162 * Reset stream mapping groups: Initial values mark all SMRn as
3163 * invalid and all S2CRn as bypass unless overridden.
3164 */
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003165 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
Robin Murphya754fd12016-09-12 17:13:50 +01003166 for (i = 0; i < smmu->num_mapping_groups; ++i)
3167 arm_smmu_write_sme(smmu, i);
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003168
3169 arm_smmu_context_bank_reset(smmu);
3170 }
Will Deacon1463fe42013-07-31 19:21:27 +01003171
Will Deacon45ae7cf2013-06-24 18:31:25 +01003172 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003173 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
3174 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
3175
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003176 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003177
Will Deacon45ae7cf2013-06-24 18:31:25 +01003178 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003179 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003180
3181 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003182 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003183
Robin Murphy25a1c962016-02-10 14:25:33 +00003184 /* Enable client access, handling unmatched streams as appropriate */
3185 reg &= ~sCR0_CLIENTPD;
3186 if (disable_bypass)
3187 reg |= sCR0_USFCFG;
3188 else
3189 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003190
3191 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003192 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003193
3194 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003195 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003196
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003197 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3198 reg |= sCR0_VMID16EN;
3199
Will Deacon45ae7cf2013-06-24 18:31:25 +01003200 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003201 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003202 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Dalyd7476202016-09-08 18:23:28 -07003203
3204 /* Manage any implementation defined features */
3205 arm_smmu_arch_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003206}
3207
3208static int arm_smmu_id_size_to_bits(int size)
3209{
3210 switch (size) {
3211 case 0:
3212 return 32;
3213 case 1:
3214 return 36;
3215 case 2:
3216 return 40;
3217 case 3:
3218 return 42;
3219 case 4:
3220 return 44;
3221 case 5:
3222 default:
3223 return 48;
3224 }
3225}
3226
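/*
 * Parse the optional "attach-impl-defs" DT property: a flat list of
 * <offset value> pairs written into the SMMU global register space whenever
 * the device is reset, e.g. (illustrative values only):
 *
 *	attach-impl-defs = <0x6000 0x270>,
 *			   <0x6074 0x0>;
 */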
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003227static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
3228{
3229 struct device *dev = smmu->dev;
3230 int i, ntuples, ret;
3231 u32 *tuples;
3232 struct arm_smmu_impl_def_reg *regs, *regit;
3233
3234 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
3235 return 0;
3236
3237 ntuples /= sizeof(u32);
3238 if (ntuples % 2) {
3239 dev_err(dev,
3240 "Invalid number of attach-impl-defs registers: %d\n",
3241 ntuples);
3242 return -EINVAL;
3243 }
3244
3245 regs = devm_kmalloc(
3246 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
3247 GFP_KERNEL);
3248 if (!regs)
3249 return -ENOMEM;
3250
3251 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
3252 if (!tuples)
3253 return -ENOMEM;
3254
3255 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
3256 tuples, ntuples);
3257 if (ret)
3258 return ret;
3259
3260 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
3261 regit->offset = tuples[i];
3262 regit->value = tuples[i + 1];
3263 }
3264
3265 devm_kfree(dev, tuples);
3266
3267 smmu->impl_def_attach_registers = regs;
3268 smmu->num_impl_def_attach_registers = ntuples / 2;
3269
3270 return 0;
3271}
3272
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003273
3274static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003275{
3276 const char *cname;
3277 struct property *prop;
3278 int i;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003279 struct device *dev = pwr->dev;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003280
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003281 pwr->num_clocks =
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003282 of_property_count_strings(dev->of_node, "clock-names");
3283
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003284 if (pwr->num_clocks < 1) {
3285 pwr->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003286 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07003287 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003288
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003289 pwr->clocks = devm_kzalloc(
3290 dev, sizeof(*pwr->clocks) * pwr->num_clocks,
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003291 GFP_KERNEL);
3292
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003293 if (!pwr->clocks)
3294 return -ENOMEM;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003295
3296 i = 0;
3297 of_property_for_each_string(dev->of_node, "clock-names",
3298 prop, cname) {
3299 struct clk *c = devm_clk_get(dev, cname);
3300
3301 if (IS_ERR(c)) {
3302			dev_err(dev, "Couldn't get clock: %s\n",
3303 cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07003304 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003305 }
3306
3307 if (clk_get_rate(c) == 0) {
3308 long rate = clk_round_rate(c, 1000);
3309
3310 clk_set_rate(c, rate);
3311 }
3312
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003313 pwr->clocks[i] = c;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003314
3315 ++i;
3316 }
3317 return 0;
3318}
3319
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003320static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003321{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003322 const char *cname;
3323 struct property *prop;
3324 int i, ret = 0;
3325 struct device *dev = pwr->dev;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003326
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003327 pwr->num_gdscs =
3328 of_property_count_strings(dev->of_node, "qcom,regulator-names");
3329
3330 if (pwr->num_gdscs < 1) {
3331 pwr->num_gdscs = 0;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003332 return 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003333 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003334
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003335 pwr->gdscs = devm_kzalloc(
3336 dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
3337
3338 if (!pwr->gdscs)
3339 return -ENOMEM;
3340
3341 i = 0;
3342 of_property_for_each_string(dev->of_node, "qcom,regulator-names",
3343 prop, cname)
3344 pwr->gdscs[i].supply = cname;
3345
3346 ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
3347 return ret;
3348}
3349
3350static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
3351{
3352 struct device *dev = pwr->dev;
3353
3354 /* We don't want the bus APIs to print an error message */
3355 if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
3356 dev_dbg(dev, "No bus scaling info\n");
3357 return 0;
3358 }
3359
3360 pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
3361 if (!pwr->bus_dt_data) {
3362 dev_err(dev, "Unable to read bus-scaling from devicetree\n");
3363 return -EINVAL;
3364 }
3365
3366 pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
3367 if (!pwr->bus_client) {
3368 dev_err(dev, "Bus client registration failed\n");
3369 msm_bus_cl_clear_pdata(pwr->bus_dt_data);
3370 return -EINVAL;
3371 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003372
3373 return 0;
3374}
3375
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003376/*
3377 * Cleanup done by devm. Any non-devm resources must clean up themselves.
3378 */
3379static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
3380 struct platform_device *pdev)
Patrick Daly2764f952016-09-06 19:22:44 -07003381{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003382 struct arm_smmu_power_resources *pwr;
3383 int ret;
Patrick Daly2764f952016-09-06 19:22:44 -07003384
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003385 pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
3386 if (!pwr)
3387 return ERR_PTR(-ENOMEM);
Patrick Daly2764f952016-09-06 19:22:44 -07003388
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003389 pwr->dev = &pdev->dev;
3390 pwr->pdev = pdev;
3391 mutex_init(&pwr->power_lock);
3392 spin_lock_init(&pwr->clock_refs_lock);
Patrick Daly2764f952016-09-06 19:22:44 -07003393
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003394 ret = arm_smmu_init_clocks(pwr);
3395 if (ret)
3396 return ERR_PTR(ret);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003397
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003398 ret = arm_smmu_init_regulators(pwr);
3399 if (ret)
3400 return ERR_PTR(ret);
3401
3402 ret = arm_smmu_init_bus_scaling(pwr);
3403 if (ret)
3404 return ERR_PTR(ret);
3405
3406 return pwr;
Patrick Daly2764f952016-09-06 19:22:44 -07003407}
3408
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003409/*
3410 * Bus APIs are not devm-safe.
3411 */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003412static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003413{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003414 msm_bus_scale_unregister_client(pwr->bus_client);
3415 msm_bus_cl_clear_pdata(pwr->bus_dt_data);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003416}
3417
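/*
 * Probe the ID registers (ID0-ID2) to discover what this SMMU supports:
 * translation stages, stream matching resources, context bank counts,
 * address sizes and page-table formats.
 */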
Will Deacon45ae7cf2013-06-24 18:31:25 +01003418static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
3419{
3420 unsigned long size;
3421 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
3422 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003423 bool cttw_dt, cttw_reg;
Robin Murphya754fd12016-09-12 17:13:50 +01003424 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003425
Mitchel Humpherysba822582015-10-20 11:37:41 -07003426 dev_dbg(smmu->dev, "probing hardware configuration...\n");
3427 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01003428 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003429
3430 /* ID0 */
3431 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01003432
3433 /* Restrict available stages based on module parameter */
3434 if (force_stage == 1)
3435 id &= ~(ID0_S2TS | ID0_NTS);
3436 else if (force_stage == 2)
3437 id &= ~(ID0_S1TS | ID0_NTS);
3438
Will Deacon45ae7cf2013-06-24 18:31:25 +01003439 if (id & ID0_S1TS) {
3440 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003441 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003442 }
3443
3444 if (id & ID0_S2TS) {
3445 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003446 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003447 }
3448
3449 if (id & ID0_NTS) {
3450 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003451 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003452 }
3453
3454 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01003455 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003456 dev_err(smmu->dev, "\tno translation support!\n");
3457 return -ENODEV;
3458 }
3459
Robin Murphyb7862e32016-04-13 18:13:03 +01003460 if ((id & ID0_S1TS) &&
3461 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003462 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003463 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003464 }
3465
Robin Murphybae2c2d2015-07-29 19:46:05 +01003466 /*
3467 * In order for DMA API calls to work properly, we must defer to what
3468 * the DT says about coherency, regardless of what the hardware claims.
3469 * Fortunately, this also opens up a workaround for systems where the
3470 * ID register value has ended up configured incorrectly.
3471 */
3472 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
3473 cttw_reg = !!(id & ID0_CTTW);
3474 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003475 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003476 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003477 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01003478 cttw_dt ? "" : "non-");
3479 if (cttw_dt != cttw_reg)
3480 dev_notice(smmu->dev,
3481 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003482
Robin Murphy53867802016-09-12 17:13:48 +01003483 /* Max. number of entries we have for stream matching/indexing */
3484 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
3485 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003486 if (id & ID0_SMS) {
Robin Murphy53867802016-09-12 17:13:48 +01003487 u32 smr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003488
3489 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy53867802016-09-12 17:13:48 +01003490 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
3491 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003492 dev_err(smmu->dev,
3493 "stream-matching supported, but no SMRs present!\n");
3494 return -ENODEV;
3495 }
3496
Robin Murphy53867802016-09-12 17:13:48 +01003497 /*
3498 * SMR.ID bits may not be preserved if the corresponding MASK
3499 * bits are set, so check each one separately. We can reject
3500 * masters later if they try to claim IDs outside these masks.
3501 */
3502 smr = smmu->streamid_mask << SMR_ID_SHIFT;
3503 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
3504 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
3505 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003506
Robin Murphy53867802016-09-12 17:13:48 +01003507 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
3508 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
3509 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
3510 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
Dhaval Patel031d7462015-05-09 14:47:29 -07003511
Robin Murphy468f4942016-09-12 17:13:49 +01003512 /* Zero-initialised to mark as invalid */
3513 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
3514 GFP_KERNEL);
3515 if (!smmu->smrs)
3516 return -ENOMEM;
3517
Robin Murphy53867802016-09-12 17:13:48 +01003518 dev_notice(smmu->dev,
3519			   "\tstream matching with %lu register groups, mask 0x%x\n",
3520 size, smmu->smr_mask_mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003521 }
Robin Murphya754fd12016-09-12 17:13:50 +01003522 /* s2cr->type == 0 means translation, so initialise explicitly */
3523 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
3524 GFP_KERNEL);
3525 if (!smmu->s2crs)
3526 return -ENOMEM;
3527 for (i = 0; i < size; i++)
3528 smmu->s2crs[i] = s2cr_init_val;
3529
Robin Murphy53867802016-09-12 17:13:48 +01003530 smmu->num_mapping_groups = size;
Robin Murphy6668f692016-09-12 17:13:54 +01003531 mutex_init(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003532
Robin Murphy7602b872016-04-28 17:12:09 +01003533 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
3534 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
3535 if (!(id & ID0_PTFS_NO_AARCH32S))
3536 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
3537 }
3538
Will Deacon45ae7cf2013-06-24 18:31:25 +01003539 /* ID1 */
3540 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01003541 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003542
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003543 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00003544 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01003545 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003546 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07003547 dev_warn(smmu->dev,
3548 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
3549 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003550
Will Deacon518f7132014-11-14 17:17:54 +00003551 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003552 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
3553 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
3554 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
3555 return -ENODEV;
3556 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07003557 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01003558 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01003559 /*
3560 * Cavium CN88xx erratum #27704.
3561 * Ensure ASID and VMID allocation is unique across all SMMUs in
3562 * the system.
3563 */
3564 if (smmu->model == CAVIUM_SMMUV2) {
3565 smmu->cavium_id_base =
3566 atomic_add_return(smmu->num_context_banks,
3567 &cavium_smmu_context_count);
3568 smmu->cavium_id_base -= smmu->num_context_banks;
3569 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003570
3571 /* ID2 */
3572 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
3573 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003574 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003575
Will Deacon518f7132014-11-14 17:17:54 +00003576 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003577 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003578 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003579
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003580 if (id & ID2_VMID16)
3581 smmu->features |= ARM_SMMU_FEAT_VMID16;
3582
Robin Murphyf1d84542015-03-04 16:41:05 +00003583 /*
3584 * What the page table walker can address actually depends on which
3585 * descriptor format is in use, but since a) we don't know that yet,
3586 * and b) it can vary per context bank, this will have to do...
3587 */
3588 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
3589 dev_warn(smmu->dev,
3590 "failed to set DMA mask for table walker\n");
3591
Robin Murphyb7862e32016-04-13 18:13:03 +01003592 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00003593 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01003594 if (smmu->version == ARM_SMMU_V1_64K)
3595 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003596 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003597 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00003598 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00003599 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01003600 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00003601 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01003602 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00003603 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01003604 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003605 }
3606
Robin Murphy7602b872016-04-28 17:12:09 +01003607 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01003608 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01003609 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01003610 if (smmu->features &
3611 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01003612 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01003613 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01003614 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01003615 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01003616 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01003617
Robin Murphyd5466352016-05-09 17:20:09 +01003618 if (arm_smmu_ops.pgsize_bitmap == -1UL)
3619 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
3620 else
3621 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003622 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01003623 smmu->pgsize_bitmap);
3624
Will Deacon518f7132014-11-14 17:17:54 +00003625
Will Deacon28d60072014-09-01 16:24:48 +01003626 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003627 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
3628 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003629
3630 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003631 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
3632 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003633
Will Deacon45ae7cf2013-06-24 18:31:25 +01003634 return 0;
3635}
3636
Patrick Dalyd7476202016-09-08 18:23:28 -07003637static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
3638{
3639 if (!smmu->arch_ops)
3640 return 0;
3641 if (!smmu->arch_ops->init)
3642 return 0;
3643 return smmu->arch_ops->init(smmu);
3644}
3645
3646static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
3647{
3648 if (!smmu->arch_ops)
3649 return;
3650 if (!smmu->arch_ops->device_reset)
3651 return;
3652 return smmu->arch_ops->device_reset(smmu);
3653}
3654
Robin Murphy67b65a32016-04-13 18:12:57 +01003655struct arm_smmu_match_data {
3656 enum arm_smmu_arch_version version;
3657 enum arm_smmu_implementation model;
Patrick Dalyd7476202016-09-08 18:23:28 -07003658 struct arm_smmu_arch_ops *arch_ops;
Robin Murphy67b65a32016-04-13 18:12:57 +01003659};
3660
Patrick Dalyd7476202016-09-08 18:23:28 -07003661#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops) \
3662static struct arm_smmu_match_data name = { \
3663.version = ver, \
3664.model = imp, \
3665.arch_ops = ops, \
3666}
Robin Murphy67b65a32016-04-13 18:12:57 +01003667
Patrick Daly1f8a2882016-09-12 17:32:05 -07003668struct arm_smmu_arch_ops qsmmuv500_arch_ops;
3669
Patrick Dalyd7476202016-09-08 18:23:28 -07003670ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
3671ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
3672ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
3673ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
3674ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003675ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
Patrick Daly1f8a2882016-09-12 17:32:05 -07003676ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
3677 &qsmmuv500_arch_ops);
Robin Murphy67b65a32016-04-13 18:12:57 +01003678
Joerg Roedel09b52692014-10-02 12:24:45 +02003679static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01003680 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
3681 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
3682 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01003683 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003684 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01003685 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07003686 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Patrick Daly1f8a2882016-09-12 17:32:05 -07003687 { .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
Robin Murphy09360402014-08-28 17:51:59 +01003688 { },
3689};
3690MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
3691
Patrick Dalyc47dcd42017-02-09 23:09:57 -08003692
3693static int arm_smmu_of_iommu_configure_fixup(struct device *dev, void *data)
3694{
3695 if (!dev->iommu_fwspec)
3696 of_iommu_configure(dev, dev->of_node);
3697 return 0;
3698}
3699
Patrick Daly000a2f22017-02-13 22:18:12 -08003700static int arm_smmu_add_device_fixup(struct device *dev, void *data)
3701{
3702 struct iommu_ops *ops = data;
3703
3704 ops->add_device(dev);
3705 return 0;
3706}
3707
Patrick Daly1f8a2882016-09-12 17:32:05 -07003708static int qsmmuv500_tbu_register(struct device *dev, void *data);
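/*
 * Platform probe: choose between the legacy and generic DT bindings, map the
 * register space, collect the global and context interrupts, and set up the
 * clock/regulator/bus power resources before the hardware itself is probed.
 */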
Will Deacon45ae7cf2013-06-24 18:31:25 +01003709static int arm_smmu_device_dt_probe(struct platform_device *pdev)
3710{
Robin Murphy67b65a32016-04-13 18:12:57 +01003711 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003712 struct resource *res;
3713 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003714 struct device *dev = &pdev->dev;
Robin Murphyd5b41782016-09-14 15:21:39 +01003715 int num_irqs, i, err;
Robin Murphy7e96c742016-09-14 15:26:46 +01003716 bool legacy_binding;
3717
3718 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
3719 if (legacy_binding && !using_generic_binding) {
3720 if (!using_legacy_binding)
3721 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
3722 using_legacy_binding = true;
3723 } else if (!legacy_binding && !using_legacy_binding) {
3724 using_generic_binding = true;
3725 } else {
3726 dev_err(dev, "not probing due to mismatched DT properties\n");
3727 return -ENODEV;
3728 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003729
3730 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
3731 if (!smmu) {
3732 dev_err(dev, "failed to allocate arm_smmu_device\n");
3733 return -ENOMEM;
3734 }
3735 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08003736 spin_lock_init(&smmu->atos_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07003737 idr_init(&smmu->asid_idr);
3738 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003739
Robin Murphyfe52d4f2016-09-12 17:13:52 +01003740 data = of_device_get_match_data(dev);
Robin Murphy67b65a32016-04-13 18:12:57 +01003741 smmu->version = data->version;
3742 smmu->model = data->model;
Patrick Dalyd7476202016-09-08 18:23:28 -07003743 smmu->arch_ops = data->arch_ops;
Robin Murphy09360402014-08-28 17:51:59 +01003744
Will Deacon45ae7cf2013-06-24 18:31:25 +01003745 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01003746 smmu->base = devm_ioremap_resource(dev, res);
3747 if (IS_ERR(smmu->base))
3748 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003749 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003750
3751 if (of_property_read_u32(dev->of_node, "#global-interrupts",
3752 &smmu->num_global_irqs)) {
3753 dev_err(dev, "missing #global-interrupts property\n");
3754 return -ENODEV;
3755 }
3756
3757 num_irqs = 0;
3758 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
3759 num_irqs++;
3760 if (num_irqs > smmu->num_global_irqs)
3761 smmu->num_context_irqs++;
3762 }
3763
Andreas Herrmann44a08de2013-10-01 13:39:07 +01003764 if (!smmu->num_context_irqs) {
3765 dev_err(dev, "found %d interrupts but expected at least %d\n",
3766 num_irqs, smmu->num_global_irqs + 1);
3767 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003768 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003769
3770 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
3771 GFP_KERNEL);
3772 if (!smmu->irqs) {
3773 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
3774 return -ENOMEM;
3775 }
3776
3777 for (i = 0; i < num_irqs; ++i) {
3778 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07003779
Will Deacon45ae7cf2013-06-24 18:31:25 +01003780 if (irq < 0) {
3781 dev_err(dev, "failed to get irq index %d\n", i);
3782 return -ENODEV;
3783 }
3784 smmu->irqs[i] = irq;
3785 }
3786
Dhaval Patel031d7462015-05-09 14:47:29 -07003787 parse_driver_options(smmu);
3788
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003789 smmu->pwr = arm_smmu_init_power_resources(pdev);
3790 if (IS_ERR(smmu->pwr))
3791 return PTR_ERR(smmu->pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003792
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003793 err = arm_smmu_power_on(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07003794 if (err)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003795 goto out_exit_power_resources;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003796
3797 err = arm_smmu_device_cfg_probe(smmu);
3798 if (err)
3799 goto out_power_off;
3800
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003801 err = arm_smmu_parse_impl_def_registers(smmu);
3802 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01003803 goto out_power_off;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003804
Robin Murphyb7862e32016-04-13 18:13:03 +01003805 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01003806 smmu->num_context_banks != smmu->num_context_irqs) {
3807 dev_err(dev,
Mitchel Humpherys73230ce2014-12-11 17:18:02 -08003808 "found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
3809 smmu->num_context_irqs, smmu->num_context_banks,
3810 smmu->num_context_banks);
3811 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003812 }
3813
Will Deacon45ae7cf2013-06-24 18:31:25 +01003814 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08003815 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
3816 NULL, arm_smmu_global_fault,
3817 IRQF_ONESHOT | IRQF_SHARED,
3818 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003819 if (err) {
3820 dev_err(dev, "failed to request global IRQ %d (%u)\n",
3821 i, smmu->irqs[i]);
Robin Murphyd5b41782016-09-14 15:21:39 +01003822 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003823 }
3824 }
3825
Patrick Dalyd7476202016-09-08 18:23:28 -07003826 err = arm_smmu_arch_init(smmu);
3827 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01003828 goto out_power_off;
Patrick Dalyd7476202016-09-08 18:23:28 -07003829
Robin Murphy06e393e2016-09-12 17:13:55 +01003830 of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
Robin Murphyfe52d4f2016-09-12 17:13:52 +01003831 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01003832 arm_smmu_device_reset(smmu);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003833 arm_smmu_power_off(smmu->pwr);
Patrick Dalyd7476202016-09-08 18:23:28 -07003834
Patrick Daly8e3371a2017-02-13 22:14:53 -08003835 INIT_LIST_HEAD(&smmu->list);
3836 spin_lock(&arm_smmu_devices_lock);
3837 list_add(&smmu->list, &arm_smmu_devices);
3838 spin_unlock(&arm_smmu_devices_lock);
3839
Patrick Dalyc47dcd42017-02-09 23:09:57 -08003840 /* bus_set_iommu depends on this. */
3841 bus_for_each_dev(&platform_bus_type, NULL, NULL,
3842 arm_smmu_of_iommu_configure_fixup);
3843
Robin Murphy7e96c742016-09-14 15:26:46 +01003844 /* Oh, for a proper bus abstraction */
3845 if (!iommu_present(&platform_bus_type))
3846 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
Patrick Daly000a2f22017-02-13 22:18:12 -08003847 else
3848 bus_for_each_dev(&platform_bus_type, NULL, &arm_smmu_ops,
3849 arm_smmu_add_device_fixup);
Robin Murphy7e96c742016-09-14 15:26:46 +01003850#ifdef CONFIG_ARM_AMBA
3851 if (!iommu_present(&amba_bustype))
3852 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
3853#endif
3854#ifdef CONFIG_PCI
3855 if (!iommu_present(&pci_bus_type)) {
3856 pci_request_acs();
3857 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
3858 }
3859#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01003860 return 0;
3861
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003862out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003863 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003864
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003865out_exit_power_resources:
3866 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003867
Will Deacon45ae7cf2013-06-24 18:31:25 +01003868 return err;
3869}
3870
3871static int arm_smmu_device_remove(struct platform_device *pdev)
3872{
Robin Murphyfe52d4f2016-09-12 17:13:52 +01003873 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003874
3875 if (!smmu)
3876 return -ENODEV;
3877
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003878 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003879 return -EINVAL;
3880
Will Deaconecfadb62013-07-31 19:21:28 +01003881 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyfe52d4f2016-09-12 17:13:52 +01003882 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003883
Patrick Dalyc190d932016-08-30 17:23:28 -07003884 idr_destroy(&smmu->asid_idr);
3885
Will Deacon45ae7cf2013-06-24 18:31:25 +01003886 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07003887 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003888 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003889
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003890 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07003891
Will Deacon45ae7cf2013-06-24 18:31:25 +01003892 return 0;
3893}
3894
Will Deacon45ae7cf2013-06-24 18:31:25 +01003895static struct platform_driver arm_smmu_driver = {
3896 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003897 .name = "arm-smmu",
3898 .of_match_table = of_match_ptr(arm_smmu_of_match),
3899 },
3900 .probe = arm_smmu_device_dt_probe,
3901 .remove = arm_smmu_device_remove,
3902};
3903
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08003904static struct platform_driver qsmmuv500_tbu_driver;
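/*
 * Register the TBU driver before the SMMU driver: qsmmuv500_arch_init()
 * populates the TBU child devices during the SMMU probe and expects them
 * to have bound to their driver by then.
 */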
Will Deacon45ae7cf2013-06-24 18:31:25 +01003905static int __init arm_smmu_init(void)
3906{
Robin Murphy7e96c742016-09-14 15:26:46 +01003907 static bool registered;
3908 int ret = 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003909
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08003910 if (registered)
3911 return 0;
3912
3913 ret = platform_driver_register(&qsmmuv500_tbu_driver);
3914 if (ret)
3915 return ret;
3916
3917 ret = platform_driver_register(&arm_smmu_driver);
3918 registered = !ret;
Robin Murphy7e96c742016-09-14 15:26:46 +01003919 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003920}
3921
3922static void __exit arm_smmu_exit(void)
3923{
3924 return platform_driver_unregister(&arm_smmu_driver);
3925}
3926
Andreas Herrmannb1950b22013-10-01 13:39:05 +01003927subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003928module_exit(arm_smmu_exit);
3929
Robin Murphy7e96c742016-09-14 15:26:46 +01003930static int __init arm_smmu_of_init(struct device_node *np)
3931{
3932 int ret = arm_smmu_init();
3933
3934 if (ret)
3935 return ret;
3936
3937 if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
3938 return -ENODEV;
3939
3940 return 0;
3941}
3942IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
3943IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
3944IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
3945IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
3946IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
3947IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
Robin Murphy7e96c742016-09-14 15:26:46 +01003948
Patrick Dalya0fddb62017-03-27 19:26:59 -07003949#define TCU_HW_VERSION_HLOS1 (0x18)
3950
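/*
 * QSMMUv500 TBU debug interface registers, used to halt client traffic
 * and to issue ECATS address-translation requests. Offsets are relative
 * to each TBU's "base" register region.
 */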
Patrick Daly1f8a2882016-09-12 17:32:05 -07003951#define DEBUG_SID_HALT_REG 0x0
3952#define DEBUG_SID_HALT_VAL (0x1 << 16)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07003953#define DEBUG_SID_HALT_SID_MASK 0x3ff
3954
3955#define DEBUG_VA_ADDR_REG 0x8
3956
3957#define DEBUG_TXN_TRIGG_REG 0x18
3958#define DEBUG_TXN_AXPROT_SHIFT 6
3959#define DEBUG_TXN_AXCACHE_SHIFT 2
3960#define DEBUG_TRX_WRITE (0x1 << 1)
3961#define DEBUG_TXN_READ (0x0 << 1)
3962#define DEBUG_TXN_TRIGGER 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07003963
3964#define DEBUG_SR_HALT_ACK_REG 0x20
3965#define DEBUG_SR_HALT_ACK_VAL (0x1 << 1)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07003966#define DEBUG_SR_ECATS_RUNNING_VAL (0x1 << 0)
3967
3968#define DEBUG_PAR_REG 0x28
3969#define DEBUG_PAR_PA_MASK ((0x1ULL << 36) - 1)
3970#define DEBUG_PAR_PA_SHIFT 12
3971#define DEBUG_PAR_FAULT_VAL 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07003972
3973#define TBU_DBG_TIMEOUT_US 30000
3974
Patrick Daly6b290f1e2017-03-27 19:26:59 -07003975struct qsmmuv500_archdata {
3976 struct list_head tbus;
Patrick Dalya0fddb62017-03-27 19:26:59 -07003977 void __iomem *tcu_base;
3978 u32 version;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07003979};
Patrick Dalye15b3bc2017-04-05 14:53:59 -07003980#define get_qsmmuv500_archdata(smmu) \
3981 ((struct qsmmuv500_archdata *)(smmu->archdata))
Patrick Daly6b290f1e2017-03-27 19:26:59 -07003982
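/* Per-TBU state, one instance per "qcom,qsmmuv500-tbu" child device. */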
Patrick Daly1f8a2882016-09-12 17:32:05 -07003983struct qsmmuv500_tbu_device {
3984 struct list_head list;
3985 struct device *dev;
3986 struct arm_smmu_device *smmu;
3987 void __iomem *base;
3988 void __iomem *status_reg;
3989
3990 struct arm_smmu_power_resources *pwr;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07003991 u32 sid_start;
3992 u32 num_sids;
Patrick Daly1f8a2882016-09-12 17:32:05 -07003993
3994 /* Protects halt count */
3995 spinlock_t halt_lock;
3996 u32 halt_count;
3997};
3998
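/* Power on every registered TBU, undoing the ones already on if one fails. */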
3999static int qsmmuv500_tbu_power_on_all(struct arm_smmu_device *smmu)
4000{
4001 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004002 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004003 int ret = 0;
4004
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004005 list_for_each_entry(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004006 ret = arm_smmu_power_on(tbu->pwr);
4007 if (ret)
4008 break;
4009 }
4010 if (!ret)
4011 return 0;
4012
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004013 list_for_each_entry_continue_reverse(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004014 arm_smmu_power_off(tbu->pwr);
4015 }
4016 return ret;
4017}
4018
4019static void qsmmuv500_tbu_power_off_all(struct arm_smmu_device *smmu)
4020{
4021 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004022 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004023
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004024 list_for_each_entry_reverse(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004025 arm_smmu_power_off(tbu->pwr);
4026 }
4027}
4028
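/*
 * Halt incoming traffic on a TBU. Halts are reference counted; the halt
 * bit is only written on the first request and must be balanced with a
 * call to qsmmuv500_tbu_resume().
 */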
4029static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu)
4030{
4031 unsigned long flags;
4032 u32 val;
4033 void __iomem *base;
4034
4035 spin_lock_irqsave(&tbu->halt_lock, flags);
4036 if (tbu->halt_count) {
4037 tbu->halt_count++;
4038 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4039 return 0;
4040 }
4041
4042 base = tbu->base;
4043 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
4044 val |= DEBUG_SID_HALT_VAL;
4045 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
4046
4047 if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG,
4048 val, (val & DEBUG_SR_HALT_ACK_VAL),
4049 0, TBU_DBG_TIMEOUT_US)) {
4050 dev_err(tbu->dev, "Couldn't halt TBU!\n");
4051 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4052 return -ETIMEDOUT;
4053 }
4054
4055 tbu->halt_count = 1;
4056 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4057 return 0;
4058}
4059
4060static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
4061{
4062 unsigned long flags;
4063 u32 val;
4064 void __iomem *base;
4065
4066 spin_lock_irqsave(&tbu->halt_lock, flags);
4067 if (!tbu->halt_count) {
4068 WARN(1, "%s: bad tbu->halt_count", dev_name(tbu->dev));
4069 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4070 return;
4071
4072 } else if (tbu->halt_count > 1) {
4073 tbu->halt_count--;
4074 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4075 return;
4076 }
4077
4078 base = tbu->base;
4079 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
4080 val &= ~DEBUG_SID_HALT_VAL;
4081 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
4082
4083 tbu->halt_count = 0;
4084 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4085}
4086
4087static int qsmmuv500_halt_all(struct arm_smmu_device *smmu)
4088{
4089 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004090 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004091 int ret = 0;
4092
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004093 list_for_each_entry(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004094 ret = qsmmuv500_tbu_halt(tbu);
4095 if (ret)
4096 break;
4097 }
4098
4099 if (!ret)
4100 return 0;
4101
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004102 list_for_each_entry_continue_reverse(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004103 qsmmuv500_tbu_resume(tbu);
4104 }
4105 return ret;
4106}
4107
4108static void qsmmuv500_resume_all(struct arm_smmu_device *smmu)
4109{
4110 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004111 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004112
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004113 list_for_each_entry(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004114 qsmmuv500_tbu_resume(tbu);
4115 }
4116}
4117
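/* Return the TBU whose stream-ID range covers @sid, or NULL if none does. */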
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004118static struct qsmmuv500_tbu_device *qsmmuv500_find_tbu(
4119 struct arm_smmu_device *smmu, u32 sid)
4120{
4121 struct qsmmuv500_tbu_device *tbu = NULL;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004122 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004123
4124 list_for_each_entry(tbu, &data->tbus, list) {
4125 if (tbu->sid_start <= sid &&
4126 sid < tbu->sid_start + tbu->num_sids)
4127			return tbu;
4128	}
4129	return NULL;
4130}
4131
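/*
 * Program the implementation-defined registers from DT with all TBUs
 * powered on and halted so the writes cannot race with client traffic.
 */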
Patrick Daly1f8a2882016-09-12 17:32:05 -07004132static void qsmmuv500_device_reset(struct arm_smmu_device *smmu)
4133{
4134 int i, ret;
4135 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
4136
4137 ret = qsmmuv500_tbu_power_on_all(smmu);
4138 if (ret)
4139 return;
4140
4141 /* Program implementation defined registers */
4142 qsmmuv500_halt_all(smmu);
4143 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
4144 writel_relaxed(regs[i].value,
4145 ARM_SMMU_GR0(smmu) + regs[i].offset);
4146 qsmmuv500_resume_all(smmu);
4147 qsmmuv500_tbu_power_off_all(smmu);
4148}
4149
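/*
 * Serialise ECATS requests: hold the SMMU-wide atos_lock and, on hardware
 * later than version 1.0, poll the TBU status register until it reads 1
 * before a new request may be issued.
 */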
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004150static int qsmmuv500_ecats_lock(struct arm_smmu_domain *smmu_domain,
4151 struct qsmmuv500_tbu_device *tbu,
4152 unsigned long *flags)
4153{
4154 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004155 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004156 u32 val;
4157
4158 spin_lock_irqsave(&smmu->atos_lock, *flags);
4159 /* The status register is not accessible on version 1.0 */
4160 if (data->version == 0x01000000)
4161 return 0;
4162
4163 if (readl_poll_timeout_atomic(tbu->status_reg,
4164 val, (val == 0x1), 0,
4165 TBU_DBG_TIMEOUT_US)) {
4166 dev_err(tbu->dev, "ECATS hw busy!\n");
4167 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4168 return -ETIMEDOUT;
4169 }
4170
4171 return 0;
4172}
4173
4174static void qsmmuv500_ecats_unlock(struct arm_smmu_domain *smmu_domain,
4175 struct qsmmuv500_tbu_device *tbu,
4176 unsigned long *flags)
4177{
4178 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004179 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004180
4181 /* The status register is not accessible on version 1.0 */
4182 if (data->version != 0x01000000)
4183 writel_relaxed(0, tbu->status_reg);
4184 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4185}
4186
4187/*
4188 * Zero means failure.
4189 */
4190static phys_addr_t qsmmuv500_iova_to_phys(
4191 struct iommu_domain *domain, dma_addr_t iova, u32 sid)
4192{
4193 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
4194 struct arm_smmu_device *smmu = smmu_domain->smmu;
4195 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
4196 struct qsmmuv500_tbu_device *tbu;
4197 int ret;
4198 phys_addr_t phys = 0;
4199 u64 val, fsr;
4200 unsigned long flags;
4201 void __iomem *cb_base;
4202 u32 sctlr_orig, sctlr;
4203 int needs_redo = 0;
4204
4205 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
4206 tbu = qsmmuv500_find_tbu(smmu, sid);
4207 if (!tbu)
4208 return 0;
4209
4210 ret = arm_smmu_power_on(tbu->pwr);
4211 if (ret)
4212 return 0;
4213
4214 /*
4215 * Disable client transactions & wait for existing operations to
4216 * complete.
4217 */
4218 ret = qsmmuv500_tbu_halt(tbu);
4219 if (ret)
4220 goto out_power_off;
4221
4222 /* Only one concurrent atos operation */
4223 ret = qsmmuv500_ecats_lock(smmu_domain, tbu, &flags);
4224 if (ret)
4225 goto out_resume;
4226
4227 /*
4228 * We can be called from an interrupt handler with FSR already set
4229	 * so terminate the faulting transaction prior to starting ECATS.
4230	 * No new racing faults can occur since we are in the halted state.
4231 * ECATS can trigger the fault interrupt, so disable it temporarily
4232 * and check for an interrupt manually.
4233 */
4234 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4235 if (fsr & FSR_FAULT) {
4236 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
4237 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4238 }
4239 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
4240 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
4241 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
4242
4243redo:
4244 /* Set address and stream-id */
4245 val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
4246 val |= sid & DEBUG_SID_HALT_SID_MASK;
4247 writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
4248 writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
4249
4250 /*
4251 * Write-back Read and Write-Allocate
4252	 * Privileged, non-secure data transaction
4253 * Read operation.
4254 */
4255 val = 0xF << DEBUG_TXN_AXCACHE_SHIFT;
4256 val |= 0x3 << DEBUG_TXN_AXPROT_SHIFT;
4257 val |= DEBUG_TXN_TRIGGER;
4258 writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);
4259
4260 ret = 0;
4261 if (readl_poll_timeout_atomic(tbu->base + DEBUG_SR_HALT_ACK_REG,
4262 val, !(val & DEBUG_SR_ECATS_RUNNING_VAL),
4263 0, TBU_DBG_TIMEOUT_US)) {
4264 dev_err(tbu->dev, "ECATS translation timed out!\n");
4265 }
4266
4267 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4268 if (fsr & FSR_FAULT) {
4269		dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx\n",
4270			fsr);
4271		ret = -EINVAL;
4272
4273		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
4274 /*
4275 * Clear pending interrupts
4276 * Barrier required to ensure that the FSR is cleared
4277 * before resuming SMMU operation
4278 */
4279 wmb();
4280 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4281 }
4282
4283 val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
4284 if (val & DEBUG_PAR_FAULT_VAL) {
4285 dev_err(tbu->dev, "ECATS translation failed! PAR = %llx\n",
4286 val);
4287 ret = -EINVAL;
4288 }
4289
4290 phys = (val >> DEBUG_PAR_PA_SHIFT) & DEBUG_PAR_PA_MASK;
4291 if (ret < 0)
4292 phys = 0;
4293
4294 /* Reset hardware */
4295 writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
4296 writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
4297
4298	/*
4299	 * After a failed translation, the next successful translation will
4300	 * incorrectly be reported as a failure, so retry before giving up.
4301	 */
4302 if (!phys && needs_redo++ < 2)
4303 goto redo;
4304
4305 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
4306 qsmmuv500_ecats_unlock(smmu_domain, tbu, &flags);
4307
4308out_resume:
4309 qsmmuv500_tbu_resume(tbu);
4310
4311out_power_off:
4312 arm_smmu_power_off(tbu->pwr);
4313
4314 return phys;
4315}
4316
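/* Hardware IOVA-to-phys lookup using the first stream ID of the domain. */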
4317static phys_addr_t qsmmuv500_iova_to_phys_hard(
4318 struct iommu_domain *domain, dma_addr_t iova)
4319{
4320 u16 sid;
4321 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
4322 struct iommu_fwspec *fwspec;
4323
4324 /* Select a sid */
4325 fwspec = smmu_domain->dev->iommu_fwspec;
4326 sid = (u16)fwspec->ids[0];
4327
4328 return qsmmuv500_iova_to_phys(domain, iova, sid);
4329}
4330
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004331static int qsmmuv500_tbu_register(struct device *dev, void *cookie)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004332{
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004333 struct arm_smmu_device *smmu = cookie;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004334 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004335 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004336
4337 if (!dev->driver) {
4338 dev_err(dev, "TBU failed probe, QSMMUV500 cannot continue!\n");
4339 return -EINVAL;
4340 }
4341
4342 tbu = dev_get_drvdata(dev);
4343
4344 INIT_LIST_HEAD(&tbu->list);
4345 tbu->smmu = smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004346 list_add(&tbu->list, &data->tbus);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004347 return 0;
4348}
4349
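/*
 * Map the TCU register space, read the hardware version and create the
 * TBU child devices described under this SMMU's device-tree node.
 */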
4350static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
4351{
Patrick Dalya0fddb62017-03-27 19:26:59 -07004352 struct resource *res;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004353 struct device *dev = smmu->dev;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004354 struct qsmmuv500_archdata *data;
Patrick Dalya0fddb62017-03-27 19:26:59 -07004355 struct platform_device *pdev;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004356 int ret;
4357
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004358 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
4359 if (!data)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004360 return -ENOMEM;
4361
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004362 INIT_LIST_HEAD(&data->tbus);
Patrick Dalya0fddb62017-03-27 19:26:59 -07004363
4364 pdev = container_of(dev, struct platform_device, dev);
4365 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcu-base");
4366 data->tcu_base = devm_ioremap_resource(dev, res);
4367 if (IS_ERR(data->tcu_base))
4368 return PTR_ERR(data->tcu_base);
4369
4370 data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004371 smmu->archdata = data;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004372
4373 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
4374 if (ret)
4375 return ret;
4376
4377 /* Attempt to register child devices */
4378 ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
4379 if (ret)
4380 return -EINVAL;
4381
4382 return 0;
4383}
4384
4385struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
4386 .init = qsmmuv500_arch_init,
4387 .device_reset = qsmmuv500_device_reset,
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004388 .iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
Patrick Daly1f8a2882016-09-12 17:32:05 -07004389};
4390
4391static const struct of_device_id qsmmuv500_tbu_of_match[] = {
4392 {.compatible = "qcom,qsmmuv500-tbu"},
4393 {}
4394};
4395
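/*
 * Rough sketch of the TBU child node this probe expects (register
 * addresses and the stream-ID range below are made-up placeholders;
 * the property names match the code that follows):
 *
 *	tbu@0 {
 *		compatible = "qcom,qsmmuv500-tbu";
 *		reg = <0x0 0x1000>, <0x2000 0x4>;
 *		reg-names = "base", "status-reg";
 *		qcom,stream-id-range = <0x800 0x400>;
 *	};
 */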
4396static int qsmmuv500_tbu_probe(struct platform_device *pdev)
4397{
4398 struct resource *res;
4399 struct device *dev = &pdev->dev;
4400 struct qsmmuv500_tbu_device *tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004401 const __be32 *cell;
4402 int len;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004403
4404 tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
4405 if (!tbu)
4406 return -ENOMEM;
4407
4408 INIT_LIST_HEAD(&tbu->list);
4409 tbu->dev = dev;
4410 spin_lock_init(&tbu->halt_lock);
4411
4412 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
4413 tbu->base = devm_ioremap_resource(dev, res);
4414 if (IS_ERR(tbu->base))
4415 return PTR_ERR(tbu->base);
4416
4417 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "status-reg");
4418 tbu->status_reg = devm_ioremap_resource(dev, res);
4419 if (IS_ERR(tbu->status_reg))
4420 return PTR_ERR(tbu->status_reg);
4421
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004422 cell = of_get_property(dev->of_node, "qcom,stream-id-range", &len);
4423 if (!cell || len < 8)
4424 return -EINVAL;
4425
4426 tbu->sid_start = of_read_number(cell, 1);
4427 tbu->num_sids = of_read_number(cell + 1, 1);
4428
Patrick Daly1f8a2882016-09-12 17:32:05 -07004429 tbu->pwr = arm_smmu_init_power_resources(pdev);
4430 if (IS_ERR(tbu->pwr))
4431 return PTR_ERR(tbu->pwr);
4432
4433 dev_set_drvdata(dev, tbu);
4434 return 0;
4435}
4436
4437static struct platform_driver qsmmuv500_tbu_driver = {
4438 .driver = {
4439 .name = "qsmmuv500-tbu",
4440 .of_match_table = of_match_ptr(qsmmuv500_tbu_of_match),
4441 },
4442 .probe = qsmmuv500_tbu_probe,
4443};
4444
Will Deacon45ae7cf2013-06-24 18:31:25 +01004445MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
4446MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
4447MODULE_LICENSE("GPL v2");