/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/of_platform.h>
#include <linux/msm-bus.h>
#include <dt-bindings/msm/msm-bus-ids.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

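/*
 * Illustrative sketch, not part of the original driver: when the
 * "calxeda,smmu-secure-config-access" option is set, ARM_SMMU_GR0_NS()
 * shifts global register accesses up by 0x400 so that they land on the
 * secure aliases listed above. A hypothetical read of the global fault
 * status register would then be:
 *
 *	u32 gfsr = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
 *
 * which resolves to offset 0x48 normally and to nsGFSR at 0x448 on
 * secure-config-access platforms.
 */
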
/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

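/*
 * Usage sketch (an illustration, not taken from the original source):
 * callers such as the stage-2 TLB-invalidate-by-IPA path use
 * smmu_write_atomic_lq() so a 64-bit register write is a single
 * writeq_relaxed() on 64-bit kernels, while 32-bit kernels fall back to
 * writel_relaxed() of the low word, which is enough for the AArch32
 * formats they can program:
 *
 *	smmu_write_atomic_lq(iova, reg);
 */
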
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		500000	/* 500ms */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FSRRESTORE		0x5c
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define TLBSTATUS_SACTIVE		(1 << 0)
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
	QCOM_SMMUV500,
};

struct arm_smmu_device;
struct arm_smmu_arch_ops {
	int (*init)(struct arm_smmu_device *smmu);
	void (*device_reset)(struct arm_smmu_device *smmu);
	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
					 dma_addr_t iova);
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
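
/*
 * Illustrative sketch, not part of the original driver: the helpers above
 * map a device's iommu_fwspec onto indices into smmu->smrs[]/smmu->s2crs[].
 * A hypothetical walk over a master's stream-map entries would look like:
 *
 *	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
 *	int i, idx;
 *
 *	for_each_cfg_sme(fwspec, i, idx) {
 *		if (idx == INVALID_SMENDX)
 *			continue;
 *		... program smmu->smrs[idx] and smmu->s2crs[idx] ...
 *	}
 */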

/*
 * Describes resources required for on/off power operation.
 * Separate reference count is provided for atomic/nonatomic
 * operations.
 */
struct arm_smmu_power_resources {
	struct platform_device		*pdev;
	struct device			*dev;

	struct clk			**clocks;
	int				num_clocks;

	struct regulator_bulk_data	*gdscs;
	int				num_gdscs;

	uint32_t			bus_client;
	struct msm_bus_scale_pdata	*bus_dt_data;

	/* Protects power_count */
	struct mutex			power_lock;
	int				power_count;

	/* Protects clock_refs_count */
	spinlock_t			clock_refs_lock;
	int				clock_refs_count;
};

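/*
 * Sketch of the intended split (an illustration, not from the original
 * source): the two reference counts let sleepable and atomic paths nest.
 * A blockable caller takes power_lock and turns on bus votes, GDSC
 * regulators and clk_prepare(); an atomic caller only manipulates
 * clock_refs_count under clock_refs_lock and clk_enable()s the already
 * prepared clocks.
 */
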
struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
#define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
#define ARM_SMMU_OPT_3LVL_TABLES	(1 << 4)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;

	u32				cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
	unsigned int			num_impl_def_attach_registers;

	struct arm_smmu_power_resources *pwr;

	spinlock_t			atos_lock;

	/* protects idr */
	struct mutex			idr_mutex;
	struct idr			asid_idr;

	struct arm_smmu_arch_ops	*arch_ops;
	void				*archdata;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	u32				procid;
	u16				asid;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff
#define INVALID_CBNDX			0xff
#define INVALID_ASID			0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_pte_info {
	void *virt_addr;
	size_t size;
	struct list_head entry;
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct device			*dev;
	struct io_pgtable_ops		*pgtbl_ops;
	struct io_pgtable_cfg		pgtbl_cfg;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	u32				attributes;
	u32				secure_vmid;
	struct list_head		pte_info_list;
	struct list_head		unassign_list;
	struct mutex			assign_lock;
	struct list_head		secure_pool_list;
	struct iommu_domain		domain;
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
	{ 0, NULL},
};

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);

static int arm_smmu_arch_init(struct arm_smmu_device *smmu);
static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu);

static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_dbg(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
{
	if (smmu_domain->attributes &
			(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT))
		return true;
	else if (smmu_domain->smmu && smmu_domain->smmu->dev)
		return smmu_domain->smmu->dev->archdata.dma_coherent;
	else
		return false;
}

static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
{
	return (smmu_domain->secure_vmid != VMID_INVAL);
}

static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_lock(&smmu_domain->assign_lock);
}

static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_unlock(&smmu_domain->assign_lock);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					    struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err = 0;

	memset(&it, 0, sizeof(it));
	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_prepare(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
			while (i--)
				clk_unprepare(pwr->clocks[i]);
			break;
		}
	}
	return ret;
}

static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_unprepare(pwr->clocks[i - 1]);
}

static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_enable(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
			while (i--)
				clk_disable(pwr->clocks[i]);
			break;
		}
	}

	return ret;
}

static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_disable(pwr->clocks[i - 1]);
}

static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return 0;
	return msm_bus_scale_client_update_request(pwr->bus_client, 1);
}

static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return;
	WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
}

/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count > 0) {
		pwr->clock_refs_count++;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return 0;
	}

	ret = arm_smmu_enable_clocks(pwr);
	if (!ret)
		pwr->clock_refs_count = 1;

	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
	return ret;
}

/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
{
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count == 0) {
		WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;

	} else if (pwr->clock_refs_count > 1) {
		pwr->clock_refs_count--;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;
	}

	arm_smmu_disable_clocks(pwr);

	pwr->clock_refs_count = 0;
	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
}

static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
{
	int ret;

	mutex_lock(&pwr->power_lock);
	if (pwr->power_count > 0) {
		pwr->power_count += 1;
		mutex_unlock(&pwr->power_lock);
		return 0;
	}

	ret = arm_smmu_request_bus(pwr);
	if (ret)
		goto out_unlock;

	ret = regulator_bulk_enable(pwr->num_gdscs, pwr->gdscs);
	if (ret)
		goto out_disable_bus;

	ret = arm_smmu_prepare_clocks(pwr);
	if (ret)
		goto out_disable_regulators;

	pwr->power_count = 1;
	mutex_unlock(&pwr->power_lock);
	return 0;

out_disable_regulators:
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
out_disable_bus:
	arm_smmu_unrequest_bus(pwr);
out_unlock:
	mutex_unlock(&pwr->power_lock);
	return ret;
}

static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
{
	mutex_lock(&pwr->power_lock);
	if (pwr->power_count == 0) {
		WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
		mutex_unlock(&pwr->power_lock);
		return;

	} else if (pwr->power_count > 1) {
		pwr->power_count--;
		mutex_unlock(&pwr->power_lock);
		return;
	}

	arm_smmu_unprepare_clocks(pwr);
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
	arm_smmu_unrequest_bus(pwr);
	pwr->power_count = 0;
	mutex_unlock(&pwr->power_lock);
}

static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
{
	int ret;

	ret = arm_smmu_power_on_slow(pwr);
	if (ret)
		return ret;

	ret = arm_smmu_power_on_atomic(pwr);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	arm_smmu_power_off_slow(pwr);
	return ret;
}

static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
{
	arm_smmu_power_off_atomic(pwr);
	arm_smmu_power_off_slow(pwr);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static int arm_smmu_domain_power_on(struct iommu_domain *domain,
				    struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain)
		return arm_smmu_power_on_atomic(smmu->pwr);

	return arm_smmu_power_on(smmu->pwr);
}

/*
 * Must be used instead of arm_smmu_power_off if it may be called from
 * atomic context
 */
static void arm_smmu_domain_power_off(struct iommu_domain *domain,
				      struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain) {
		arm_smmu_power_off_atomic(smmu->pwr);
		return;
	}

	arm_smmu_power_off(smmu->pwr);
}

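/*
 * Illustrative call pattern (an assumption for clarity, not taken from the
 * original source): a blockable caller does the full two-level sequence,
 * while DOMAIN_ATTR_ATOMIC domains go through arm_smmu_domain_power_on/off()
 * and only bump the atomic clock reference count, relying on the slow
 * resources (bus vote, GDSCs, clk_prepare) having been left on by an
 * earlier non-atomic power-on.
 *
 *	ret = arm_smmu_power_on(smmu->pwr);
 *	if (!ret) {
 *		... access SMMU registers ...
 *		arm_smmu_power_off(smmu->pwr);
 *	}
 */
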
Will Deacon45ae7cf2013-06-24 18:31:25 +0100938/* Wait for any pending TLB invalidations to complete */
Mitchel Humpherysf3007992015-06-19 15:00:14 -0700939static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
940 int cbndx)
941{
942 void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
943 u32 val;
944
945 writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
946 if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
947 !(val & TLBSTATUS_SACTIVE),
Mitchel Humpherys9b1b8942015-06-25 18:17:15 -0700948 0, TLB_LOOP_TIMEOUT))
Mitchel Humpherysf3007992015-06-19 15:00:14 -0700949 dev_err(smmu->dev, "TLBSYNC timeout!\n");
950}
951
Will Deacon518f7132014-11-14 17:17:54 +0000952static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100953{
954 int count = 0;
955 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
956
957 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
958 while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
959 & sTLBGSTATUS_GSACTIVE) {
960 cpu_relax();
961 if (++count == TLB_LOOP_TIMEOUT) {
962 dev_err_ratelimited(smmu->dev,
963 "TLB sync timed out -- SMMU may be deadlocked\n");
964 return;
965 }
966 udelay(1);
967 }
968}
969
Will Deacon518f7132014-11-14 17:17:54 +0000970static void arm_smmu_tlb_sync(void *cookie)
Will Deacon1463fe42013-07-31 19:21:27 +0100971{
Will Deacon518f7132014-11-14 17:17:54 +0000972 struct arm_smmu_domain *smmu_domain = cookie;
Mitchel Humpherysf3007992015-06-19 15:00:14 -0700973 arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
Will Deacon518f7132014-11-14 17:17:54 +0000974}
975
Patrick Daly8befb662016-08-17 20:03:28 -0700976/* Must be called with clocks/regulators enabled */
Will Deacon518f7132014-11-14 17:17:54 +0000977static void arm_smmu_tlb_inv_context(void *cookie)
978{
979 struct arm_smmu_domain *smmu_domain = cookie;
Will Deacon44680ee2014-06-25 11:29:12 +0100980 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
981 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon1463fe42013-07-31 19:21:27 +0100982 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
Will Deacon518f7132014-11-14 17:17:54 +0000983 void __iomem *base;
Will Deacon1463fe42013-07-31 19:21:27 +0100984
985 if (stage1) {
986 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800987 writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +0100988 base + ARM_SMMU_CB_S1_TLBIASID);
Mitchel Humpherysf3007992015-06-19 15:00:14 -0700989 arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +0100990 } else {
991 base = ARM_SMMU_GR0(smmu);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800992 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +0100993 base + ARM_SMMU_GR0_TLBIVMID);
Mitchel Humpherysf3007992015-06-19 15:00:14 -0700994 __arm_smmu_tlb_sync(smmu);
Will Deacon1463fe42013-07-31 19:21:27 +0100995 }
Will Deacon1463fe42013-07-31 19:21:27 +0100996}
997
Will Deacon518f7132014-11-14 17:17:54 +0000998static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
Robin Murphy06c610e2015-12-07 18:18:53 +0000999 size_t granule, bool leaf, void *cookie)
Will Deacon518f7132014-11-14 17:17:54 +00001000{
1001 struct arm_smmu_domain *smmu_domain = cookie;
1002 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1003 struct arm_smmu_device *smmu = smmu_domain->smmu;
1004 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1005 void __iomem *reg;
1006
1007 if (stage1) {
1008 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1009 reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
1010
Robin Murphy7602b872016-04-28 17:12:09 +01001011 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001012 iova &= ~12UL;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001013 iova |= ARM_SMMU_CB_ASID(smmu, cfg);
Robin Murphy75df1382015-12-07 18:18:52 +00001014 do {
1015 writel_relaxed(iova, reg);
1016 iova += granule;
1017 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001018 } else {
1019 iova >>= 12;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001020 iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
Robin Murphy75df1382015-12-07 18:18:52 +00001021 do {
1022 writeq_relaxed(iova, reg);
1023 iova += granule >> 12;
1024 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001025 }
Will Deacon518f7132014-11-14 17:17:54 +00001026 } else if (smmu->version == ARM_SMMU_V2) {
1027 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1028 reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
1029 ARM_SMMU_CB_S2_TLBIIPAS2;
Robin Murphy75df1382015-12-07 18:18:52 +00001030 iova >>= 12;
1031 do {
Robin Murphyf9a05f02016-04-13 18:13:01 +01001032 smmu_write_atomic_lq(iova, reg);
Robin Murphy75df1382015-12-07 18:18:52 +00001033 iova += granule >> 12;
1034 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001035 } else {
1036 reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001037 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
Will Deacon518f7132014-11-14 17:17:54 +00001038 }
1039}
1040
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001041struct arm_smmu_secure_pool_chunk {
1042 void *addr;
1043 size_t size;
1044 struct list_head list;
1045};
1046
1047static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
1048 size_t size)
1049{
1050 struct arm_smmu_secure_pool_chunk *it;
1051
1052 list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
1053 if (it->size == size) {
1054 void *addr = it->addr;
1055
1056 list_del(&it->list);
1057 kfree(it);
1058 return addr;
1059 }
1060 }
1061
1062 return NULL;
1063}
1064
1065static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
1066 void *addr, size_t size)
1067{
1068 struct arm_smmu_secure_pool_chunk *chunk;
1069
1070 chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
1071 if (!chunk)
1072 return -ENOMEM;
1073
1074 chunk->addr = addr;
1075 chunk->size = size;
1076 memset(addr, 0, size);
1077 list_add(&chunk->list, &smmu_domain->secure_pool_list);
1078
1079 return 0;
1080}
1081
1082static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
1083{
1084 struct arm_smmu_secure_pool_chunk *it, *i;
1085
1086 list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
1087 arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
1088 /* pages will be freed later (after being unassigned) */
1089 kfree(it);
1090 }
1091}
1092
Patrick Dalyc11d1082016-09-01 15:52:44 -07001093static void *arm_smmu_alloc_pages_exact(void *cookie,
1094 size_t size, gfp_t gfp_mask)
1095{
1096 int ret;
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001097 void *page;
1098 struct arm_smmu_domain *smmu_domain = cookie;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001099
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001100 if (!arm_smmu_is_domain_secure(smmu_domain))
1101 return alloc_pages_exact(size, gfp_mask);
1102
1103 page = arm_smmu_secure_pool_remove(smmu_domain, size);
1104 if (page)
1105 return page;
1106
1107 page = alloc_pages_exact(size, gfp_mask);
1108 if (page) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07001109 ret = arm_smmu_prepare_pgtable(page, cookie);
1110 if (ret) {
1111 free_pages_exact(page, size);
1112 return NULL;
1113 }
1114 }
1115
1116 return page;
1117}
1118
1119static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
1120{
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001121 struct arm_smmu_domain *smmu_domain = cookie;
1122
1123 if (!arm_smmu_is_domain_secure(smmu_domain)) {
1124 free_pages_exact(virt, size);
1125 return;
1126 }
1127
1128 if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
1129 arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001130}
1131
Will Deacon518f7132014-11-14 17:17:54 +00001132static struct iommu_gather_ops arm_smmu_gather_ops = {
1133 .tlb_flush_all = arm_smmu_tlb_inv_context,
1134 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
1135 .tlb_sync = arm_smmu_tlb_sync,
Patrick Dalyc11d1082016-09-01 15:52:44 -07001136 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
1137 .free_pages_exact = arm_smmu_free_pages_exact,
Will Deacon518f7132014-11-14 17:17:54 +00001138};
1139
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001140static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
1141 dma_addr_t iova, u32 fsr)
1142{
1143 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001144 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001145 phys_addr_t phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07001146 phys_addr_t phys_post_tlbiall;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001147
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001148 phys = arm_smmu_iova_to_phys_hard(domain, iova);
1149 arm_smmu_tlb_inv_context(smmu_domain);
1150 phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001151
Patrick Dalyad441dd2016-09-15 15:50:46 -07001152 if (phys != phys_post_tlbiall) {
1153 dev_err(smmu->dev,
1154 "ATOS results differed across TLBIALL...\n"
1155 "Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
1156 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001157
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001158 return (phys == 0 ? phys_post_tlbiall : phys);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001159}
1160
Will Deacon45ae7cf2013-06-24 18:31:25 +01001161static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
1162{
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001163 int flags, ret, tmp;
Patrick Daly5ba28112016-08-30 19:18:52 -07001164 u32 fsr, fsynr, resume;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001165 unsigned long iova;
1166 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +01001167 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001168 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1169 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001170 void __iomem *cb_base;
Shalaj Jain04059c52015-03-03 13:34:59 -08001171 void __iomem *gr1_base;
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001172 bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001173 phys_addr_t phys_soft;
Shalaj Jain04059c52015-03-03 13:34:59 -08001174 u32 frsynra;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001175 bool non_fatal_fault = !!(smmu_domain->attributes &
1176 DOMAIN_ATTR_NON_FATAL_FAULTS);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001177
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001178 static DEFINE_RATELIMIT_STATE(_rs,
1179 DEFAULT_RATELIMIT_INTERVAL,
1180 DEFAULT_RATELIMIT_BURST);
1181
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001182 ret = arm_smmu_power_on(smmu->pwr);
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001183 if (ret)
1184 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001185
Shalaj Jain04059c52015-03-03 13:34:59 -08001186 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001187 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001188 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
1189
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001190 if (!(fsr & FSR_FAULT)) {
1191 ret = IRQ_NONE;
1192 goto out_power_off;
1193 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001194
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001195 if (fatal_asf && (fsr & FSR_ASF)) {
1196 dev_err(smmu->dev,
1197 "Took an address size fault. Refusing to recover.\n");
1198 BUG();
1199 }
1200
Will Deacon45ae7cf2013-06-24 18:31:25 +01001201 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
Patrick Daly5ba28112016-08-30 19:18:52 -07001202 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001203 if (fsr & FSR_TF)
1204 flags |= IOMMU_FAULT_TRANSLATION;
1205 if (fsr & FSR_PF)
1206 flags |= IOMMU_FAULT_PERMISSION;
Mitchel Humpherysdd75e332015-08-19 11:02:33 -07001207 if (fsr & FSR_EF)
1208 flags |= IOMMU_FAULT_EXTERNAL;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001209 if (fsr & FSR_SS)
1210 flags |= IOMMU_FAULT_TRANSACTION_STALLED;
Patrick Daly5ba28112016-08-30 19:18:52 -07001211
Robin Murphyf9a05f02016-04-13 18:13:01 +01001212 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001213 phys_soft = arm_smmu_iova_to_phys(domain, iova);
Shalaj Jain04059c52015-03-03 13:34:59 -08001214 frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
1215 frsynra &= CBFRSYNRA_SID_MASK;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001216 tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
1217 if (!tmp || (tmp == -EBUSY)) {
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001218 dev_dbg(smmu->dev,
1219 "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1220 iova, fsr, fsynr, cfg->cbndx);
1221 dev_dbg(smmu->dev,
1222 "soft iova-to-phys=%pa\n", &phys_soft);
Patrick Daly5ba28112016-08-30 19:18:52 -07001223 ret = IRQ_HANDLED;
Shrenuj Bansald5083c02015-09-18 14:59:09 -07001224 resume = RESUME_TERMINATE;
Patrick Daly5ba28112016-08-30 19:18:52 -07001225 } else {
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001226 phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
1227 fsr);
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001228 if (__ratelimit(&_rs)) {
1229 dev_err(smmu->dev,
1230 "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1231 iova, fsr, fsynr, cfg->cbndx);
1232 dev_err(smmu->dev, "FAR = %016lx\n",
1233 (unsigned long)iova);
1234 dev_err(smmu->dev,
1235 "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
1236 fsr,
1237 (fsr & 0x02) ? "TF " : "",
1238 (fsr & 0x04) ? "AFF " : "",
1239 (fsr & 0x08) ? "PF " : "",
1240 (fsr & 0x10) ? "EF " : "",
1241 (fsr & 0x20) ? "TLBMCF " : "",
1242 (fsr & 0x40) ? "TLBLKF " : "",
1243 (fsr & 0x80) ? "MHF " : "",
1244 (fsr & 0x40000000) ? "SS " : "",
1245 (fsr & 0x80000000) ? "MULTI " : "");
1246 dev_err(smmu->dev,
1247 "soft iova-to-phys=%pa\n", &phys_soft);
Mitchel Humpherysd03b65d2015-11-05 11:50:29 -08001248 if (!phys_soft)
1249 dev_err(smmu->dev,
1250 "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
1251 dev_name(smmu->dev));
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001252 if (phys_atos)
1253 dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
1254 &phys_atos);
1255 else
1256 dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001257 dev_err(smmu->dev, "SID=0x%x\n", frsynra);
1258 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001259 ret = IRQ_NONE;
1260 resume = RESUME_TERMINATE;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001261 if (!non_fatal_fault) {
1262 dev_err(smmu->dev,
1263 "Unhandled arm-smmu context fault!\n");
1264 BUG();
1265 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001266 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001267
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001268 /*
1269 * If the client returns -EBUSY, do not clear FSR and do not RESUME
1270 * if stalled. This is required to keep the IOMMU client stalled on
1271 * the outstanding fault. This gives the client a chance to take any
1272 * debug action and then terminate the stalled transaction.
1273 * So, the sequence in case of stall on fault should be:
1274 * 1) Do not clear FSR or write to RESUME here
1275 * 2) Client takes any debug action
1276 * 3) Client terminates the stalled transaction and resumes the IOMMU
1277 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1278 * not before so that the fault remains outstanding. This ensures
1279 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1280 * need to be terminated.
1281 */
1282 if (tmp != -EBUSY) {
1283 /* Clear the faulting FSR */
1284 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001285
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001286 /*
1287 * Barrier required to ensure that the FSR is cleared
1288 * before resuming SMMU operation
1289 */
1290 wmb();
1291
1292 /* Retry or terminate any stalled transactions */
1293 if (fsr & FSR_SS)
1294 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1295 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001296
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001297out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001298 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001299
Patrick Daly5ba28112016-08-30 19:18:52 -07001300 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001301}
1302
1303static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1304{
1305 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1306 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001307 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001308
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001309 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001310 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001311
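	/* Snapshot the global fault status and syndrome registers */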
Will Deacon45ae7cf2013-06-24 18:31:25 +01001312 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1313 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1314 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1315 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1316
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001317 if (!gfsr) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001318 arm_smmu_power_off(smmu->pwr);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001319 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001320 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001321
Will Deacon45ae7cf2013-06-24 18:31:25 +01001322 dev_err_ratelimited(smmu->dev,
1323 "Unexpected global fault, this could be serious\n");
1324 dev_err_ratelimited(smmu->dev,
1325 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1326 gfsr, gfsynr0, gfsynr1, gfsynr2);
1327
1328 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001329 arm_smmu_power_off(smmu->pwr);
Will Deaconadaba322013-07-31 19:21:26 +01001330 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001331}
1332
Will Deacon518f7132014-11-14 17:17:54 +00001333static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1334 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001335{
Robin Murphyb94df6f2016-08-11 17:44:06 +01001336 u32 reg, reg2;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001337 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001338 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +01001339 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1340 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001341 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001342
Will Deacon45ae7cf2013-06-24 18:31:25 +01001343 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001344 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1345 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001346
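	/* CBA2R */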
Will Deacon4a1c93c2015-03-04 12:21:03 +00001347 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001348 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1349 reg = CBA2R_RW64_64BIT;
1350 else
1351 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001352 /* 16-bit VMIDs live in CBA2R */
1353 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001354 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001355
Will Deacon4a1c93c2015-03-04 12:21:03 +00001356 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
1357 }
1358
Will Deacon45ae7cf2013-06-24 18:31:25 +01001359 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001360 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001361 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001362 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001363
Will Deacon57ca90f2014-02-06 14:59:05 +00001364 /*
1365 * Use the weakest shareability/memory types, so they are
1366 * overridden by the ttbcr/pte.
1367 */
1368 if (stage1) {
1369 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1370 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001371 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1372 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001373 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001374 }
Will Deacon44680ee2014-06-25 11:29:12 +01001375 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001376
Will Deacon518f7132014-11-14 17:17:54 +00001377 /* TTBRs */
1378 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001379 u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001380
Robin Murphyb94df6f2016-08-11 17:44:06 +01001381 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1382 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
1383 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
1384 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
1385 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
1386 writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
1387 } else {
1388 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1389 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1390 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
1391 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
1392 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1393 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
1394 }
Will Deacon518f7132014-11-14 17:17:54 +00001395 } else {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001396 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001397 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +00001398 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001399
Will Deacon518f7132014-11-14 17:17:54 +00001400 /* TTBCR */
1401 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001402 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1403 reg = pgtbl_cfg->arm_v7s_cfg.tcr;
1404 reg2 = 0;
1405 } else {
1406 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1407 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
1408 reg2 |= TTBCR2_SEP_UPSTREAM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001409 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001410 if (smmu->version > ARM_SMMU_V1)
1411 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001412 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001413 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001414 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001415 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001416
Will Deacon518f7132014-11-14 17:17:54 +00001417 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001418 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001419 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1420 reg = pgtbl_cfg->arm_v7s_cfg.prrr;
1421 reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
1422 } else {
1423 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1424 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1425 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001426 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001427 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001428 }
1429
Will Deacon45ae7cf2013-06-24 18:31:25 +01001430 /* SCTLR */
Robin Murphyb94df6f2016-08-11 17:44:06 +01001431 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001432
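	/* Leave translation disabled (SCTLR.M clear) for stage-1 domains marked S1_BYPASS or EARLY_MAP */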
1433 if ((!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
1434 !(smmu_domain->attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) ||
1435 !stage1)
Patrick Dalye62d3362016-03-15 18:58:28 -07001436 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001437 if (stage1)
1438 reg |= SCTLR_S1_ASIDPNE;
1439#ifdef __BIG_ENDIAN
1440 reg |= SCTLR_E;
1441#endif
Will Deacon25724842013-08-21 13:49:53 +01001442 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001443}
1444
Patrick Dalyc190d932016-08-30 17:23:28 -07001445static int arm_smmu_init_asid(struct iommu_domain *domain,
1446 struct arm_smmu_device *smmu)
1447{
1448 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1449 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1450 bool dynamic = is_dynamic_domain(domain);
1451 int ret;
1452
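	/* Static domains derive the ASID from the context bank; dynamic domains allocate one from the IDR */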
1453 if (!dynamic) {
1454 cfg->asid = cfg->cbndx + 1;
1455 } else {
1456 mutex_lock(&smmu->idr_mutex);
1457 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1458 smmu->num_context_banks + 2,
1459 MAX_ASID + 1, GFP_KERNEL);
1460
1461 mutex_unlock(&smmu->idr_mutex);
1462 if (ret < 0) {
1463 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1464 ret);
1465 return ret;
1466 }
1467 cfg->asid = ret;
1468 }
1469 return 0;
1470}
1471
1472static void arm_smmu_free_asid(struct iommu_domain *domain)
1473{
1474 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1475 struct arm_smmu_device *smmu = smmu_domain->smmu;
1476 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1477 bool dynamic = is_dynamic_domain(domain);
1478
1479 if (cfg->asid == INVALID_ASID || !dynamic)
1480 return;
1481
1482 mutex_lock(&smmu->idr_mutex);
1483 idr_remove(&smmu->asid_idr, cfg->asid);
1484 mutex_unlock(&smmu->idr_mutex);
1485}
1486
Will Deacon45ae7cf2013-06-24 18:31:25 +01001487static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Patrick Dalyea63baa2017-02-13 17:11:33 -08001488 struct arm_smmu_device *smmu,
1489 struct device *dev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001490{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001491 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001492 unsigned long ias, oas;
1493 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001494 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001495 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001496 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001497 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyce6786f2016-11-09 14:19:23 -08001498 unsigned long quirks = 0;
Patrick Dalyc190d932016-08-30 17:23:28 -07001499 bool dynamic;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001500
Will Deacon518f7132014-11-14 17:17:54 +00001501 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001502 if (smmu_domain->smmu)
1503 goto out_unlock;
1504
Patrick Dalyc190d932016-08-30 17:23:28 -07001505 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1506 smmu_domain->cfg.asid = INVALID_ASID;
1507
Patrick Dalyc190d932016-08-30 17:23:28 -07001508 dynamic = is_dynamic_domain(domain);
1509 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1510 dev_err(smmu->dev, "dynamic domains not supported\n");
1511 ret = -EPERM;
1512 goto out_unlock;
1513 }
1514
Will Deaconc752ce42014-06-25 22:46:31 +01001515 /*
1516 * Mapping the requested stage onto what we support is surprisingly
1517 * complicated, mainly because the spec allows S1+S2 SMMUs without
1518 * support for nested translation. That means we end up with the
1519 * following table:
1520 *
1521 * Requested Supported Actual
1522 * S1 N S1
1523 * S1 S1+S2 S1
1524 * S1 S2 S2
1525 * S1 S1 S1
1526 * N N N
1527 * N S1+S2 S2
1528 * N S2 S2
1529 * N S1 S1
1530 *
1531 * Note that you can't actually request stage-2 mappings.
1532 */
1533 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1534 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1535 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1536 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1537
Robin Murphy7602b872016-04-28 17:12:09 +01001538 /*
1539 * Choosing a suitable context format is even more fiddly. Until we
1540 * grow some way for the caller to express a preference, and/or move
1541 * the decision into the io-pgtable code where it arguably belongs,
1542 * just aim for the closest thing to the rest of the system, and hope
1543 * that the hardware isn't esoteric enough that we can't assume AArch64
1544 * support to be a superset of AArch32 support...
1545 */
1546 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1547 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001548 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
1549 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
1550 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
1551 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
1552 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +01001553 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1554 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1555 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1556 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1557 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1558
1559 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1560 ret = -EINVAL;
1561 goto out_unlock;
1562 }
1563
Will Deaconc752ce42014-06-25 22:46:31 +01001564 switch (smmu_domain->stage) {
1565 case ARM_SMMU_DOMAIN_S1:
1566 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1567 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001568 ias = smmu->va_size;
1569 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001570 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001571 fmt = ARM_64_LPAE_S1;
Patrick Daly4423d3e2017-05-04 18:17:51 -07001572 if (smmu->options & ARM_SMMU_OPT_3LVL_TABLES)
1573 ias = min(ias, 39UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001574 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +00001575 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001576 ias = min(ias, 32UL);
1577 oas = min(oas, 40UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001578 } else {
1579 fmt = ARM_V7S;
1580 ias = min(ias, 32UL);
1581 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +01001582 }
Will Deaconc752ce42014-06-25 22:46:31 +01001583 break;
1584 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001585 /*
1586 * We will likely want to change this if/when KVM gets
1587 * involved.
1588 */
Will Deaconc752ce42014-06-25 22:46:31 +01001589 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001590 cfg->cbar = CBAR_TYPE_S2_TRANS;
1591 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001592 ias = smmu->ipa_size;
1593 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001594 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001595 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001596 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001597 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001598 ias = min(ias, 40UL);
1599 oas = min(oas, 40UL);
1600 }
Will Deaconc752ce42014-06-25 22:46:31 +01001601 break;
1602 default:
1603 ret = -EINVAL;
1604 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001605 }
1606
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001607 if (is_fast)
1608 fmt = ARM_V8L_FAST;
1609
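	/* Collect io-pgtable quirks requested via domain attributes */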
Patrick Dalyce6786f2016-11-09 14:19:23 -08001610 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
1611 quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
Liam Mark53cf2342016-12-20 11:36:07 -08001612 if (is_iommu_pt_coherent(smmu_domain))
1613 quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001614
Patrick Dalyc190d932016-08-30 17:23:28 -07001615 /* Dynamic domains must set cbndx through domain attribute */
1616 if (!dynamic) {
1617 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
Will Deacon45ae7cf2013-06-24 18:31:25 +01001618 smmu->num_context_banks);
Patrick Dalyc190d932016-08-30 17:23:28 -07001619 if (ret < 0)
1620 goto out_unlock;
1621 cfg->cbndx = ret;
1622 }
Robin Murphyb7862e32016-04-13 18:13:03 +01001623 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001624 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1625 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001626 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001627 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001628 }
1629
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001630 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
Patrick Dalyce6786f2016-11-09 14:19:23 -08001631 .quirks = quirks,
Robin Murphyd5466352016-05-09 17:20:09 +01001632 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +00001633 .ias = ias,
1634 .oas = oas,
1635 .tlb = &arm_smmu_gather_ops,
Robin Murphy2df7a252015-07-29 19:46:06 +01001636 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +00001637 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001638
Will Deacon518f7132014-11-14 17:17:54 +00001639 smmu_domain->smmu = smmu;
Patrick Dalyea63baa2017-02-13 17:11:33 -08001640 smmu_domain->dev = dev;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001641 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1642 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001643 if (!pgtbl_ops) {
1644 ret = -ENOMEM;
1645 goto out_clear_smmu;
1646 }
1647
Patrick Dalyc11d1082016-09-01 15:52:44 -07001648 /*
 1649	 * Assign any page-table memory that might have been allocated
 1650	 * during alloc_io_pgtable_ops().
1651 */
Patrick Dalye271f212016-10-04 13:24:49 -07001652 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001653 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001654 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001655
Robin Murphyd5466352016-05-09 17:20:09 +01001656 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001657 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Robin Murphyd7a8d042016-09-12 17:13:58 +01001658 domain->geometry.aperture_end = (1UL << ias) - 1;
1659 domain->geometry.force_aperture = true;
Will Deacon518f7132014-11-14 17:17:54 +00001660
Patrick Dalyc190d932016-08-30 17:23:28 -07001661 /* Assign an asid */
1662 ret = arm_smmu_init_asid(domain, smmu);
1663 if (ret)
1664 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001665
Patrick Dalyc190d932016-08-30 17:23:28 -07001666 if (!dynamic) {
1667 /* Initialise the context bank with our page table cfg */
1668 arm_smmu_init_context_bank(smmu_domain,
1669 &smmu_domain->pgtbl_cfg);
1670
1671 /*
1672 * Request context fault interrupt. Do this last to avoid the
1673 * handler seeing a half-initialised domain state.
1674 */
1675 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1676 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001677 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1678 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001679 if (ret < 0) {
1680 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1681 cfg->irptndx, irq);
1682 cfg->irptndx = INVALID_IRPTNDX;
1683 goto out_clear_smmu;
1684 }
1685 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001686 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001687 }
Will Deacon518f7132014-11-14 17:17:54 +00001688 mutex_unlock(&smmu_domain->init_mutex);
1689
1690 /* Publish page table ops for map/unmap */
1691 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001692 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001693
Will Deacon518f7132014-11-14 17:17:54 +00001694out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001695 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001696 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001697out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001698 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001699 return ret;
1700}
1701
Patrick Daly77db4f92016-10-14 15:34:10 -07001702static void arm_smmu_domain_reinit(struct arm_smmu_domain *smmu_domain)
1703{
1704 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1705 smmu_domain->cfg.cbndx = INVALID_CBNDX;
1706 smmu_domain->secure_vmid = VMID_INVAL;
1707}
1708
Will Deacon45ae7cf2013-06-24 18:31:25 +01001709static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1710{
Joerg Roedel1d672632015-03-26 13:43:10 +01001711 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001712 struct arm_smmu_device *smmu = smmu_domain->smmu;
1713 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01001714 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001715 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07001716 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001717 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001718
Robin Murphy7e96c742016-09-14 15:26:46 +01001719 if (!smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001720 return;
1721
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001722 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001723 if (ret) {
 1724		WARN_ONCE(ret, "Whoops, powering on smmu %p failed. Leaking context bank\n",
1725 smmu);
1726 return;
1727 }
1728
Patrick Dalyc190d932016-08-30 17:23:28 -07001729 dynamic = is_dynamic_domain(domain);
1730 if (dynamic) {
1731 arm_smmu_free_asid(domain);
1732 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001733 arm_smmu_power_off(smmu->pwr);
Patrick Dalye271f212016-10-04 13:24:49 -07001734 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001735 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001736 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001737 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly77db4f92016-10-14 15:34:10 -07001738 arm_smmu_domain_reinit(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001739 return;
1740 }
1741
Will Deacon518f7132014-11-14 17:17:54 +00001742 /*
 1743	 * Disable the context bank and free its page tables before
 1744	 * releasing the context bank itself.
1745 */
Will Deacon44680ee2014-06-25 11:29:12 +01001746 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001747 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01001748
Will Deacon44680ee2014-06-25 11:29:12 +01001749 if (cfg->irptndx != INVALID_IRPTNDX) {
1750 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08001751 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001752 }
1753
Markus Elfring44830b02015-11-06 18:32:41 +01001754 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07001755 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001756 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001757 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001758 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001759 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001760
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001761 arm_smmu_power_off(smmu->pwr);
Patrick Daly77db4f92016-10-14 15:34:10 -07001762 arm_smmu_domain_reinit(smmu_domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001763}
1764
Joerg Roedel1d672632015-03-26 13:43:10 +01001765static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001766{
1767 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001768
Patrick Daly09801312016-08-29 17:02:52 -07001769	/* Only IOMMU_DOMAIN_UNMANAGED is supported for now (no DOMAIN_DMA) */
1770 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01001771 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001772 /*
1773 * Allocate the domain and initialise some of its data structures.
1774 * We can't really do anything meaningful until we've added a
1775 * master.
1776 */
1777 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1778 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001779 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001780
Robin Murphy7e96c742016-09-14 15:26:46 +01001781 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
1782 iommu_get_dma_cookie(&smmu_domain->domain))) {
Robin Murphy9adb9592016-01-26 18:06:36 +00001783 kfree(smmu_domain);
1784 return NULL;
1785 }
1786
Will Deacon518f7132014-11-14 17:17:54 +00001787 mutex_init(&smmu_domain->init_mutex);
1788 spin_lock_init(&smmu_domain->pgtbl_lock);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001789 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
1790 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07001791 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001792 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Patrick Daly77db4f92016-10-14 15:34:10 -07001793 arm_smmu_domain_reinit(smmu_domain);
Joerg Roedel1d672632015-03-26 13:43:10 +01001794
1795 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001796}
1797
Joerg Roedel1d672632015-03-26 13:43:10 +01001798static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001799{
Joerg Roedel1d672632015-03-26 13:43:10 +01001800 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01001801
1802 /*
1803 * Free the domain resources. We assume that all devices have
1804 * already been detached.
1805 */
Robin Murphy9adb9592016-01-26 18:06:36 +00001806 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001807 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001808 kfree(smmu_domain);
1809}
1810
Robin Murphy468f4942016-09-12 17:13:49 +01001811static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
1812{
1813 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphyd5b41782016-09-14 15:21:39 +01001814 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
Robin Murphy468f4942016-09-12 17:13:49 +01001815
1816 if (smr->valid)
1817 reg |= SMR_VALID;
1818 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
1819}
1820
Robin Murphya754fd12016-09-12 17:13:50 +01001821static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
1822{
1823 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
1824 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
1825 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
1826 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
1827
1828 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
1829}
1830
1831static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
1832{
1833 arm_smmu_write_s2cr(smmu, idx);
1834 if (smmu->smrs)
1835 arm_smmu_write_smr(smmu, idx);
1836}
1837
Robin Murphy6668f692016-09-12 17:13:54 +01001838static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy468f4942016-09-12 17:13:49 +01001839{
1840 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy6668f692016-09-12 17:13:54 +01001841 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001842
Robin Murphy6668f692016-09-12 17:13:54 +01001843 /* Stream indexing is blissfully easy */
1844 if (!smrs)
1845 return id;
Robin Murphy468f4942016-09-12 17:13:49 +01001846
Robin Murphy6668f692016-09-12 17:13:54 +01001847 /* Validating SMRs is... less so */
1848 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1849 if (!smrs[i].valid) {
1850 /*
1851 * Note the first free entry we come across, which
1852 * we'll claim in the end if nothing else matches.
1853 */
1854 if (free_idx < 0)
1855 free_idx = i;
Robin Murphy468f4942016-09-12 17:13:49 +01001856 continue;
1857 }
Robin Murphy6668f692016-09-12 17:13:54 +01001858 /*
1859 * If the new entry is _entirely_ matched by an existing entry,
1860 * then reuse that, with the guarantee that there also cannot
1861 * be any subsequent conflicting entries. In normal use we'd
1862 * expect simply identical entries for this case, but there's
1863 * no harm in accommodating the generalisation.
1864 */
1865 if ((mask & smrs[i].mask) == mask &&
1866 !((id ^ smrs[i].id) & ~smrs[i].mask))
1867 return i;
1868 /*
1869 * If the new entry has any other overlap with an existing one,
1870 * though, then there always exists at least one stream ID
1871 * which would cause a conflict, and we can't allow that risk.
1872 */
1873 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
1874 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001875 }
1876
Robin Murphy6668f692016-09-12 17:13:54 +01001877 return free_idx;
1878}
1879
1880static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1881{
1882 if (--smmu->s2crs[idx].count)
1883 return false;
1884
1885 smmu->s2crs[idx] = s2cr_init_val;
1886 if (smmu->smrs)
1887 smmu->smrs[idx].valid = false;
1888
1889 return true;
1890}
1891
1892static int arm_smmu_master_alloc_smes(struct device *dev)
1893{
Robin Murphy06e393e2016-09-12 17:13:55 +01001894 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1895 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy6668f692016-09-12 17:13:54 +01001896 struct arm_smmu_device *smmu = cfg->smmu;
1897 struct arm_smmu_smr *smrs = smmu->smrs;
1898 struct iommu_group *group;
1899 int i, idx, ret;
1900
1901 mutex_lock(&smmu->stream_map_mutex);
1902 /* Figure out a viable stream map entry allocation */
Robin Murphy06e393e2016-09-12 17:13:55 +01001903 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy7e96c742016-09-14 15:26:46 +01001904 u16 sid = fwspec->ids[i];
1905 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1906
Robin Murphy6668f692016-09-12 17:13:54 +01001907 if (idx != INVALID_SMENDX) {
1908 ret = -EEXIST;
1909 goto out_err;
1910 }
1911
Robin Murphy7e96c742016-09-14 15:26:46 +01001912 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy6668f692016-09-12 17:13:54 +01001913 if (ret < 0)
1914 goto out_err;
1915
1916 idx = ret;
1917 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy7e96c742016-09-14 15:26:46 +01001918 smrs[idx].id = sid;
1919 smrs[idx].mask = mask;
Robin Murphy6668f692016-09-12 17:13:54 +01001920 smrs[idx].valid = true;
1921 }
1922 smmu->s2crs[idx].count++;
1923 cfg->smendx[i] = (s16)idx;
1924 }
1925
1926 group = iommu_group_get_for_dev(dev);
1927 if (!group)
1928 group = ERR_PTR(-ENOMEM);
1929 if (IS_ERR(group)) {
1930 ret = PTR_ERR(group);
1931 goto out_err;
1932 }
1933 iommu_group_put(group);
Robin Murphy468f4942016-09-12 17:13:49 +01001934
Will Deacon45ae7cf2013-06-24 18:31:25 +01001935 /* It worked! Now, poke the actual hardware */
Robin Murphy06e393e2016-09-12 17:13:55 +01001936 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01001937 arm_smmu_write_sme(smmu, idx);
1938 smmu->s2crs[idx].group = group;
1939 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001940
Robin Murphy6668f692016-09-12 17:13:54 +01001941 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001942 return 0;
1943
Robin Murphy6668f692016-09-12 17:13:54 +01001944out_err:
Robin Murphy468f4942016-09-12 17:13:49 +01001945 while (i--) {
Robin Murphy6668f692016-09-12 17:13:54 +01001946 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy468f4942016-09-12 17:13:49 +01001947 cfg->smendx[i] = INVALID_SMENDX;
1948 }
Robin Murphy6668f692016-09-12 17:13:54 +01001949 mutex_unlock(&smmu->stream_map_mutex);
1950 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001951}
1952
Robin Murphy06e393e2016-09-12 17:13:55 +01001953static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001954{
Robin Murphy06e393e2016-09-12 17:13:55 +01001955 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1956 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy1fb519a2016-09-12 17:13:53 +01001957 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01001958
Robin Murphy6668f692016-09-12 17:13:54 +01001959 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01001960 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01001961 if (arm_smmu_free_sme(smmu, idx))
1962 arm_smmu_write_sme(smmu, idx);
Robin Murphy468f4942016-09-12 17:13:49 +01001963 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001964 }
Robin Murphy6668f692016-09-12 17:13:54 +01001965 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001966}
1967
Will Deacon45ae7cf2013-06-24 18:31:25 +01001968static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphy06e393e2016-09-12 17:13:55 +01001969 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001970{
Will Deacon44680ee2014-06-25 11:29:12 +01001971 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01001972 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
1973 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
1974 u8 cbndx = smmu_domain->cfg.cbndx;
Robin Murphy6668f692016-09-12 17:13:54 +01001975 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001976
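	/* Point each of this master's stream-map entries at the domain's context bank */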
Robin Murphy06e393e2016-09-12 17:13:55 +01001977 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphya754fd12016-09-12 17:13:50 +01001978 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy6668f692016-09-12 17:13:54 +01001979 continue;
Robin Murphya754fd12016-09-12 17:13:50 +01001980
1981 s2cr[idx].type = type;
1982 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
1983 s2cr[idx].cbndx = cbndx;
1984 arm_smmu_write_s2cr(smmu, idx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001985 }
1986
1987 return 0;
1988}
1989
Patrick Daly09801312016-08-29 17:02:52 -07001990static void arm_smmu_detach_dev(struct iommu_domain *domain,
1991 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001992{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001993 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07001994 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Daly09801312016-08-29 17:02:52 -07001995 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07001996 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07001997
1998 if (dynamic)
1999 return;
2000
Patrick Daly09801312016-08-29 17:02:52 -07002001 if (!smmu) {
2002 dev_err(dev, "Domain not attached; cannot detach!\n");
2003 return;
2004 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002005
Patrick Daly8befb662016-08-17 20:03:28 -07002006	/* Remove the additional power vote taken at attach for atomic domains */
2007 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002008 WARN_ON(arm_smmu_power_on_atomic(smmu->pwr));
2009 arm_smmu_power_off(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07002010 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002011}
2012
Patrick Dalye271f212016-10-04 13:24:49 -07002013static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07002014{
Patrick Dalye271f212016-10-04 13:24:49 -07002015 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002016 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2017 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
2018 int source_vmid = VMID_HLOS;
2019 struct arm_smmu_pte_info *pte_info, *temp;
2020
Patrick Dalye271f212016-10-04 13:24:49 -07002021 if (!arm_smmu_is_domain_secure(smmu_domain))
2022 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002023
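	/* Grant HLOS (read-write) and the domain's secure VMID (read-only) access to each newly allocated page-table page */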
Patrick Dalye271f212016-10-04 13:24:49 -07002024 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002025 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2026 PAGE_SIZE, &source_vmid, 1,
2027 dest_vmids, dest_perms, 2);
2028 if (WARN_ON(ret))
2029 break;
2030 }
2031
2032 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2033 entry) {
2034 list_del(&pte_info->entry);
2035 kfree(pte_info);
2036 }
Patrick Dalye271f212016-10-04 13:24:49 -07002037 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002038}
2039
2040static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2041{
2042 int ret;
2043 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002044 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002045 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2046 struct arm_smmu_pte_info *pte_info, *temp;
2047
Patrick Dalye271f212016-10-04 13:24:49 -07002048 if (!arm_smmu_is_domain_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002049 return;
2050
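	/* Return the freed page-table pages to exclusive HLOS ownership before releasing them */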
2051 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2052 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2053 PAGE_SIZE, source_vmlist, 2,
2054 &dest_vmids, &dest_perms, 1);
2055 if (WARN_ON(ret))
2056 break;
2057 free_pages_exact(pte_info->virt_addr, pte_info->size);
2058 }
2059
2060 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2061 entry) {
2062 list_del(&pte_info->entry);
2063 kfree(pte_info);
2064 }
2065}
2066
2067static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2068{
2069 struct arm_smmu_domain *smmu_domain = cookie;
2070 struct arm_smmu_pte_info *pte_info;
2071
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002072 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002073
2074 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2075 if (!pte_info)
2076 return;
2077
2078 pte_info->virt_addr = addr;
2079 pte_info->size = size;
2080 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2081}
2082
2083static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2084{
2085 struct arm_smmu_domain *smmu_domain = cookie;
2086 struct arm_smmu_pte_info *pte_info;
2087
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002088 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002089
2090 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2091 if (!pte_info)
2092 return -ENOMEM;
2093 pte_info->virt_addr = addr;
2094 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2095 return 0;
2096}
2097
Will Deacon45ae7cf2013-06-24 18:31:25 +01002098static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2099{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002100 int ret;
Robin Murphy06e393e2016-09-12 17:13:55 +01002101 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Will Deacon518f7132014-11-14 17:17:54 +00002102 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +01002103 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly8befb662016-08-17 20:03:28 -07002104 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002105
Robin Murphy06e393e2016-09-12 17:13:55 +01002106 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002107 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2108 return -ENXIO;
2109 }
Robin Murphy06e393e2016-09-12 17:13:55 +01002110
Robin Murphy4f79b142016-10-17 12:06:21 +01002111 /*
2112 * FIXME: The arch/arm DMA API code tries to attach devices to its own
2113 * domains between of_xlate() and add_device() - we have no way to cope
2114 * with that, so until ARM gets converted to rely on groups and default
2115 * domains, just say no (but more politely than by dereferencing NULL).
2116 * This should be at least a WARN_ON once that's sorted.
2117 */
2118 if (!fwspec->iommu_priv)
2119 return -ENODEV;
2120
Robin Murphy06e393e2016-09-12 17:13:55 +01002121 smmu = fwspec_smmu(fwspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002122
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002123 /* Enable Clocks and Power */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002124 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002125 if (ret)
2126 return ret;
2127
Will Deacon518f7132014-11-14 17:17:54 +00002128 /* Ensure that the domain is finalised */
Patrick Dalyea63baa2017-02-13 17:11:33 -08002129 ret = arm_smmu_init_domain_context(domain, smmu, dev);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002130 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002131 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002132
Patrick Dalyc190d932016-08-30 17:23:28 -07002133 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002134 if (is_dynamic_domain(domain)) {
2135 ret = 0;
2136 goto out_power_off;
2137 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002138
Will Deacon45ae7cf2013-06-24 18:31:25 +01002139 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002140 * Sanity check the domain. We don't support domains across
2141 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002142 */
Robin Murphy06e393e2016-09-12 17:13:55 +01002143 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002144 dev_err(dev,
2145 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002146 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002147 ret = -EINVAL;
2148 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002149 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002150
2151 /* Looks ok, so add the device to the domain */
Robin Murphy06e393e2016-09-12 17:13:55 +01002152 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002153
2154out_power_off:
Patrick Daly466237d2016-10-14 15:59:14 -07002155 /*
 2156	 * Keep an additional vote for non-atomic power until the domain is
 2157	 * detached.
2158 */
2159 if (!ret && atomic_domain) {
2160 WARN_ON(arm_smmu_power_on(smmu->pwr));
2161 arm_smmu_power_off_atomic(smmu->pwr);
2162 }
2163
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002164 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002165
Will Deacon45ae7cf2013-06-24 18:31:25 +01002166 return ret;
2167}
2168
Will Deacon45ae7cf2013-06-24 18:31:25 +01002169static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002170 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002171{
Will Deacon518f7132014-11-14 17:17:54 +00002172 int ret;
2173 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002174 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002175	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002176
Will Deacon518f7132014-11-14 17:17:54 +00002177 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002178 return -ENODEV;
2179
Patrick Dalye271f212016-10-04 13:24:49 -07002180 arm_smmu_secure_domain_lock(smmu_domain);
2181
Will Deacon518f7132014-11-14 17:17:54 +00002182 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2183 ret = ops->map(ops, iova, paddr, size, prot);
2184 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002185
2186 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002187 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002188
Will Deacon518f7132014-11-14 17:17:54 +00002189 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002190}
2191
2192static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2193 size_t size)
2194{
Will Deacon518f7132014-11-14 17:17:54 +00002195 size_t ret;
2196 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002197 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002198	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002199
Will Deacon518f7132014-11-14 17:17:54 +00002200 if (!ops)
2201 return 0;
2202
Patrick Daly8befb662016-08-17 20:03:28 -07002203 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002204 if (ret)
2205 return ret;
2206
Patrick Dalye271f212016-10-04 13:24:49 -07002207 arm_smmu_secure_domain_lock(smmu_domain);
2208
Will Deacon518f7132014-11-14 17:17:54 +00002209 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2210 ret = ops->unmap(ops, iova, size);
2211 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002212
Patrick Daly8befb662016-08-17 20:03:28 -07002213 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002214 /*
2215 * While splitting up block mappings, we might allocate page table
 2216	 * memory during unmap, so the VMIDs need to be assigned to the
2217 * memory here as well.
2218 */
2219 arm_smmu_assign_table(smmu_domain);
 2220	/* Also unassign any pages that were freed during unmap */
2221 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002222 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002223 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002224}
2225
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002226static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2227 struct scatterlist *sg, unsigned int nents, int prot)
2228{
2229 int ret;
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002230 size_t size;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002231 unsigned long flags;
2232 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2233 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2234
2235 if (!ops)
2236 return -ENODEV;
2237
Patrick Daly8befb662016-08-17 20:03:28 -07002238 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002239 if (ret)
2240 return ret;
2241
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002242 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002243 ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002244 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002245
2246 if (!ret)
2247 arm_smmu_unmap(domain, iova, size);
2248
Patrick Daly8befb662016-08-17 20:03:28 -07002249 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002250 arm_smmu_assign_table(smmu_domain);
2251
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002252 return ret;
2253}
2254
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002255static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyad441dd2016-09-15 15:50:46 -07002256 dma_addr_t iova)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002257{
Joerg Roedel1d672632015-03-26 13:43:10 +01002258 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002259 struct arm_smmu_device *smmu = smmu_domain->smmu;
2260 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 2261	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2262 struct device *dev = smmu->dev;
2263 void __iomem *cb_base;
2264 u32 tmp;
2265 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002266 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002267
2268 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2269
Robin Murphy661d9622015-05-27 17:09:34 +01002270 /* ATS1 registers can only be written atomically */
2271 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002272 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002273 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2274 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002275 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002276
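	/* Poll ATSR until the translation completes; fall back to a software table walk on timeout */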
2277 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2278 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002279 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002280 dev_err(dev,
2281 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2282 &iova, &phys);
2283 phys = 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002284 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002285 }
2286
Robin Murphyf9a05f02016-04-13 18:13:01 +01002287 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002288 if (phys & CB_PAR_F) {
2289 dev_err(dev, "translation fault!\n");
2290 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002291 phys = 0;
2292 } else {
2293 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002294 }
Patrick Dalyad441dd2016-09-15 15:50:46 -07002295
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002296 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002297}
2298
Will Deacon45ae7cf2013-06-24 18:31:25 +01002299static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002300 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002301{
Will Deacon518f7132014-11-14 17:17:54 +00002302 phys_addr_t ret;
2303 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002304 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002305	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002306
Will Deacon518f7132014-11-14 17:17:54 +00002307 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002308 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002309
Will Deacon518f7132014-11-14 17:17:54 +00002310 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002311 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002312 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002313
Will Deacon518f7132014-11-14 17:17:54 +00002314 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002315}
2316
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002317/*
 2318 * This function can sleep, so it cannot be called from atomic context. It
 2319 * will power on the register block if required. This restriction does not
 2320 * apply to the original iova_to_phys() op.
2321 */
2322static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2323 dma_addr_t iova)
2324{
2325 phys_addr_t ret = 0;
2326 unsigned long flags;
2327 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002328
Patrick Dalyad441dd2016-09-15 15:50:46 -07002329 if (smmu_domain->smmu->arch_ops &&
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002330 smmu_domain->smmu->arch_ops->iova_to_phys_hard) {
2331 ret = smmu_domain->smmu->arch_ops->iova_to_phys_hard(
Patrick Dalyad441dd2016-09-15 15:50:46 -07002332 domain, iova);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002333 return ret;
2334 }
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002335
2336 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2337 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2338 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyad441dd2016-09-15 15:50:46 -07002339 ret = __arm_smmu_iova_to_phys_hard(domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002340
2341 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2342
2343 return ret;
2344}
2345
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002346static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002347{
Will Deacond0948942014-06-24 17:30:10 +01002348 switch (cap) {
2349 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002350 /*
2351 * Return true here as the SMMU can always send out coherent
2352 * requests.
2353 */
2354 return true;
Will Deacond0948942014-06-24 17:30:10 +01002355 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002356 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002357 case IOMMU_CAP_NOEXEC:
2358 return true;
Will Deacond0948942014-06-24 17:30:10 +01002359 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002360 return false;
Will Deacond0948942014-06-24 17:30:10 +01002361 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002362}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002363
Patrick Daly8e3371a2017-02-13 22:14:53 -08002364static struct arm_smmu_device *arm_smmu_get_by_list(struct device_node *np)
2365{
2366 struct arm_smmu_device *smmu;
2367 unsigned long flags;
2368
2369 spin_lock_irqsave(&arm_smmu_devices_lock, flags);
2370 list_for_each_entry(smmu, &arm_smmu_devices, list) {
2371 if (smmu->dev->of_node == np) {
2372 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2373 return smmu;
2374 }
2375 }
2376 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2377 return NULL;
2378}
2379
Robin Murphy7e96c742016-09-14 15:26:46 +01002380static int arm_smmu_match_node(struct device *dev, void *data)
2381{
2382 return dev->of_node == data;
2383}
2384
2385static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
2386{
2387 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
2388 np, arm_smmu_match_node);
2389 put_device(dev);
Patrick Daly8e3371a2017-02-13 22:14:53 -08002390 return dev ? dev_get_drvdata(dev) : arm_smmu_get_by_list(np);
Robin Murphy7e96c742016-09-14 15:26:46 +01002391}
2392
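/*
 * Resolve the SMMU that owns this master (via the legacy "mmu-masters"
 * binding or the generic iommu fwspec), validate its stream IDs and SMR
 * masks against the hardware limits, then allocate the per-master config
 * and stream-mapping entries.
 */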
Will Deacon03edb222015-01-19 14:27:33 +00002393static int arm_smmu_add_device(struct device *dev)
2394{
Robin Murphy06e393e2016-09-12 17:13:55 +01002395 struct arm_smmu_device *smmu;
Robin Murphyd5b41782016-09-14 15:21:39 +01002396 struct arm_smmu_master_cfg *cfg;
Robin Murphy7e96c742016-09-14 15:26:46 +01002397 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyd5b41782016-09-14 15:21:39 +01002398 int i, ret;
2399
Robin Murphy7e96c742016-09-14 15:26:46 +01002400 if (using_legacy_binding) {
2401 ret = arm_smmu_register_legacy_master(dev, &smmu);
2402 fwspec = dev->iommu_fwspec;
2403 if (ret)
2404 goto out_free;
Robin Murphy22e6f6c2016-11-02 17:31:32 +00002405 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002406 smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
2407 if (!smmu)
2408 return -ENODEV;
2409 } else {
2410 return -ENODEV;
2411 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002412
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002413 ret = arm_smmu_power_on(smmu->pwr);
2414 if (ret)
2415 goto out_free;
2416
Robin Murphyd5b41782016-09-14 15:21:39 +01002417 ret = -EINVAL;
Robin Murphy06e393e2016-09-12 17:13:55 +01002418 for (i = 0; i < fwspec->num_ids; i++) {
2419 u16 sid = fwspec->ids[i];
Robin Murphy7e96c742016-09-14 15:26:46 +01002420 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyd5b41782016-09-14 15:21:39 +01002421
Robin Murphy06e393e2016-09-12 17:13:55 +01002422 if (sid & ~smmu->streamid_mask) {
Robin Murphyd5b41782016-09-14 15:21:39 +01002423 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002424 sid, smmu->streamid_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002425 goto out_pwr_off;
Robin Murphyd5b41782016-09-14 15:21:39 +01002426 }
Robin Murphy7e96c742016-09-14 15:26:46 +01002427 if (mask & ~smmu->smr_mask_mask) {
2428 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
 2429 mask, smmu->smr_mask_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002430 goto out_pwr_off;
Robin Murphy7e96c742016-09-14 15:26:46 +01002431 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002432 }
Will Deacon03edb222015-01-19 14:27:33 +00002433
Robin Murphy06e393e2016-09-12 17:13:55 +01002434 ret = -ENOMEM;
2435 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
2436 GFP_KERNEL);
2437 if (!cfg)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002438 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002439
2440 cfg->smmu = smmu;
2441 fwspec->iommu_priv = cfg;
2442 while (i--)
2443 cfg->smendx[i] = INVALID_SMENDX;
2444
Robin Murphy6668f692016-09-12 17:13:54 +01002445 ret = arm_smmu_master_alloc_smes(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002446 if (ret)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002447 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002448
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002449 arm_smmu_power_off(smmu->pwr);
Robin Murphy06e393e2016-09-12 17:13:55 +01002450 return 0;
Robin Murphyd5b41782016-09-14 15:21:39 +01002451
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002452out_pwr_off:
2453 arm_smmu_power_off(smmu->pwr);
Robin Murphyd5b41782016-09-14 15:21:39 +01002454out_free:
Robin Murphy06e393e2016-09-12 17:13:55 +01002455 if (fwspec)
2456 kfree(fwspec->iommu_priv);
2457 iommu_fwspec_free(dev);
Robin Murphyd5b41782016-09-14 15:21:39 +01002458 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00002459}
2460
Will Deacon45ae7cf2013-06-24 18:31:25 +01002461static void arm_smmu_remove_device(struct device *dev)
2462{
Robin Murphy06e393e2016-09-12 17:13:55 +01002463 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002464 struct arm_smmu_device *smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002465
Robin Murphy06e393e2016-09-12 17:13:55 +01002466 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyd5b41782016-09-14 15:21:39 +01002467 return;
Robin Murphya754fd12016-09-12 17:13:50 +01002468
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002469 smmu = fwspec_smmu(fwspec);
2470 if (arm_smmu_power_on(smmu->pwr)) {
2471 WARN_ON(1);
2472 return;
2473 }
2474
Robin Murphy06e393e2016-09-12 17:13:55 +01002475 arm_smmu_master_free_smes(fwspec);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002476 iommu_group_remove_device(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002477 kfree(fwspec->iommu_priv);
2478 iommu_fwspec_free(dev);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002479 arm_smmu_power_off(smmu->pwr);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002480}
2481
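/*
 * Masters whose stream IDs resolve to the same stream-mapping entries must
 * share an IOMMU group: reuse a group already recorded in the S2CRs when
 * possible, otherwise fall back to the PCI or generic group allocator.
 */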
Joerg Roedelaf659932015-10-21 23:51:41 +02002482static struct iommu_group *arm_smmu_device_group(struct device *dev)
2483{
Robin Murphy06e393e2016-09-12 17:13:55 +01002484 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2485 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy6668f692016-09-12 17:13:54 +01002486 struct iommu_group *group = NULL;
2487 int i, idx;
2488
Robin Murphy06e393e2016-09-12 17:13:55 +01002489 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002490 if (group && smmu->s2crs[idx].group &&
2491 group != smmu->s2crs[idx].group)
2492 return ERR_PTR(-EINVAL);
2493
2494 group = smmu->s2crs[idx].group;
2495 }
2496
2497 if (group)
2498 return group;
Joerg Roedelaf659932015-10-21 23:51:41 +02002499
2500 if (dev_is_pci(dev))
2501 group = pci_device_group(dev);
2502 else
2503 group = generic_device_group(dev);
2504
Joerg Roedelaf659932015-10-21 23:51:41 +02002505 return group;
2506}
2507
Will Deaconc752ce42014-06-25 22:46:31 +01002508static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
2509 enum iommu_attr attr, void *data)
2510{
Joerg Roedel1d672632015-03-26 13:43:10 +01002511 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002512 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01002513
2514 switch (attr) {
2515 case DOMAIN_ATTR_NESTING:
2516 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
2517 return 0;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08002518 case DOMAIN_ATTR_PT_BASE_ADDR:
2519 *((phys_addr_t *)data) =
2520 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2521 return 0;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002522 case DOMAIN_ATTR_CONTEXT_BANK:
2523 /* context bank index isn't valid until we are attached */
2524 if (smmu_domain->smmu == NULL)
2525 return -ENODEV;
2526
2527 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
2528 ret = 0;
2529 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002530 case DOMAIN_ATTR_TTBR0: {
2531 u64 val;
2532 struct arm_smmu_device *smmu = smmu_domain->smmu;
2533 /* not valid until we are attached */
2534 if (smmu == NULL)
2535 return -ENODEV;
2536
2537 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2538 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
2539 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
2540 << (TTBRn_ASID_SHIFT);
2541 *((u64 *)data) = val;
2542 ret = 0;
2543 break;
2544 }
2545 case DOMAIN_ATTR_CONTEXTIDR:
2546 /* not valid until attached */
2547 if (smmu_domain->smmu == NULL)
2548 return -ENODEV;
2549 *((u32 *)data) = smmu_domain->cfg.procid;
2550 ret = 0;
2551 break;
2552 case DOMAIN_ATTR_PROCID:
2553 *((u32 *)data) = smmu_domain->cfg.procid;
2554 ret = 0;
2555 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002556 case DOMAIN_ATTR_DYNAMIC:
2557 *((int *)data) = !!(smmu_domain->attributes
2558 & (1 << DOMAIN_ATTR_DYNAMIC));
2559 ret = 0;
2560 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002561 case DOMAIN_ATTR_NON_FATAL_FAULTS:
2562 *((int *)data) = !!(smmu_domain->attributes
2563 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
2564 ret = 0;
2565 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07002566 case DOMAIN_ATTR_S1_BYPASS:
2567 *((int *)data) = !!(smmu_domain->attributes
2568 & (1 << DOMAIN_ATTR_S1_BYPASS));
2569 ret = 0;
2570 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002571 case DOMAIN_ATTR_SECURE_VMID:
2572 *((int *)data) = smmu_domain->secure_vmid;
2573 ret = 0;
2574 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08002575 case DOMAIN_ATTR_PGTBL_INFO: {
2576 struct iommu_pgtbl_info *info = data;
2577
2578 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
2579 ret = -ENODEV;
2580 break;
2581 }
2582 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
2583 ret = 0;
2584 break;
2585 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002586 case DOMAIN_ATTR_FAST:
2587 *((int *)data) = !!(smmu_domain->attributes
2588 & (1 << DOMAIN_ATTR_FAST));
2589 ret = 0;
2590 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002591 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2592 *((int *)data) = !!(smmu_domain->attributes &
2593 (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
2594 ret = 0;
2595 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08002596 case DOMAIN_ATTR_EARLY_MAP:
2597 *((int *)data) = !!(smmu_domain->attributes
2598 & (1 << DOMAIN_ATTR_EARLY_MAP));
2599 ret = 0;
2600 break;
Mitchel Humpherys05314f32016-06-07 16:04:40 -07002601 case DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
2602 if (!smmu_domain->smmu)
2603 return -ENODEV;
Liam Mark53cf2342016-12-20 11:36:07 -08002604 *((int *)data) = is_iommu_pt_coherent(smmu_domain);
2605 ret = 0;
2606 break;
2607 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
2608 *((int *)data) = !!(smmu_domain->attributes
2609 & (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
Mitchel Humpherys05314f32016-06-07 16:04:40 -07002610 ret = 0;
2611 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002612 default:
2613 return -ENODEV;
2614 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002615 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002616}
2617
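/*
 * Illustrative client-side sketch (not part of this driver): most of the
 * attributes below can only be configured between iommu_domain_alloc() and
 * iommu_attach_device(), e.g.
 *
 *	int fast = 1;
 *
 *	domain = iommu_domain_alloc(&platform_bus_type);
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_FAST, &fast);
 *	iommu_attach_device(domain, dev);
 */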
2618static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
2619 enum iommu_attr attr, void *data)
2620{
Will Deacon518f7132014-11-14 17:17:54 +00002621 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01002622 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01002623
Will Deacon518f7132014-11-14 17:17:54 +00002624 mutex_lock(&smmu_domain->init_mutex);
2625
Will Deaconc752ce42014-06-25 22:46:31 +01002626 switch (attr) {
2627 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00002628 if (smmu_domain->smmu) {
2629 ret = -EPERM;
2630 goto out_unlock;
2631 }
2632
Will Deaconc752ce42014-06-25 22:46:31 +01002633 if (*(int *)data)
2634 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2635 else
2636 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2637
Will Deacon518f7132014-11-14 17:17:54 +00002638 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002639 case DOMAIN_ATTR_PROCID:
2640 if (smmu_domain->smmu != NULL) {
2641 dev_err(smmu_domain->smmu->dev,
2642 "cannot change procid attribute while attached\n");
2643 ret = -EBUSY;
2644 break;
2645 }
2646 smmu_domain->cfg.procid = *((u32 *)data);
2647 ret = 0;
2648 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002649 case DOMAIN_ATTR_DYNAMIC: {
2650 int dynamic = *((int *)data);
2651
2652 if (smmu_domain->smmu != NULL) {
2653 dev_err(smmu_domain->smmu->dev,
2654 "cannot change dynamic attribute while attached\n");
2655 ret = -EBUSY;
2656 break;
2657 }
2658
2659 if (dynamic)
2660 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
2661 else
2662 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
2663 ret = 0;
2664 break;
2665 }
2666 case DOMAIN_ATTR_CONTEXT_BANK:
2667 /* context bank can't be set while attached */
2668 if (smmu_domain->smmu != NULL) {
2669 ret = -EBUSY;
2670 break;
2671 }
2672 /* ... and it can only be set for dynamic contexts. */
2673 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
2674 ret = -EINVAL;
2675 break;
2676 }
2677
2678 /* this will be validated during attach */
2679 smmu_domain->cfg.cbndx = *((unsigned int *)data);
2680 ret = 0;
2681 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002682 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
2683 u32 non_fatal_faults = *((int *)data);
2684
2685 if (non_fatal_faults)
2686 smmu_domain->attributes |=
2687 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
2688 else
2689 smmu_domain->attributes &=
2690 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
2691 ret = 0;
2692 break;
2693 }
Patrick Dalye62d3362016-03-15 18:58:28 -07002694 case DOMAIN_ATTR_S1_BYPASS: {
2695 int bypass = *((int *)data);
2696
2697 /* bypass can't be changed while attached */
2698 if (smmu_domain->smmu != NULL) {
2699 ret = -EBUSY;
2700 break;
2701 }
2702 if (bypass)
2703 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
2704 else
2705 smmu_domain->attributes &=
2706 ~(1 << DOMAIN_ATTR_S1_BYPASS);
2707
2708 ret = 0;
2709 break;
2710 }
Patrick Daly8befb662016-08-17 20:03:28 -07002711 case DOMAIN_ATTR_ATOMIC:
2712 {
2713 int atomic_ctx = *((int *)data);
2714
2715 /* can't be changed while attached */
2716 if (smmu_domain->smmu != NULL) {
2717 ret = -EBUSY;
2718 break;
2719 }
2720 if (atomic_ctx)
2721 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
2722 else
2723 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
2724 break;
2725 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002726 case DOMAIN_ATTR_SECURE_VMID:
2727 if (smmu_domain->secure_vmid != VMID_INVAL) {
2728 ret = -ENODEV;
2729 WARN(1, "secure vmid already set!");
2730 break;
2731 }
2732 smmu_domain->secure_vmid = *((int *)data);
2733 break;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002734 case DOMAIN_ATTR_FAST:
2735 if (*((int *)data))
2736 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
2737 ret = 0;
2738 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002739 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2740 /* can't be changed while attached */
2741 if (smmu_domain->smmu != NULL) {
2742 ret = -EBUSY;
2743 break;
2744 }
2745 if (*((int *)data))
2746 smmu_domain->attributes |=
2747 1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
2748 ret = 0;
2749 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08002750 case DOMAIN_ATTR_EARLY_MAP: {
2751 int early_map = *((int *)data);
2752
2753 ret = 0;
2754 if (early_map) {
2755 smmu_domain->attributes |=
2756 1 << DOMAIN_ATTR_EARLY_MAP;
2757 } else {
2758 if (smmu_domain->smmu)
2759 ret = arm_smmu_enable_s1_translations(
2760 smmu_domain);
2761
2762 if (!ret)
2763 smmu_domain->attributes &=
2764 ~(1 << DOMAIN_ATTR_EARLY_MAP);
2765 }
2766 break;
2767 }
Liam Mark53cf2342016-12-20 11:36:07 -08002768 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT: {
2769 int force_coherent = *((int *)data);
2770
2771 if (smmu_domain->smmu != NULL) {
2772 dev_err(smmu_domain->smmu->dev,
2773 "cannot change force coherent attribute while attached\n");
2774 ret = -EBUSY;
2775 break;
2776 }
2777
2778 if (force_coherent)
2779 smmu_domain->attributes |=
2780 1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
2781 else
2782 smmu_domain->attributes &=
2783 ~(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT);
2784
2785 ret = 0;
2786 break;
2787 }
2788
Will Deaconc752ce42014-06-25 22:46:31 +01002789 default:
Will Deacon518f7132014-11-14 17:17:54 +00002790 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01002791 }
Will Deacon518f7132014-11-14 17:17:54 +00002792
2793out_unlock:
2794 mutex_unlock(&smmu_domain->init_mutex);
2795 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002796}
2797
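/*
 * Illustrative generic-binding fragment (node name and values are
 * placeholders): with "#iommu-cells = <2>", a master entry such as
 * "iommus = <&smmu 0x420 0x40>;" supplies the stream ID in the first cell
 * and an SMR mask in the second; with a single cell only the stream ID is
 * given. Both are packed into one fwspec ID below.
 */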
Robin Murphy7e96c742016-09-14 15:26:46 +01002798static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
2799{
2800 u32 fwid = 0;
2801
2802 if (args->args_count > 0)
2803 fwid |= (u16)args->args[0];
2804
2805 if (args->args_count > 1)
2806 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
2807
2808 return iommu_fwspec_add_ids(dev, &fwid, 1);
2809}
2810
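/*
 * Called when DOMAIN_ATTR_EARLY_MAP is cleared on an attached domain:
 * set SCTLR.M so the context bank starts translating the mappings that
 * were installed while translation was held off.
 */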
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08002811static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain)
2812{
2813 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2814 struct arm_smmu_device *smmu = smmu_domain->smmu;
2815 void __iomem *cb_base;
2816 u32 reg;
2817 int ret;
2818
2819 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2820 ret = arm_smmu_power_on(smmu->pwr);
2821 if (ret)
2822 return ret;
2823
2824 reg = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
2825 reg |= SCTLR_M;
2826
2827 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
2828 arm_smmu_power_off(smmu->pwr);
2829 return ret;
2830}
2831
Liam Mark3ba41cf2016-12-09 14:39:04 -08002832static bool arm_smmu_is_iova_coherent(struct iommu_domain *domain,
2833 dma_addr_t iova)
2834{
2835 bool ret;
2836 unsigned long flags;
2837 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2838 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2839
2840 if (!ops)
2841 return false;
2842
2843 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2844 ret = ops->is_iova_coherent(ops, iova);
2845 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2846 return ret;
2847}
2848
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002849static void arm_smmu_trigger_fault(struct iommu_domain *domain,
2850 unsigned long flags)
2851{
2852 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2853 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2854 struct arm_smmu_device *smmu;
2855 void __iomem *cb_base;
2856
2857 if (!smmu_domain->smmu) {
2858 pr_err("Can't trigger faults on non-attached domains\n");
2859 return;
2860 }
2861
2862 smmu = smmu_domain->smmu;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002863 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002864 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002865
2866 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2867 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
2868 flags, cfg->cbndx);
2869 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07002870 /* give the interrupt time to fire... */
2871 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002872
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002873 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002874}
2875
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002876static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
2877 unsigned long offset)
2878{
2879 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2880 struct arm_smmu_device *smmu;
2881 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2882 void __iomem *cb_base;
2883 unsigned long val;
2884
2885 if (offset >= SZ_4K) {
2886 pr_err("Invalid offset: 0x%lx\n", offset);
2887 return 0;
2888 }
2889
2890 smmu = smmu_domain->smmu;
2891 if (!smmu) {
2892 WARN(1, "Can't read registers of a detached domain\n");
2893 val = 0;
2894 return val;
2895 }
2896
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002897 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002898 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002899
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002900 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2901 val = readl_relaxed(cb_base + offset);
2902
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002903 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002904 return val;
2905}
2906
2907static void arm_smmu_reg_write(struct iommu_domain *domain,
2908 unsigned long offset, unsigned long val)
2909{
2910 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2911 struct arm_smmu_device *smmu;
2912 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2913 void __iomem *cb_base;
2914
2915 if (offset >= SZ_4K) {
2916 pr_err("Invalid offset: 0x%lx\n", offset);
2917 return;
2918 }
2919
2920 smmu = smmu_domain->smmu;
2921 if (!smmu) {
 2922 WARN(1, "Can't write registers of a detached domain\n");
2923 return;
2924 }
2925
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002926 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002927 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002928
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002929 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2930 writel_relaxed(val, cb_base + offset);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002931
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002932 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002933}
2934
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08002935static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
2936{
2937 arm_smmu_tlb_inv_context(to_smmu_domain(domain));
2938}
2939
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002940static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
2941{
2942 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2943
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002944 return arm_smmu_power_on(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002945}
2946
2947static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
2948{
2949 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2950
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002951 arm_smmu_power_off(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002952}
2953
Will Deacon518f7132014-11-14 17:17:54 +00002954static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01002955 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01002956 .domain_alloc = arm_smmu_domain_alloc,
2957 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01002958 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07002959 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01002960 .map = arm_smmu_map,
2961 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002962 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01002963 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002964 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01002965 .add_device = arm_smmu_add_device,
2966 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02002967 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01002968 .domain_get_attr = arm_smmu_domain_get_attr,
2969 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy7e96c742016-09-14 15:26:46 +01002970 .of_xlate = arm_smmu_of_xlate,
Will Deacon518f7132014-11-14 17:17:54 +00002971 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002972 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002973 .reg_read = arm_smmu_reg_read,
2974 .reg_write = arm_smmu_reg_write,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08002975 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002976 .enable_config_clocks = arm_smmu_enable_config_clocks,
2977 .disable_config_clocks = arm_smmu_disable_config_clocks,
Liam Mark3ba41cf2016-12-09 14:39:04 -08002978 .is_iova_coherent = arm_smmu_is_iova_coherent,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002979};
2980
Patrick Dalyad441dd2016-09-15 15:50:46 -07002981#define IMPL_DEF1_MICRO_MMU_CTRL 0
2982#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
2983#define MICRO_MMU_CTRL_IDLE (1 << 3)
2984
2985/* Definitions for implementation-defined registers */
2986#define ACTLR_QCOM_OSH_SHIFT 28
2987#define ACTLR_QCOM_OSH 1
2988
2989#define ACTLR_QCOM_ISH_SHIFT 29
2990#define ACTLR_QCOM_ISH 1
2991
2992#define ACTLR_QCOM_NSH_SHIFT 30
2993#define ACTLR_QCOM_NSH 1
2994
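/*
 * QSMMUv2 halt/resume handshake: request a halt by setting LOCAL_HALT_REQ in
 * the implementation-defined MICRO_MMU_CTRL register, then poll the IDLE bit
 * until outstanding transactions have drained. Used around ATOS operations
 * and while programming implementation-defined registers.
 */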
2995static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002996{
2997 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002998 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002999
3000 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
3001 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
3002 0, 30000)) {
3003 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
3004 return -EBUSY;
3005 }
3006
3007 return 0;
3008}
3009
Patrick Dalyad441dd2016-09-15 15:50:46 -07003010static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003011{
3012 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3013 u32 reg;
3014
3015 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3016 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
3017 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3018
Patrick Dalyad441dd2016-09-15 15:50:46 -07003019 return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003020}
3021
Patrick Dalyad441dd2016-09-15 15:50:46 -07003022static int qsmmuv2_halt(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003023{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003024 return __qsmmuv2_halt(smmu, true);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003025}
3026
Patrick Dalyad441dd2016-09-15 15:50:46 -07003027static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003028{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003029 return __qsmmuv2_halt(smmu, false);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003030}
3031
Patrick Dalyad441dd2016-09-15 15:50:46 -07003032static void qsmmuv2_resume(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003033{
3034 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3035 u32 reg;
3036
3037 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3038 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
3039 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3040}
3041
Patrick Dalyad441dd2016-09-15 15:50:46 -07003042static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003043{
3044 int i;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003045 u32 val;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003046 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003047 void __iomem *cb_base;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003048
Patrick Dalyad441dd2016-09-15 15:50:46 -07003049 /*
3050 * SCTLR.M must be disabled here per ARM SMMUv2 spec
3051 * to prevent table walks with an inconsistent state.
3052 */
3053 for (i = 0; i < smmu->num_context_banks; ++i) {
3054 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3055 val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
3056 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
3057 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
3058 writel_relaxed(val, cb_base + ARM_SMMU_CB_ACTLR);
3059 }
3060
3061 /* Program implementation defined registers */
3062 qsmmuv2_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003063 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
3064 writel_relaxed(regs[i].value,
3065 ARM_SMMU_GR0(smmu) + regs[i].offset);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003066 qsmmuv2_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003067}
3068
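/*
 * Hardware address translation (ATOS) on QSMMUv2: halt the SMMU, clear any
 * stale fault status, momentarily disable stall-on-fault, perform the
 * translation, then restore SCTLR and resume. Serialised by atos_lock.
 */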
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003069static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
3070 dma_addr_t iova)
Patrick Dalyad441dd2016-09-15 15:50:46 -07003071{
3072 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3073 struct arm_smmu_device *smmu = smmu_domain->smmu;
3074 int ret;
3075 phys_addr_t phys = 0;
3076 unsigned long flags;
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003077 u32 sctlr, sctlr_orig, fsr;
3078 void __iomem *cb_base;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003079
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003080 ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003081 if (ret)
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003082 return 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003083
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003084 spin_lock_irqsave(&smmu->atos_lock, flags);
3085 cb_base = ARM_SMMU_CB_BASE(smmu) +
3086 ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003087
3088 qsmmuv2_halt_nowait(smmu);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003089 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003090 qsmmuv2_wait_for_halt(smmu);
3091
3092 /* clear FSR to allow ATOS to log any faults */
3093 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
3094 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
3095
3096 /* disable stall mode momentarily */
3097 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3098 sctlr = sctlr_orig & ~SCTLR_CFCFG;
3099 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
3100
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003101 phys = __arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003102
3103 /* restore SCTLR */
3104 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
3105
3106 qsmmuv2_resume(smmu);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003107 spin_unlock_irqrestore(&smmu->atos_lock, flags);
3108
3109 arm_smmu_power_off(smmu_domain->smmu->pwr);
3110 return phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003111}
3112
3113struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
3114 .device_reset = qsmmuv2_device_reset,
3115 .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
Patrick Dalyad441dd2016-09-15 15:50:46 -07003116};
3117
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003118static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003119{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003120 int i;
3121 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003122 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003123 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003124
Peng Fan3ca37122016-05-03 21:50:30 +08003125 /*
 3126 * Before clearing ARM_MMU500_ACTLR_CPRE, the CACHE_LOCK bit of ACR
 3127 * must be cleared first. Note that the CACHE_LOCK bit is only
 3128 * present in MMU-500r2 onwards.
3129 */
3130 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
3131 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
3132 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
3133 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
3134 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
3135 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
3136 }
3137
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003138 /* Make sure all context banks are disabled and clear CB_FSR */
3139 for (i = 0; i < smmu->num_context_banks; ++i) {
3140 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3141 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
3142 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003143 /*
3144 * Disable MMU-500's not-particularly-beneficial next-page
3145 * prefetcher for the sake of errata #841119 and #826419.
3146 */
3147 if (smmu->model == ARM_MMU500) {
3148 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
3149 reg &= ~ARM_MMU500_ACTLR_CPRE;
3150 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
3151 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003152 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003153}
3154
3155static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
3156{
3157 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy468f4942016-09-12 17:13:49 +01003158 int i;
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003159 u32 reg;
3160
3161 /* clear global FSR */
3162 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3163 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3164
Robin Murphy468f4942016-09-12 17:13:49 +01003165 /*
3166 * Reset stream mapping groups: Initial values mark all SMRn as
3167 * invalid and all S2CRn as bypass unless overridden.
3168 */
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003169 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
Robin Murphya754fd12016-09-12 17:13:50 +01003170 for (i = 0; i < smmu->num_mapping_groups; ++i)
3171 arm_smmu_write_sme(smmu, i);
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003172
3173 arm_smmu_context_bank_reset(smmu);
3174 }
Will Deacon1463fe42013-07-31 19:21:27 +01003175
Will Deacon45ae7cf2013-06-24 18:31:25 +01003176 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003177 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
3178 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
3179
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003180 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003181
Will Deacon45ae7cf2013-06-24 18:31:25 +01003182 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003183 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003184
3185 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003186 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003187
Robin Murphy25a1c962016-02-10 14:25:33 +00003188 /* Enable client access, handling unmatched streams as appropriate */
3189 reg &= ~sCR0_CLIENTPD;
3190 if (disable_bypass)
3191 reg |= sCR0_USFCFG;
3192 else
3193 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003194
3195 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003196 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003197
3198 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003199 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003200
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003201 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3202 reg |= sCR0_VMID16EN;
3203
Will Deacon45ae7cf2013-06-24 18:31:25 +01003204 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003205 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003206 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Dalyd7476202016-09-08 18:23:28 -07003207
3208 /* Manage any implementation defined features */
3209 arm_smmu_arch_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003210}
3211
3212static int arm_smmu_id_size_to_bits(int size)
3213{
3214 switch (size) {
3215 case 0:
3216 return 32;
3217 case 1:
3218 return 36;
3219 case 2:
3220 return 40;
3221 case 3:
3222 return 42;
3223 case 4:
3224 return 44;
3225 case 5:
3226 default:
3227 return 48;
3228 }
3229}
3230
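/*
 * Illustrative DT fragment (offsets and values are placeholders):
 *
 *	attach-impl-defs = <0x6000 0x270>,
 *			   <0x6060 0x1055>;
 *
 * Each tuple is an implementation-defined register offset relative to the
 * SMMU global register space and the value programmed into it after reset.
 */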
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003231static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
3232{
3233 struct device *dev = smmu->dev;
3234 int i, ntuples, ret;
3235 u32 *tuples;
3236 struct arm_smmu_impl_def_reg *regs, *regit;
3237
3238 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
3239 return 0;
3240
3241 ntuples /= sizeof(u32);
3242 if (ntuples % 2) {
3243 dev_err(dev,
3244 "Invalid number of attach-impl-defs registers: %d\n",
3245 ntuples);
3246 return -EINVAL;
3247 }
3248
3249 regs = devm_kmalloc(
3250 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
3251 GFP_KERNEL);
3252 if (!regs)
3253 return -ENOMEM;
3254
3255 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
3256 if (!tuples)
3257 return -ENOMEM;
3258
3259 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
3260 tuples, ntuples);
3261 if (ret)
3262 return ret;
3263
3264 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
3265 regit->offset = tuples[i];
3266 regit->value = tuples[i + 1];
3267 }
3268
3269 devm_kfree(dev, tuples);
3270
3271 smmu->impl_def_attach_registers = regs;
3272 smmu->num_impl_def_attach_registers = ntuples / 2;
3273
3274 return 0;
3275}
3276
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003277
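/*
 * Illustrative power-resource properties (names and phandles are
 * placeholders):
 *
 *	clock-names = "iface_clk", "bus_clk";
 *	clocks = <&clock_gcc GCC_SMMU_CFG_CLK>, <&clock_gcc GCC_SMMU_AXI_CLK>;
 *	qcom,regulator-names = "vdd";
 *	vdd-supply = <&gdsc_smmu>;
 *
 * Clocks that report a rate of zero are given a minimal rate via
 * clk_round_rate() below; each name in qcom,regulator-names is looked up
 * as a "<name>-supply" regulator.
 */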
3278static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003279{
3280 const char *cname;
3281 struct property *prop;
3282 int i;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003283 struct device *dev = pwr->dev;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003284
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003285 pwr->num_clocks =
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003286 of_property_count_strings(dev->of_node, "clock-names");
3287
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003288 if (pwr->num_clocks < 1) {
3289 pwr->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003290 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07003291 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003292
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003293 pwr->clocks = devm_kzalloc(
3294 dev, sizeof(*pwr->clocks) * pwr->num_clocks,
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003295 GFP_KERNEL);
3296
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003297 if (!pwr->clocks)
3298 return -ENOMEM;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003299
3300 i = 0;
3301 of_property_for_each_string(dev->of_node, "clock-names",
3302 prop, cname) {
3303 struct clk *c = devm_clk_get(dev, cname);
3304
3305 if (IS_ERR(c)) {
 3306 dev_err(dev, "Couldn't get clock: %s\n",
3307 cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07003308 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003309 }
3310
3311 if (clk_get_rate(c) == 0) {
3312 long rate = clk_round_rate(c, 1000);
3313
3314 clk_set_rate(c, rate);
3315 }
3316
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003317 pwr->clocks[i] = c;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003318
3319 ++i;
3320 }
3321 return 0;
3322}
3323
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003324static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003325{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003326 const char *cname;
3327 struct property *prop;
3328 int i, ret = 0;
3329 struct device *dev = pwr->dev;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003330
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003331 pwr->num_gdscs =
3332 of_property_count_strings(dev->of_node, "qcom,regulator-names");
3333
3334 if (pwr->num_gdscs < 1) {
3335 pwr->num_gdscs = 0;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003336 return 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003337 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003338
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003339 pwr->gdscs = devm_kzalloc(
3340 dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
3341
3342 if (!pwr->gdscs)
3343 return -ENOMEM;
3344
3345 i = 0;
3346 of_property_for_each_string(dev->of_node, "qcom,regulator-names",
3347 prop, cname)
Patrick Daly86396be2017-04-17 18:08:45 -07003348 pwr->gdscs[i++].supply = cname;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003349
3350 ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
3351 return ret;
3352}
3353
3354static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
3355{
3356 struct device *dev = pwr->dev;
3357
3358 /* We don't want the bus APIs to print an error message */
3359 if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
3360 dev_dbg(dev, "No bus scaling info\n");
3361 return 0;
3362 }
3363
3364 pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
3365 if (!pwr->bus_dt_data) {
3366 dev_err(dev, "Unable to read bus-scaling from devicetree\n");
3367 return -EINVAL;
3368 }
3369
3370 pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
3371 if (!pwr->bus_client) {
3372 dev_err(dev, "Bus client registration failed\n");
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003373 return -EINVAL;
3374 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003375
3376 return 0;
3377}
3378
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003379/*
3380 * Cleanup done by devm. Any non-devm resources must clean up themselves.
3381 */
3382static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
3383 struct platform_device *pdev)
Patrick Daly2764f952016-09-06 19:22:44 -07003384{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003385 struct arm_smmu_power_resources *pwr;
3386 int ret;
Patrick Daly2764f952016-09-06 19:22:44 -07003387
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003388 pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
3389 if (!pwr)
3390 return ERR_PTR(-ENOMEM);
Patrick Daly2764f952016-09-06 19:22:44 -07003391
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003392 pwr->dev = &pdev->dev;
3393 pwr->pdev = pdev;
3394 mutex_init(&pwr->power_lock);
3395 spin_lock_init(&pwr->clock_refs_lock);
Patrick Daly2764f952016-09-06 19:22:44 -07003396
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003397 ret = arm_smmu_init_clocks(pwr);
3398 if (ret)
3399 return ERR_PTR(ret);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003400
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003401 ret = arm_smmu_init_regulators(pwr);
3402 if (ret)
3403 return ERR_PTR(ret);
3404
3405 ret = arm_smmu_init_bus_scaling(pwr);
3406 if (ret)
3407 return ERR_PTR(ret);
3408
3409 return pwr;
Patrick Daly2764f952016-09-06 19:22:44 -07003410}
3411
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003412/*
Patrick Dalyabeee952017-04-13 18:14:59 -07003413 * Bus APIs are not devm-safe, so the bus client must be unregistered here.
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003414 */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003415static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003416{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003417 msm_bus_scale_unregister_client(pwr->bus_client);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003418}
3419
Will Deacon45ae7cf2013-06-24 18:31:25 +01003420static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
3421{
3422 unsigned long size;
3423 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
3424 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003425 bool cttw_dt, cttw_reg;
Robin Murphya754fd12016-09-12 17:13:50 +01003426 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003427
Mitchel Humpherysba822582015-10-20 11:37:41 -07003428 dev_dbg(smmu->dev, "probing hardware configuration...\n");
3429 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01003430 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003431
3432 /* ID0 */
3433 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01003434
3435 /* Restrict available stages based on module parameter */
3436 if (force_stage == 1)
3437 id &= ~(ID0_S2TS | ID0_NTS);
3438 else if (force_stage == 2)
3439 id &= ~(ID0_S1TS | ID0_NTS);
3440
Will Deacon45ae7cf2013-06-24 18:31:25 +01003441 if (id & ID0_S1TS) {
3442 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003443 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003444 }
3445
3446 if (id & ID0_S2TS) {
3447 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003448 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003449 }
3450
3451 if (id & ID0_NTS) {
3452 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003453 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003454 }
3455
3456 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01003457 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003458 dev_err(smmu->dev, "\tno translation support!\n");
3459 return -ENODEV;
3460 }
3461
Robin Murphyb7862e32016-04-13 18:13:03 +01003462 if ((id & ID0_S1TS) &&
3463 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003464 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003465 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003466 }
3467
Robin Murphybae2c2d2015-07-29 19:46:05 +01003468 /*
3469 * In order for DMA API calls to work properly, we must defer to what
3470 * the DT says about coherency, regardless of what the hardware claims.
3471 * Fortunately, this also opens up a workaround for systems where the
3472 * ID register value has ended up configured incorrectly.
3473 */
3474 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
3475 cttw_reg = !!(id & ID0_CTTW);
3476 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003477 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003478 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003479 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01003480 cttw_dt ? "" : "non-");
3481 if (cttw_dt != cttw_reg)
3482 dev_notice(smmu->dev,
3483 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003484
Robin Murphy53867802016-09-12 17:13:48 +01003485 /* Max. number of entries we have for stream matching/indexing */
3486 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
3487 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003488 if (id & ID0_SMS) {
Robin Murphy53867802016-09-12 17:13:48 +01003489 u32 smr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003490
3491 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy53867802016-09-12 17:13:48 +01003492 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
3493 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003494 dev_err(smmu->dev,
3495 "stream-matching supported, but no SMRs present!\n");
3496 return -ENODEV;
3497 }
3498
Robin Murphy53867802016-09-12 17:13:48 +01003499 /*
3500 * SMR.ID bits may not be preserved if the corresponding MASK
3501 * bits are set, so check each one separately. We can reject
3502 * masters later if they try to claim IDs outside these masks.
3503 */
3504 smr = smmu->streamid_mask << SMR_ID_SHIFT;
3505 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
3506 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
3507 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003508
Robin Murphy53867802016-09-12 17:13:48 +01003509 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
3510 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
3511 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
3512 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
Dhaval Patel031d7462015-05-09 14:47:29 -07003513
Robin Murphy468f4942016-09-12 17:13:49 +01003514 /* Zero-initialised to mark as invalid */
3515 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
3516 GFP_KERNEL);
3517 if (!smmu->smrs)
3518 return -ENOMEM;
3519
Robin Murphy53867802016-09-12 17:13:48 +01003520 dev_notice(smmu->dev,
3521 "\tstream matching with %lu register groups, mask 0x%x",
3522 size, smmu->smr_mask_mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003523 }
Robin Murphya754fd12016-09-12 17:13:50 +01003524 /* s2cr->type == 0 means translation, so initialise explicitly */
3525 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
3526 GFP_KERNEL);
3527 if (!smmu->s2crs)
3528 return -ENOMEM;
3529 for (i = 0; i < size; i++)
3530 smmu->s2crs[i] = s2cr_init_val;
3531
Robin Murphy53867802016-09-12 17:13:48 +01003532 smmu->num_mapping_groups = size;
Robin Murphy6668f692016-09-12 17:13:54 +01003533 mutex_init(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003534
Robin Murphy7602b872016-04-28 17:12:09 +01003535 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
3536 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
3537 if (!(id & ID0_PTFS_NO_AARCH32S))
3538 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
3539 }
3540
Will Deacon45ae7cf2013-06-24 18:31:25 +01003541 /* ID1 */
3542 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01003543 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003544
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003545 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00003546 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01003547 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003548 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07003549 dev_warn(smmu->dev,
3550 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
3551 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003552
Will Deacon518f7132014-11-14 17:17:54 +00003553 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003554 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
3555 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
3556 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
3557 return -ENODEV;
3558 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07003559 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01003560 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01003561 /*
3562 * Cavium CN88xx erratum #27704.
3563 * Ensure ASID and VMID allocation is unique across all SMMUs in
3564 * the system.
3565 */
3566 if (smmu->model == CAVIUM_SMMUV2) {
3567 smmu->cavium_id_base =
3568 atomic_add_return(smmu->num_context_banks,
3569 &cavium_smmu_context_count);
3570 smmu->cavium_id_base -= smmu->num_context_banks;
3571 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003572
3573 /* ID2 */
3574 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
3575 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003576 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003577
Will Deacon518f7132014-11-14 17:17:54 +00003578 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003579 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003580 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003581
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003582 if (id & ID2_VMID16)
3583 smmu->features |= ARM_SMMU_FEAT_VMID16;
3584
Robin Murphyf1d84542015-03-04 16:41:05 +00003585 /*
3586 * What the page table walker can address actually depends on which
3587 * descriptor format is in use, but since a) we don't know that yet,
3588 * and b) it can vary per context bank, this will have to do...
3589 */
3590 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
3591 dev_warn(smmu->dev,
3592 "failed to set DMA mask for table walker\n");
3593
Robin Murphyb7862e32016-04-13 18:13:03 +01003594 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00003595 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01003596 if (smmu->version == ARM_SMMU_V1_64K)
3597 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003598 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003599 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00003600 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00003601 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01003602 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00003603 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01003604 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00003605 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01003606 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003607 }
3608
Robin Murphy7602b872016-04-28 17:12:09 +01003609 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01003610 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01003611 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01003612 if (smmu->features &
3613 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01003614 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01003615 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01003616 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01003617 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01003618 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01003619
Robin Murphyd5466352016-05-09 17:20:09 +01003620 if (arm_smmu_ops.pgsize_bitmap == -1UL)
3621 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
3622 else
3623 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003624 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01003625 smmu->pgsize_bitmap);
3626
Will Deacon518f7132014-11-14 17:17:54 +00003627
Will Deacon28d60072014-09-01 16:24:48 +01003628 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003629 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
3630 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003631
3632 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003633 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
3634 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003635
Will Deacon45ae7cf2013-06-24 18:31:25 +01003636 return 0;
3637}
3638
Patrick Dalyd7476202016-09-08 18:23:28 -07003639static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
3640{
3641 if (!smmu->arch_ops)
3642 return 0;
3643 if (!smmu->arch_ops->init)
3644 return 0;
3645 return smmu->arch_ops->init(smmu);
3646}
3647
3648static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
3649{
3650 if (!smmu->arch_ops)
3651 return;
3652 if (!smmu->arch_ops->device_reset)
3653 return;
3654 return smmu->arch_ops->device_reset(smmu);
3655}
3656
Robin Murphy67b65a32016-04-13 18:12:57 +01003657struct arm_smmu_match_data {
3658 enum arm_smmu_arch_version version;
3659 enum arm_smmu_implementation model;
Patrick Dalyd7476202016-09-08 18:23:28 -07003660 struct arm_smmu_arch_ops *arch_ops;
Robin Murphy67b65a32016-04-13 18:12:57 +01003661};
3662
Patrick Dalyd7476202016-09-08 18:23:28 -07003663#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops) \
3664static struct arm_smmu_match_data name = { \
3665.version = ver, \
3666.model = imp, \
3667.arch_ops = ops, \
3668} \
Robin Murphy67b65a32016-04-13 18:12:57 +01003669
Patrick Daly1f8a2882016-09-12 17:32:05 -07003670struct arm_smmu_arch_ops qsmmuv500_arch_ops;
3671
Patrick Dalyd7476202016-09-08 18:23:28 -07003672ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
3673ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
3674ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
3675ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
3676ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003677ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
Patrick Daly1f8a2882016-09-12 17:32:05 -07003678ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
3679 &qsmmuv500_arch_ops);
Robin Murphy67b65a32016-04-13 18:12:57 +01003680
Joerg Roedel09b52692014-10-02 12:24:45 +02003681static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01003682 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
3683 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
3684 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01003685 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003686 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01003687 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07003688 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Patrick Daly1f8a2882016-09-12 17:32:05 -07003689 { .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
Robin Murphy09360402014-08-28 17:51:59 +01003690 { },
3691};
3692MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
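/*
 * Illustrative SMMU node using one of the compatibles above (addresses,
 * interrupt specifiers and counts are placeholders):
 *
 *	smmu@d00000 {
 *		compatible = "qcom,smmu-v2";
 *		reg = <0xd00000 0x10000>;
 *		#global-interrupts = <1>;
 *		interrupts = <0 73 0>, <0 74 0>, <0 75 0>;
 *		#iommu-cells = <1>;
 *	};
 */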
3693
Patrick Dalyc47dcd42017-02-09 23:09:57 -08003694
3695static int arm_smmu_of_iommu_configure_fixup(struct device *dev, void *data)
3696{
3697 if (!dev->iommu_fwspec)
3698 of_iommu_configure(dev, dev->of_node);
3699 return 0;
3700}
3701
Patrick Daly000a2f22017-02-13 22:18:12 -08003702static int arm_smmu_add_device_fixup(struct device *dev, void *data)
3703{
3704 struct iommu_ops *ops = data;
3705
3706 ops->add_device(dev);
3707 return 0;
3708}
3709
Patrick Daly1f8a2882016-09-12 17:32:05 -07003710static int qsmmuv500_tbu_register(struct device *dev, void *data);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003711static int arm_smmu_device_dt_probe(struct platform_device *pdev)
3712{
Robin Murphy67b65a32016-04-13 18:12:57 +01003713 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003714 struct resource *res;
3715 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003716 struct device *dev = &pdev->dev;
Robin Murphyd5b41782016-09-14 15:21:39 +01003717 int num_irqs, i, err;
Robin Murphy7e96c742016-09-14 15:26:46 +01003718 bool legacy_binding;
3719
3720 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
3721 if (legacy_binding && !using_generic_binding) {
3722 if (!using_legacy_binding)
3723 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
3724 using_legacy_binding = true;
3725 } else if (!legacy_binding && !using_legacy_binding) {
3726 using_generic_binding = true;
3727 } else {
3728 dev_err(dev, "not probing due to mismatched DT properties\n");
3729 return -ENODEV;
3730 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003731
3732 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
3733 if (!smmu) {
3734 dev_err(dev, "failed to allocate arm_smmu_device\n");
3735 return -ENOMEM;
3736 }
3737 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08003738 spin_lock_init(&smmu->atos_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07003739 idr_init(&smmu->asid_idr);
3740 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003741
Robin Murphyfe52d4f2016-09-12 17:13:52 +01003742 data = of_device_get_match_data(dev);
Robin Murphy67b65a32016-04-13 18:12:57 +01003743 smmu->version = data->version;
3744 smmu->model = data->model;
Patrick Dalyd7476202016-09-08 18:23:28 -07003745 smmu->arch_ops = data->arch_ops;
Robin Murphy09360402014-08-28 17:51:59 +01003746
Will Deacon45ae7cf2013-06-24 18:31:25 +01003747 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01003748 smmu->base = devm_ioremap_resource(dev, res);
3749 if (IS_ERR(smmu->base))
3750 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003751 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003752
3753 if (of_property_read_u32(dev->of_node, "#global-interrupts",
3754 &smmu->num_global_irqs)) {
3755 dev_err(dev, "missing #global-interrupts property\n");
3756 return -ENODEV;
3757 }
3758
3759 num_irqs = 0;
3760 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
3761 num_irqs++;
3762 if (num_irqs > smmu->num_global_irqs)
3763 smmu->num_context_irqs++;
3764 }
3765
Andreas Herrmann44a08de2013-10-01 13:39:07 +01003766 if (!smmu->num_context_irqs) {
3767 dev_err(dev, "found %d interrupts but expected at least %d\n",
3768 num_irqs, smmu->num_global_irqs + 1);
3769 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003770 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003771
3772 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
3773 GFP_KERNEL);
3774 if (!smmu->irqs) {
3775 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
3776 return -ENOMEM;
3777 }
3778
3779 for (i = 0; i < num_irqs; ++i) {
3780 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07003781
Will Deacon45ae7cf2013-06-24 18:31:25 +01003782 if (irq < 0) {
3783 dev_err(dev, "failed to get irq index %d\n", i);
3784 return -ENODEV;
3785 }
3786 smmu->irqs[i] = irq;
3787 }
3788
Dhaval Patel031d7462015-05-09 14:47:29 -07003789 parse_driver_options(smmu);
3790
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003791 smmu->pwr = arm_smmu_init_power_resources(pdev);
3792 if (IS_ERR(smmu->pwr))
3793 return PTR_ERR(smmu->pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003794
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003795 err = arm_smmu_power_on(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07003796 if (err)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003797 goto out_exit_power_resources;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003798
3799 err = arm_smmu_device_cfg_probe(smmu);
3800 if (err)
3801 goto out_power_off;
3802
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003803 err = arm_smmu_parse_impl_def_registers(smmu);
3804 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01003805 goto out_power_off;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003806
Robin Murphyb7862e32016-04-13 18:13:03 +01003807 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01003808 smmu->num_context_banks != smmu->num_context_irqs) {
3809 dev_err(dev,
Mitchel Humpherys73230ce2014-12-11 17:18:02 -08003810 "found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
3811 smmu->num_context_irqs, smmu->num_context_banks,
3812 smmu->num_context_banks);
3813 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003814 }
3815
Will Deacon45ae7cf2013-06-24 18:31:25 +01003816 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08003817 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
3818 NULL, arm_smmu_global_fault,
3819 IRQF_ONESHOT | IRQF_SHARED,
3820 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003821 if (err) {
3822 dev_err(dev, "failed to request global IRQ %d (%u)\n",
3823 i, smmu->irqs[i]);
Robin Murphyd5b41782016-09-14 15:21:39 +01003824 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003825 }
3826 }
3827
Patrick Dalyd7476202016-09-08 18:23:28 -07003828 err = arm_smmu_arch_init(smmu);
3829 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01003830 goto out_power_off;
Patrick Dalyd7476202016-09-08 18:23:28 -07003831
Robin Murphy06e393e2016-09-12 17:13:55 +01003832 of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
Robin Murphyfe52d4f2016-09-12 17:13:52 +01003833 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01003834 arm_smmu_device_reset(smmu);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003835 arm_smmu_power_off(smmu->pwr);
Patrick Dalyd7476202016-09-08 18:23:28 -07003836
Patrick Daly8e3371a2017-02-13 22:14:53 -08003837 INIT_LIST_HEAD(&smmu->list);
3838 spin_lock(&arm_smmu_devices_lock);
3839 list_add(&smmu->list, &arm_smmu_devices);
3840 spin_unlock(&arm_smmu_devices_lock);
3841
Patrick Dalyc47dcd42017-02-09 23:09:57 -08003842 /* bus_set_iommu depends on this. */
3843 bus_for_each_dev(&platform_bus_type, NULL, NULL,
3844 arm_smmu_of_iommu_configure_fixup);
3845
Robin Murphy7e96c742016-09-14 15:26:46 +01003846 /* Oh, for a proper bus abstraction */
3847 if (!iommu_present(&platform_bus_type))
3848 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
Patrick Daly000a2f22017-02-13 22:18:12 -08003849 else
3850 bus_for_each_dev(&platform_bus_type, NULL, &arm_smmu_ops,
3851 arm_smmu_add_device_fixup);
Robin Murphy7e96c742016-09-14 15:26:46 +01003852#ifdef CONFIG_ARM_AMBA
3853 if (!iommu_present(&amba_bustype))
3854 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
3855#endif
3856#ifdef CONFIG_PCI
3857 if (!iommu_present(&pci_bus_type)) {
3858 pci_request_acs();
3859 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
3860 }
3861#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01003862 return 0;
3863
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003864out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003865 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003866
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003867out_exit_power_resources:
3868 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003869
Will Deacon45ae7cf2013-06-24 18:31:25 +01003870 return err;
3871}
3872
3873static int arm_smmu_device_remove(struct platform_device *pdev)
3874{
Robin Murphyfe52d4f2016-09-12 17:13:52 +01003875 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003876
3877 if (!smmu)
3878 return -ENODEV;
3879
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003880 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003881 return -EINVAL;
3882
Will Deaconecfadb62013-07-31 19:21:28 +01003883 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyfe52d4f2016-09-12 17:13:52 +01003884 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003885
Patrick Dalyc190d932016-08-30 17:23:28 -07003886 idr_destroy(&smmu->asid_idr);
3887
Will Deacon45ae7cf2013-06-24 18:31:25 +01003888 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07003889 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003890 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003891
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003892 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07003893
Will Deacon45ae7cf2013-06-24 18:31:25 +01003894 return 0;
3895}
3896
Will Deacon45ae7cf2013-06-24 18:31:25 +01003897static struct platform_driver arm_smmu_driver = {
3898 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003899 .name = "arm-smmu",
3900 .of_match_table = of_match_ptr(arm_smmu_of_match),
3901 },
3902 .probe = arm_smmu_device_dt_probe,
3903 .remove = arm_smmu_device_remove,
3904};
3905
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08003906static struct platform_driver qsmmuv500_tbu_driver;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003907static int __init arm_smmu_init(void)
3908{
Robin Murphy7e96c742016-09-14 15:26:46 +01003909 static bool registered;
3910 int ret = 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003911
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08003912 if (registered)
3913 return 0;
3914
3915 ret = platform_driver_register(&qsmmuv500_tbu_driver);
3916 if (ret)
3917 return ret;
3918
3919 ret = platform_driver_register(&arm_smmu_driver);
3920 registered = !ret;
Robin Murphy7e96c742016-09-14 15:26:46 +01003921 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003922}
3923
3924static void __exit arm_smmu_exit(void)
3925{
3926 platform_driver_unregister(&arm_smmu_driver);
3927}
3928
Andreas Herrmannb1950b22013-10-01 13:39:05 +01003929subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003930module_exit(arm_smmu_exit);
3931
Robin Murphy7e96c742016-09-14 15:26:46 +01003932static int __init arm_smmu_of_init(struct device_node *np)
3933{
3934 int ret = arm_smmu_init();
3935
3936 if (ret)
3937 return ret;
3938
3939 if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
3940 return -ENODEV;
3941
3942 return 0;
3943}
3944IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
3945IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
3946IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
3947IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
3948IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
3949IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
Robin Murphy7e96c742016-09-14 15:26:46 +01003950
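/*
 * Implementation-defined registers for the qcom,qsmmuv500 TCU and TBUs.
 * TCU_HW_VERSION_HLOS1 lives in the "tcu-base" region; the DEBUG_* offsets
 * are relative to each TBU's "base" region and are used below to halt
 * client traffic and to drive ECATS (hardware address translation).
 */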
Patrick Dalya0fddb62017-03-27 19:26:59 -07003951#define TCU_HW_VERSION_HLOS1 (0x18)
3952
Patrick Daly1f8a2882016-09-12 17:32:05 -07003953#define DEBUG_SID_HALT_REG 0x0
3954#define DEBUG_SID_HALT_VAL (0x1 << 16)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07003955#define DEBUG_SID_HALT_SID_MASK 0x3ff
3956
3957#define DEBUG_VA_ADDR_REG 0x8
3958
3959#define DEBUG_TXN_TRIGG_REG 0x18
3960#define DEBUG_TXN_AXPROT_SHIFT 6
3961#define DEBUG_TXN_AXCACHE_SHIFT 2
3962#define DEBUG_TRX_WRITE (0x1 << 1)
3963#define DEBUG_TXN_READ (0x0 << 1)
3964#define DEBUG_TXN_TRIGGER 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07003965
3966#define DEBUG_SR_HALT_ACK_REG 0x20
3967#define DEBUG_SR_HALT_ACK_VAL (0x1 << 1)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07003968#define DEBUG_SR_ECATS_RUNNING_VAL (0x1 << 0)
3969
3970#define DEBUG_PAR_REG 0x28
3971#define DEBUG_PAR_PA_MASK ((0x1ULL << 36) - 1)
3972#define DEBUG_PAR_PA_SHIFT 12
3973#define DEBUG_PAR_FAULT_VAL 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07003974
3975#define TBU_DBG_TIMEOUT_US 30000
3976
Patrick Daly6b290f1e2017-03-27 19:26:59 -07003977struct qsmmuv500_archdata {
3978 struct list_head tbus;
Patrick Dalya0fddb62017-03-27 19:26:59 -07003979 void __iomem *tcu_base;
3980 u32 version;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07003981};
Patrick Dalye15b3bc2017-04-05 14:53:59 -07003982#define get_qsmmuv500_archdata(smmu) \
3983 ((struct qsmmuv500_archdata *)((smmu)->archdata))
Patrick Daly6b290f1e2017-03-27 19:26:59 -07003984
Patrick Daly1f8a2882016-09-12 17:32:05 -07003985struct qsmmuv500_tbu_device {
3986 struct list_head list;
3987 struct device *dev;
3988 struct arm_smmu_device *smmu;
3989 void __iomem *base;
3990 void __iomem *status_reg;
3991
3992 struct arm_smmu_power_resources *pwr;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07003993 u32 sid_start;
3994 u32 num_sids;
Patrick Daly1f8a2882016-09-12 17:32:05 -07003995
3996 /* Protects halt count */
3997 spinlock_t halt_lock;
3998 u32 halt_count;
3999};
4000
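/*
 * Power on every registered TBU, powering the already-enabled ones back
 * off if any of them fails.
 */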
4001static int qsmmuv500_tbu_power_on_all(struct arm_smmu_device *smmu)
4002{
4003 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004004 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004005 int ret = 0;
4006
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004007 list_for_each_entry(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004008 ret = arm_smmu_power_on(tbu->pwr);
4009 if (ret)
4010 break;
4011 }
4012 if (!ret)
4013 return 0;
4014
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004015 list_for_each_entry_continue_reverse(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004016 arm_smmu_power_off(tbu->pwr);
4017 }
4018 return ret;
4019}
4020
4021static void qsmmuv500_tbu_power_off_all(struct arm_smmu_device *smmu)
4022{
4023 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004024 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004025
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004026 list_for_each_entry_reverse(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004027 arm_smmu_power_off(tbu->pwr);
4028 }
4029}
4030
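/*
 * Halt client traffic through a TBU. Halts are reference-counted under
 * halt_lock, so the hardware is only touched on the 0 -> 1 transition:
 * the halt request is raised in DEBUG_SID_HALT_REG and then
 * DEBUG_SR_HALT_ACK_REG is polled until the TBU acknowledges or the
 * request times out.
 */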
4031static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu)
4032{
4033 unsigned long flags;
4034 u32 val;
4035 void __iomem *base;
4036
4037 spin_lock_irqsave(&tbu->halt_lock, flags);
4038 if (tbu->halt_count) {
4039 tbu->halt_count++;
4040 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4041 return 0;
4042 }
4043
4044 base = tbu->base;
4045 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
4046 val |= DEBUG_SID_HALT_VAL;
4047 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
4048
4049 if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG,
4050 val, (val & DEBUG_SR_HALT_ACK_VAL),
4051 0, TBU_DBG_TIMEOUT_US)) {
4052 dev_err(tbu->dev, "Couldn't halt TBU!\n");
4053 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4054 return -ETIMEDOUT;
4055 }
4056
4057 tbu->halt_count = 1;
4058 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4059 return 0;
4060}
4061
4062static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
4063{
4064 unsigned long flags;
4065 u32 val;
4066 void __iomem *base;
4067
4068 spin_lock_irqsave(&tbu->halt_lock, flags);
4069 if (!tbu->halt_count) {
4070 WARN(1, "%s: bad tbu->halt_count", dev_name(tbu->dev));
4071 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4072 return;
4073
4074 } else if (tbu->halt_count > 1) {
4075 tbu->halt_count--;
4076 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4077 return;
4078 }
4079
4080 base = tbu->base;
4081 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
4082 val &= ~DEBUG_SID_HALT_VAL;
4083 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
4084
4085 tbu->halt_count = 0;
4086 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4087}
4088
4089static int qsmmuv500_halt_all(struct arm_smmu_device *smmu)
4090{
4091 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004092 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004093 int ret = 0;
4094
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004095 list_for_each_entry(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004096 ret = qsmmuv500_tbu_halt(tbu);
4097 if (ret)
4098 break;
4099 }
4100
4101 if (!ret)
4102 return 0;
4103
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004104 list_for_each_entry_continue_reverse(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004105 qsmmuv500_tbu_resume(tbu);
4106 }
4107 return ret;
4108}
4109
4110static void qsmmuv500_resume_all(struct arm_smmu_device *smmu)
4111{
4112 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004113 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004114
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004115 list_for_each_entry(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004116 qsmmuv500_tbu_resume(tbu);
4117 }
4118}
4119
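/* Return the TBU whose stream-ID range contains @sid, or NULL if none does. */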
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004120static struct qsmmuv500_tbu_device *qsmmuv500_find_tbu(
4121 struct arm_smmu_device *smmu, u32 sid)
4122{
4123 struct qsmmuv500_tbu_device *tbu = NULL;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004124 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004125
4126 list_for_each_entry(tbu, &data->tbus, list) {
4127 if (tbu->sid_start <= sid &&
4128 sid < tbu->sid_start + tbu->num_sids)
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07004129 return tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004130 }
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07004131 return NULL;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004132}
4133
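/*
 * Reprogram the implementation-defined attach registers with every TBU
 * powered on and halted, so the writes cannot race with client traffic.
 */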
Patrick Daly1f8a2882016-09-12 17:32:05 -07004134static void qsmmuv500_device_reset(struct arm_smmu_device *smmu)
4135{
4136 int i, ret;
4137 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
4138
4139 ret = qsmmuv500_tbu_power_on_all(smmu);
4140 if (ret)
4141 return;
4142
4143 /* Program implementation defined registers */
4144 qsmmuv500_halt_all(smmu);
4145 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
4146 writel_relaxed(regs[i].value,
4147 ARM_SMMU_GR0(smmu) + regs[i].offset);
4148 qsmmuv500_resume_all(smmu);
4149 qsmmuv500_tbu_power_off_all(smmu);
4150}
4151
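/*
 * Serialise ECATS operations: take atos_lock and, except on version 1.0
 * hardware (where the register is not accessible), poll the TBU status
 * register until it reads 1 before proceeding. qsmmuv500_ecats_unlock()
 * writes the status register back to 0 and releases the lock.
 */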
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004152static int qsmmuv500_ecats_lock(struct arm_smmu_domain *smmu_domain,
4153 struct qsmmuv500_tbu_device *tbu,
4154 unsigned long *flags)
4155{
4156 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004157 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004158 u32 val;
4159
4160 spin_lock_irqsave(&smmu->atos_lock, *flags);
4161 /* The status register is not accessible on version 1.0 */
4162 if (data->version == 0x01000000)
4163 return 0;
4164
4165 if (readl_poll_timeout_atomic(tbu->status_reg,
4166 val, (val == 0x1), 0,
4167 TBU_DBG_TIMEOUT_US)) {
4168 dev_err(tbu->dev, "ECATS hw busy!\n");
4169 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4170 return -ETIMEDOUT;
4171 }
4172
4173 return 0;
4174}
4175
4176static void qsmmuv500_ecats_unlock(struct arm_smmu_domain *smmu_domain,
4177 struct qsmmuv500_tbu_device *tbu,
4178 unsigned long *flags)
4179{
4180 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004181 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004182
4183 /* The status register is not accessible on version 1.0 */
4184 if (data->version != 0x01000000)
4185 writel_relaxed(0, tbu->status_reg);
4186 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4187}
4188
4189/*
4190 * Translate @iova via ECATS through the TBU owning @sid; zero means failure.
4191 */
4192static phys_addr_t qsmmuv500_iova_to_phys(
4193 struct iommu_domain *domain, dma_addr_t iova, u32 sid)
4194{
4195 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
4196 struct arm_smmu_device *smmu = smmu_domain->smmu;
4197 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
4198 struct qsmmuv500_tbu_device *tbu;
4199 int ret;
4200 phys_addr_t phys = 0;
4201 u64 val, fsr;
4202 unsigned long flags;
4203 void __iomem *cb_base;
4204 u32 sctlr_orig, sctlr;
4205 int needs_redo = 0;
4206
4207 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
4208 tbu = qsmmuv500_find_tbu(smmu, sid);
4209 if (!tbu)
4210 return 0;
4211
4212 ret = arm_smmu_power_on(tbu->pwr);
4213 if (ret)
4214 return 0;
4215
4216 /*
4217 * Disable client transactions & wait for existing operations to
4218 * complete.
4219 */
4220 ret = qsmmuv500_tbu_halt(tbu);
4221 if (ret)
4222 goto out_power_off;
4223
4224 /* Only one concurrent atos operation */
4225 ret = qsmmuv500_ecats_lock(smmu_domain, tbu, &flags);
4226 if (ret)
4227 goto out_resume;
4228
4229 /*
4230 * We can be called from an interrupt handler with FSR already set
4231 * so terminate the faulting transaction prior to starting ecats.
4232 * No new racing faults can occur since we are in the halted state.
4233 * ECATS can trigger the fault interrupt, so disable it temporarily
4234 * and check for an interrupt manually.
4235 */
4236 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4237 if (fsr & FSR_FAULT) {
4238 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
4239 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4240 }
4241 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
4242 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
4243 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
4244
4245redo:
4246 /* Set address and stream-id */
4247 val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
4248 val |= sid & DEBUG_SID_HALT_SID_MASK;
4249 writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
4250 writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
4251
4252 /*
4253 * Write-back Read and Write-Allocate
4254 * Privileged, non-secure data transaction
4255 * Read operation.
4256 */
4257 val = 0xF << DEBUG_TXN_AXCACHE_SHIFT;
4258 val |= 0x3 << DEBUG_TXN_AXPROT_SHIFT;
4259 val |= DEBUG_TXN_TRIGGER;
4260 writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);
4261
4262 ret = 0;
4263 if (readl_poll_timeout_atomic(tbu->base + DEBUG_SR_HALT_ACK_REG,
4264 val, !(val & DEBUG_SR_ECATS_RUNNING_VAL),
4265 0, TBU_DBG_TIMEOUT_US)) {
4266 dev_err(tbu->dev, "ECATS translation timed out!\n");
4267 }
4268
4269 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4270 if (fsr & FSR_FAULT) {
4271 dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx\n",
4272 fsr);
4273 ret = -EINVAL;
4274
4275 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
4276 /*
4277 * Clear pending interrupts
4278 * Barrier required to ensure that the FSR is cleared
4279 * before resuming SMMU operation
4280 */
4281 wmb();
4282 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4283 }
4284
4285 val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
4286 if (val & DEBUG_PAR_FAULT_VAL) {
4287 dev_err(tbu->dev, "ECATS translation failed! PAR = %llx\n",
4288 val);
4289 ret = -EINVAL;
4290 }
4291
4292 phys = (val >> DEBUG_PAR_PA_SHIFT) & DEBUG_PAR_PA_MASK;
4293 if (ret < 0)
4294 phys = 0;
4295
4296 /* Reset hardware */
4297 writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
4298 writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
4299
4300 /*
4301 * After a failed translation, the hardware can incorrectly report the
4302 * next successful translation as a failure, so retry up to twice.
4303 */
4304 if (!phys && needs_redo++ < 2)
4305 goto redo;
4306
4307 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
4308 qsmmuv500_ecats_unlock(smmu_domain, tbu, &flags);
4309
4310out_resume:
4311 qsmmuv500_tbu_resume(tbu);
4312
4313out_power_off:
4314 arm_smmu_power_off(tbu->pwr);
4315
4316 return phys;
4317}
4318
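/*
 * iova_to_phys_hard arch hook: translate through hardware using the first
 * stream ID in the fwspec of the device attached to this domain.
 */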
4319static phys_addr_t qsmmuv500_iova_to_phys_hard(
4320 struct iommu_domain *domain, dma_addr_t iova)
4321{
4322 u16 sid;
4323 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
4324 struct iommu_fwspec *fwspec;
4325
4326 /* Select a sid */
4327 fwspec = smmu_domain->dev->iommu_fwspec;
4328 sid = (u16)fwspec->ids[0];
4329
4330 return qsmmuv500_iova_to_phys(domain, iova, sid);
4331}
4332
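/*
 * device_for_each_child() callback: link a probed TBU child device into
 * the owning SMMU's TBU list. A child that has not yet bound to its
 * driver makes the arch init fail so that probing can be deferred.
 */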
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004333static int qsmmuv500_tbu_register(struct device *dev, void *cookie)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004334{
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004335 struct arm_smmu_device *smmu = cookie;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004336 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004337 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004338
4339 if (!dev->driver) {
4340 dev_err(dev, "TBU failed probe, QSMMUV500 cannot continue!\n");
4341 return -EINVAL;
4342 }
4343
4344 tbu = dev_get_drvdata(dev);
4345
4346 INIT_LIST_HEAD(&tbu->list);
4347 tbu->smmu = smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004348 list_add(&tbu->list, &data->tbus);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004349 return 0;
4350}
4351
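/*
 * qcom,qsmmuv500 arch init: map the "tcu-base" region, read the hardware
 * version, create the TBU child platform devices from the DT and register
 * each of them, deferring the SMMU probe until every TBU has bound.
 */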
4352static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
4353{
Patrick Dalya0fddb62017-03-27 19:26:59 -07004354 struct resource *res;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004355 struct device *dev = smmu->dev;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004356 struct qsmmuv500_archdata *data;
Patrick Dalya0fddb62017-03-27 19:26:59 -07004357 struct platform_device *pdev;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004358 int ret;
4359
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004360 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
4361 if (!data)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004362 return -ENOMEM;
4363
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004364 INIT_LIST_HEAD(&data->tbus);
Patrick Dalya0fddb62017-03-27 19:26:59 -07004365
4366 pdev = container_of(dev, struct platform_device, dev);
4367 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcu-base");
4368 data->tcu_base = devm_ioremap_resource(dev, res);
4369 if (IS_ERR(data->tcu_base))
4370 return PTR_ERR(data->tcu_base);
4371
4372 data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004373 smmu->archdata = data;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004374
4375 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
4376 if (ret)
4377 return ret;
4378
4379 /* Attempt to register child devices */
4380 ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
4381 if (ret)
Patrick Daly6ce54262017-04-12 21:24:06 -07004382 return -EPROBE_DEFER;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004383
4384 return 0;
4385}
4386
4387struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
4388 .init = qsmmuv500_arch_init,
4389 .device_reset = qsmmuv500_device_reset,
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004390 .iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
Patrick Daly1f8a2882016-09-12 17:32:05 -07004391};
4392
4393static const struct of_device_id qsmmuv500_tbu_of_match[] = {
4394 {.compatible = "qcom,qsmmuv500-tbu"},
4395 {}
4396};
4397
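/*
 * TBU child probe. Each TBU node is expected to provide two named register
 * regions, "base" and "status-reg", plus a two-cell "qcom,stream-id-range"
 * property of the form <first-sid number-of-sids>, as parsed below.
 */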
4398static int qsmmuv500_tbu_probe(struct platform_device *pdev)
4399{
4400 struct resource *res;
4401 struct device *dev = &pdev->dev;
4402 struct qsmmuv500_tbu_device *tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004403 const __be32 *cell;
4404 int len;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004405
4406 tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
4407 if (!tbu)
4408 return -ENOMEM;
4409
4410 INIT_LIST_HEAD(&tbu->list);
4411 tbu->dev = dev;
4412 spin_lock_init(&tbu->halt_lock);
4413
4414 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
4415 tbu->base = devm_ioremap_resource(dev, res);
4416 if (IS_ERR(tbu->base))
4417 return PTR_ERR(tbu->base);
4418
4419 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "status-reg");
4420 tbu->status_reg = devm_ioremap_resource(dev, res);
4421 if (IS_ERR(tbu->status_reg))
4422 return PTR_ERR(tbu->status_reg);
4423
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004424 cell = of_get_property(dev->of_node, "qcom,stream-id-range", &len);
4425 if (!cell || len < 8)
4426 return -EINVAL;
4427
4428 tbu->sid_start = of_read_number(cell, 1);
4429 tbu->num_sids = of_read_number(cell + 1, 1);
4430
Patrick Daly1f8a2882016-09-12 17:32:05 -07004431 tbu->pwr = arm_smmu_init_power_resources(pdev);
4432 if (IS_ERR(tbu->pwr))
4433 return PTR_ERR(tbu->pwr);
4434
4435 dev_set_drvdata(dev, tbu);
4436 return 0;
4437}
4438
4439static struct platform_driver qsmmuv500_tbu_driver = {
4440 .driver = {
4441 .name = "qsmmuv500-tbu",
4442 .of_match_table = of_match_ptr(qsmmuv500_tbu_of_match),
4443 },
4444 .probe = qsmmuv500_tbu_probe,
4445};
4446
Will Deacon45ae7cf2013-06-24 18:31:25 +01004447MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
4448MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
4449MODULE_LICENSE("GPL v2");