blob: fb3406210fe3fc133b9533aaa8c09620ca6feff9 [file] [log] [blame]
Will Deacon45ae7cf2013-06-24 18:31:25 +01001/*
2 * IOMMU API for ARM architected SMMU implementations.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
16 *
17 * Copyright (C) 2013 ARM Limited
18 *
19 * Author: Will Deacon <will.deacon@arm.com>
20 *
21 * This driver currently supports:
22 * - SMMUv1 and v2 implementations
23 * - Stream-matching and stream-indexing
24 * - v7/v8 long-descriptor format
25 * - Non-secure access to the SMMU
Will Deacon45ae7cf2013-06-24 18:31:25 +010026 * - Context fault reporting
27 */
28
29#define pr_fmt(fmt) "arm-smmu: " fmt
30
Robin Murphy468f4942016-09-12 17:13:49 +010031#include <linux/atomic.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010032#include <linux/delay.h>
Robin Murphy9adb9592016-01-26 18:06:36 +000033#include <linux/dma-iommu.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010034#include <linux/dma-mapping.h>
35#include <linux/err.h>
36#include <linux/interrupt.h>
37#include <linux/io.h>
Robin Murphyf9a05f02016-04-13 18:13:01 +010038#include <linux/io-64-nonatomic-hi-lo.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010039#include <linux/iommu.h>
Mitchel Humpherys859a7322014-10-29 21:13:40 +000040#include <linux/iopoll.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010041#include <linux/module.h>
42#include <linux/of.h>
Robin Murphybae2c2d2015-07-29 19:46:05 +010043#include <linux/of_address.h>
Robin Murphyfe52d4f2016-09-12 17:13:52 +010044#include <linux/of_device.h>
Robin Murphy06e393e2016-09-12 17:13:55 +010045#include <linux/of_iommu.h>
Will Deacona9a1b0b2014-05-01 18:05:08 +010046#include <linux/pci.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010047#include <linux/platform_device.h>
48#include <linux/slab.h>
49#include <linux/spinlock.h>
Patrick Dalyc11d1082016-09-01 15:52:44 -070050#include <soc/qcom/secure_buffer.h>
Patrick Daly1f8a2882016-09-12 17:32:05 -070051#include <linux/of_platform.h>
Patrick Daly2764f952016-09-06 19:22:44 -070052#include <linux/msm-bus.h>
53#include <dt-bindings/msm/msm-bus-ids.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010054
55#include <linux/amba/bus.h>
56
Will Deacon518f7132014-11-14 17:17:54 +000057#include "io-pgtable.h"
Will Deacon45ae7cf2013-06-24 18:31:25 +010058
Will Deacon45ae7cf2013-06-24 18:31:25 +010059/* Maximum number of context banks per SMMU */
60#define ARM_SMMU_MAX_CBS 128
61
Will Deacon45ae7cf2013-06-24 18:31:25 +010062/* SMMU global address space */
63#define ARM_SMMU_GR0(smmu) ((smmu)->base)
Will Deaconc757e852014-07-30 11:33:25 +010064#define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))
Will Deacon45ae7cf2013-06-24 18:31:25 +010065
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +000066/*
67 * SMMU global address space with conditional offset to access secure
68 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
69 * nsGFSYNR0: 0x450)
70 */
71#define ARM_SMMU_GR0_NS(smmu) \
72 ((smmu)->base + \
73 ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
74 ? 0x400 : 0))
75
Robin Murphyf9a05f02016-04-13 18:13:01 +010076/*
77 * Some 64-bit registers only make sense to write atomically, but in such
78 * cases all the data relevant to AArch32 formats lies within the lower word,
79 * therefore this actually makes more sense than it might first appear.
80 */
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010081#ifdef CONFIG_64BIT
Robin Murphyf9a05f02016-04-13 18:13:01 +010082#define smmu_write_atomic_lq writeq_relaxed
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010083#else
Robin Murphyf9a05f02016-04-13 18:13:01 +010084#define smmu_write_atomic_lq writel_relaxed
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010085#endif
86
Will Deacon45ae7cf2013-06-24 18:31:25 +010087/* Configuration registers */
88#define ARM_SMMU_GR0_sCR0 0x0
89#define sCR0_CLIENTPD (1 << 0)
90#define sCR0_GFRE (1 << 1)
91#define sCR0_GFIE (1 << 2)
92#define sCR0_GCFGFRE (1 << 4)
93#define sCR0_GCFGFIE (1 << 5)
94#define sCR0_USFCFG (1 << 10)
95#define sCR0_VMIDPNE (1 << 11)
96#define sCR0_PTM (1 << 12)
97#define sCR0_FB (1 << 13)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -080098#define sCR0_VMID16EN (1 << 31)
Will Deacon45ae7cf2013-06-24 18:31:25 +010099#define sCR0_BSU_SHIFT 14
100#define sCR0_BSU_MASK 0x3
101
Peng Fan3ca37122016-05-03 21:50:30 +0800102/* Auxiliary Configuration register */
103#define ARM_SMMU_GR0_sACR 0x10
104
Will Deacon45ae7cf2013-06-24 18:31:25 +0100105/* Identification registers */
106#define ARM_SMMU_GR0_ID0 0x20
107#define ARM_SMMU_GR0_ID1 0x24
108#define ARM_SMMU_GR0_ID2 0x28
109#define ARM_SMMU_GR0_ID3 0x2c
110#define ARM_SMMU_GR0_ID4 0x30
111#define ARM_SMMU_GR0_ID5 0x34
112#define ARM_SMMU_GR0_ID6 0x38
113#define ARM_SMMU_GR0_ID7 0x3c
114#define ARM_SMMU_GR0_sGFSR 0x48
115#define ARM_SMMU_GR0_sGFSYNR0 0x50
116#define ARM_SMMU_GR0_sGFSYNR1 0x54
117#define ARM_SMMU_GR0_sGFSYNR2 0x58
Will Deacon45ae7cf2013-06-24 18:31:25 +0100118
119#define ID0_S1TS (1 << 30)
120#define ID0_S2TS (1 << 29)
121#define ID0_NTS (1 << 28)
122#define ID0_SMS (1 << 27)
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000123#define ID0_ATOSNS (1 << 26)
Robin Murphy7602b872016-04-28 17:12:09 +0100124#define ID0_PTFS_NO_AARCH32 (1 << 25)
125#define ID0_PTFS_NO_AARCH32S (1 << 24)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100126#define ID0_CTTW (1 << 14)
127#define ID0_NUMIRPT_SHIFT 16
128#define ID0_NUMIRPT_MASK 0xff
Olav Haugan3c8766d2014-08-22 17:12:32 -0700129#define ID0_NUMSIDB_SHIFT 9
130#define ID0_NUMSIDB_MASK 0xf
Will Deacon45ae7cf2013-06-24 18:31:25 +0100131#define ID0_NUMSMRG_SHIFT 0
132#define ID0_NUMSMRG_MASK 0xff
133
134#define ID1_PAGESIZE (1 << 31)
135#define ID1_NUMPAGENDXB_SHIFT 28
136#define ID1_NUMPAGENDXB_MASK 7
137#define ID1_NUMS2CB_SHIFT 16
138#define ID1_NUMS2CB_MASK 0xff
139#define ID1_NUMCB_SHIFT 0
140#define ID1_NUMCB_MASK 0xff
141
142#define ID2_OAS_SHIFT 4
143#define ID2_OAS_MASK 0xf
144#define ID2_IAS_SHIFT 0
145#define ID2_IAS_MASK 0xf
146#define ID2_UBS_SHIFT 8
147#define ID2_UBS_MASK 0xf
148#define ID2_PTFS_4K (1 << 12)
149#define ID2_PTFS_16K (1 << 13)
150#define ID2_PTFS_64K (1 << 14)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800151#define ID2_VMID16 (1 << 15)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100152
Peng Fan3ca37122016-05-03 21:50:30 +0800153#define ID7_MAJOR_SHIFT 4
154#define ID7_MAJOR_MASK 0xf
Will Deacon45ae7cf2013-06-24 18:31:25 +0100155
Will Deacon45ae7cf2013-06-24 18:31:25 +0100156/* Global TLB invalidation */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100157#define ARM_SMMU_GR0_TLBIVMID 0x64
158#define ARM_SMMU_GR0_TLBIALLNSNH 0x68
159#define ARM_SMMU_GR0_TLBIALLH 0x6c
160#define ARM_SMMU_GR0_sTLBGSYNC 0x70
161#define ARM_SMMU_GR0_sTLBGSTATUS 0x74
162#define sTLBGSTATUS_GSACTIVE (1 << 0)
Mitchel Humpherys849aa502015-11-09 11:50:58 -0800163#define TLB_LOOP_TIMEOUT 500000 /* 500ms */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100164
165/* Stream mapping registers */
166#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
167#define SMR_VALID (1 << 31)
168#define SMR_MASK_SHIFT 16
Will Deacon45ae7cf2013-06-24 18:31:25 +0100169#define SMR_ID_SHIFT 0
Will Deacon45ae7cf2013-06-24 18:31:25 +0100170
171#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
172#define S2CR_CBNDX_SHIFT 0
173#define S2CR_CBNDX_MASK 0xff
174#define S2CR_TYPE_SHIFT 16
175#define S2CR_TYPE_MASK 0x3
Robin Murphya754fd12016-09-12 17:13:50 +0100176enum arm_smmu_s2cr_type {
177 S2CR_TYPE_TRANS,
178 S2CR_TYPE_BYPASS,
179 S2CR_TYPE_FAULT,
180};
181
182#define S2CR_PRIVCFG_SHIFT 24
183#define S2CR_PRIVCFG_MASK 0x3
184enum arm_smmu_s2cr_privcfg {
185 S2CR_PRIVCFG_DEFAULT,
186 S2CR_PRIVCFG_DIPAN,
187 S2CR_PRIVCFG_UNPRIV,
188 S2CR_PRIVCFG_PRIV,
189};
Will Deacon45ae7cf2013-06-24 18:31:25 +0100190
191/* Context bank attribute registers */
192#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
193#define CBAR_VMID_SHIFT 0
194#define CBAR_VMID_MASK 0xff
Will Deacon57ca90f2014-02-06 14:59:05 +0000195#define CBAR_S1_BPSHCFG_SHIFT 8
196#define CBAR_S1_BPSHCFG_MASK 3
197#define CBAR_S1_BPSHCFG_NSH 3
Will Deacon45ae7cf2013-06-24 18:31:25 +0100198#define CBAR_S1_MEMATTR_SHIFT 12
199#define CBAR_S1_MEMATTR_MASK 0xf
200#define CBAR_S1_MEMATTR_WB 0xf
201#define CBAR_TYPE_SHIFT 16
202#define CBAR_TYPE_MASK 0x3
203#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
204#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
205#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
206#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
207#define CBAR_IRPTNDX_SHIFT 24
208#define CBAR_IRPTNDX_MASK 0xff
209
Shalaj Jain04059c52015-03-03 13:34:59 -0800210#define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2))
211#define CBFRSYNRA_SID_MASK (0xffff)
212
Will Deacon45ae7cf2013-06-24 18:31:25 +0100213#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
214#define CBA2R_RW64_32BIT (0 << 0)
215#define CBA2R_RW64_64BIT (1 << 0)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800216#define CBA2R_VMID_SHIFT 16
217#define CBA2R_VMID_MASK 0xffff
Will Deacon45ae7cf2013-06-24 18:31:25 +0100218
219/* Translation context bank */
220#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
Will Deaconc757e852014-07-30 11:33:25 +0100221#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))
Will Deacon45ae7cf2013-06-24 18:31:25 +0100222
223#define ARM_SMMU_CB_SCTLR 0x0
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100224#define ARM_SMMU_CB_ACTLR 0x4
Will Deacon45ae7cf2013-06-24 18:31:25 +0100225#define ARM_SMMU_CB_RESUME 0x8
226#define ARM_SMMU_CB_TTBCR2 0x10
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +0100227#define ARM_SMMU_CB_TTBR0 0x20
228#define ARM_SMMU_CB_TTBR1 0x28
Will Deacon45ae7cf2013-06-24 18:31:25 +0100229#define ARM_SMMU_CB_TTBCR 0x30
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600230#define ARM_SMMU_CB_CONTEXTIDR 0x34
Will Deacon45ae7cf2013-06-24 18:31:25 +0100231#define ARM_SMMU_CB_S1_MAIR0 0x38
Will Deacon518f7132014-11-14 17:17:54 +0000232#define ARM_SMMU_CB_S1_MAIR1 0x3c
Robin Murphyf9a05f02016-04-13 18:13:01 +0100233#define ARM_SMMU_CB_PAR 0x50
Will Deacon45ae7cf2013-06-24 18:31:25 +0100234#define ARM_SMMU_CB_FSR 0x58
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -0700235#define ARM_SMMU_CB_FSRRESTORE 0x5c
Robin Murphyf9a05f02016-04-13 18:13:01 +0100236#define ARM_SMMU_CB_FAR 0x60
Will Deacon45ae7cf2013-06-24 18:31:25 +0100237#define ARM_SMMU_CB_FSYNR0 0x68
Will Deacon518f7132014-11-14 17:17:54 +0000238#define ARM_SMMU_CB_S1_TLBIVA 0x600
Will Deacon1463fe42013-07-31 19:21:27 +0100239#define ARM_SMMU_CB_S1_TLBIASID 0x610
Will Deacon518f7132014-11-14 17:17:54 +0000240#define ARM_SMMU_CB_S1_TLBIVAL 0x620
241#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
242#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
Mitchel Humpherysf3007992015-06-19 15:00:14 -0700243#define ARM_SMMU_CB_TLBSYNC 0x7f0
244#define ARM_SMMU_CB_TLBSTATUS 0x7f4
245#define TLBSTATUS_SACTIVE (1 << 0)
Robin Murphy661d9622015-05-27 17:09:34 +0100246#define ARM_SMMU_CB_ATS1PR 0x800
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000247#define ARM_SMMU_CB_ATSR 0x8f0
Will Deacon45ae7cf2013-06-24 18:31:25 +0100248
249#define SCTLR_S1_ASIDPNE (1 << 12)
250#define SCTLR_CFCFG (1 << 7)
251#define SCTLR_CFIE (1 << 6)
252#define SCTLR_CFRE (1 << 5)
253#define SCTLR_E (1 << 4)
254#define SCTLR_AFE (1 << 2)
255#define SCTLR_TRE (1 << 1)
256#define SCTLR_M (1 << 0)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100257
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100258#define ARM_MMU500_ACTLR_CPRE (1 << 1)
259
Peng Fan3ca37122016-05-03 21:50:30 +0800260#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
261
Mitchel Humpherys952f40a2015-08-19 12:13:28 -0700262#define ARM_SMMU_IMPL_DEF0(smmu) \
263 ((smmu)->base + (2 * (1 << (smmu)->pgshift)))
264#define ARM_SMMU_IMPL_DEF1(smmu) \
265 ((smmu)->base + (6 * (1 << (smmu)->pgshift)))
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000266#define CB_PAR_F (1 << 0)
267
268#define ATSR_ACTIVE (1 << 0)
269
Will Deacon45ae7cf2013-06-24 18:31:25 +0100270#define RESUME_RETRY (0 << 0)
271#define RESUME_TERMINATE (1 << 0)
272
Will Deacon45ae7cf2013-06-24 18:31:25 +0100273#define TTBCR2_SEP_SHIFT 15
Will Deacon5dc56162015-05-08 17:44:22 +0100274#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100275
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +0100276#define TTBRn_ASID_SHIFT 48
Will Deacon45ae7cf2013-06-24 18:31:25 +0100277
278#define FSR_MULTI (1 << 31)
279#define FSR_SS (1 << 30)
280#define FSR_UUT (1 << 8)
281#define FSR_ASF (1 << 7)
282#define FSR_TLBLKF (1 << 6)
283#define FSR_TLBMCF (1 << 5)
284#define FSR_EF (1 << 4)
285#define FSR_PF (1 << 3)
286#define FSR_AFF (1 << 2)
287#define FSR_TF (1 << 1)
288
Mitchel Humpherys29073202014-07-08 09:52:18 -0700289#define FSR_IGN (FSR_AFF | FSR_ASF | \
290 FSR_TLBMCF | FSR_TLBLKF)
291#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
Will Deaconadaba322013-07-31 19:21:26 +0100292 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100293
294#define FSYNR0_WNR (1 << 4)
295
Will Deacon4cf740b2014-07-14 19:47:39 +0100296static int force_stage;
Robin Murphy25a1c962016-02-10 14:25:33 +0000297module_param(force_stage, int, S_IRUGO);
Will Deacon4cf740b2014-07-14 19:47:39 +0100298MODULE_PARM_DESC(force_stage,
299 "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
Patrick Dalya728cfd2016-11-15 17:49:29 -0800300static bool disable_bypass;
Robin Murphy25a1c962016-02-10 14:25:33 +0000301module_param(disable_bypass, bool, S_IRUGO);
302MODULE_PARM_DESC(disable_bypass,
303 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
Will Deacon4cf740b2014-07-14 19:47:39 +0100304
Robin Murphy09360402014-08-28 17:51:59 +0100305enum arm_smmu_arch_version {
Robin Murphyb7862e32016-04-13 18:13:03 +0100306 ARM_SMMU_V1,
307 ARM_SMMU_V1_64K,
Robin Murphy09360402014-08-28 17:51:59 +0100308 ARM_SMMU_V2,
309};
310
Robin Murphy67b65a32016-04-13 18:12:57 +0100311enum arm_smmu_implementation {
312 GENERIC_SMMU,
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100313 ARM_MMU500,
Robin Murphye086d912016-04-13 18:12:58 +0100314 CAVIUM_SMMUV2,
Patrick Dalyf0d4e212016-06-20 15:50:14 -0700315 QCOM_SMMUV2,
Patrick Daly1f8a2882016-09-12 17:32:05 -0700316 QCOM_SMMUV500,
Robin Murphy67b65a32016-04-13 18:12:57 +0100317};
318
Patrick Dalyd7476202016-09-08 18:23:28 -0700319struct arm_smmu_device;
320struct arm_smmu_arch_ops {
321 int (*init)(struct arm_smmu_device *smmu);
322 void (*device_reset)(struct arm_smmu_device *smmu);
323 phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
324 dma_addr_t iova);
Patrick Dalyd7476202016-09-08 18:23:28 -0700325};
326
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -0700327struct arm_smmu_impl_def_reg {
328 u32 offset;
329 u32 value;
330};
331
Robin Murphya754fd12016-09-12 17:13:50 +0100332struct arm_smmu_s2cr {
Robin Murphy6668f692016-09-12 17:13:54 +0100333 struct iommu_group *group;
334 int count;
Robin Murphya754fd12016-09-12 17:13:50 +0100335 enum arm_smmu_s2cr_type type;
336 enum arm_smmu_s2cr_privcfg privcfg;
337 u8 cbndx;
338};
339
340#define s2cr_init_val (struct arm_smmu_s2cr){ \
341 .type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \
342}
343
Will Deacon45ae7cf2013-06-24 18:31:25 +0100344struct arm_smmu_smr {
Will Deacon45ae7cf2013-06-24 18:31:25 +0100345 u16 mask;
346 u16 id;
Robin Murphy468f4942016-09-12 17:13:49 +0100347 bool valid;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100348};
349
Will Deacona9a1b0b2014-05-01 18:05:08 +0100350struct arm_smmu_master_cfg {
Robin Murphyd5b41782016-09-14 15:21:39 +0100351 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +0100352 s16 smendx[];
Will Deacon45ae7cf2013-06-24 18:31:25 +0100353};
Robin Murphy468f4942016-09-12 17:13:49 +0100354#define INVALID_SMENDX -1
Robin Murphy06e393e2016-09-12 17:13:55 +0100355#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
356#define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu)
Robin Murphye7595e52016-11-07 18:25:09 +0000357#define fwspec_smendx(fw, i) \
358 (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
Robin Murphy06e393e2016-09-12 17:13:55 +0100359#define for_each_cfg_sme(fw, i, idx) \
Robin Murphye7595e52016-11-07 18:25:09 +0000360 for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100361
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700362/*
363 * Describes resources required for on/off power operation.
364 * Separate reference count is provided for atomic/nonatomic
365 * operations.
366 */
367struct arm_smmu_power_resources {
368 struct platform_device *pdev;
369 struct device *dev;
370
371 struct clk **clocks;
372 int num_clocks;
373
374 struct regulator_bulk_data *gdscs;
375 int num_gdscs;
376
377 uint32_t bus_client;
378 struct msm_bus_scale_pdata *bus_dt_data;
379
380 /* Protects power_count */
381 struct mutex power_lock;
382 int power_count;
383
384 /* Protects clock_refs_count */
385 spinlock_t clock_refs_lock;
386 int clock_refs_count;
387};
388
Will Deacon45ae7cf2013-06-24 18:31:25 +0100389struct arm_smmu_device {
390 struct device *dev;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100391
392 void __iomem *base;
393 unsigned long size;
Will Deaconc757e852014-07-30 11:33:25 +0100394 unsigned long pgshift;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100395
396#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
397#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1)
398#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
399#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
400#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000401#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800402#define ARM_SMMU_FEAT_VMID16 (1 << 6)
Robin Murphy7602b872016-04-28 17:12:09 +0100403#define ARM_SMMU_FEAT_FMT_AARCH64_4K (1 << 7)
404#define ARM_SMMU_FEAT_FMT_AARCH64_16K (1 << 8)
405#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
406#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
407#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100408 u32 features;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000409
410#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -0800411#define ARM_SMMU_OPT_FATAL_ASF (1 << 1)
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -0800412#define ARM_SMMU_OPT_SKIP_INIT (1 << 2)
Patrick Dalyc190d932016-08-30 17:23:28 -0700413#define ARM_SMMU_OPT_DYNAMIC (1 << 3)
Patrick Daly4423d3e2017-05-04 18:17:51 -0700414#define ARM_SMMU_OPT_3LVL_TABLES (1 << 4)
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000415 u32 options;
Robin Murphy09360402014-08-28 17:51:59 +0100416 enum arm_smmu_arch_version version;
Robin Murphy67b65a32016-04-13 18:12:57 +0100417 enum arm_smmu_implementation model;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100418
419 u32 num_context_banks;
420 u32 num_s2_context_banks;
421 DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
422 atomic_t irptndx;
423
424 u32 num_mapping_groups;
Robin Murphy53867802016-09-12 17:13:48 +0100425 u16 streamid_mask;
426 u16 smr_mask_mask;
Robin Murphy468f4942016-09-12 17:13:49 +0100427 struct arm_smmu_smr *smrs;
Robin Murphya754fd12016-09-12 17:13:50 +0100428 struct arm_smmu_s2cr *s2crs;
Robin Murphy6668f692016-09-12 17:13:54 +0100429 struct mutex stream_map_mutex;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100430
Will Deacon518f7132014-11-14 17:17:54 +0000431 unsigned long va_size;
432 unsigned long ipa_size;
433 unsigned long pa_size;
Robin Murphyd5466352016-05-09 17:20:09 +0100434 unsigned long pgsize_bitmap;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100435
436 u32 num_global_irqs;
437 u32 num_context_irqs;
438 unsigned int *irqs;
439
Patrick Daly8e3371a2017-02-13 22:14:53 -0800440 struct list_head list;
441
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800442 u32 cavium_id_base; /* Specific to Cavium */
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -0700443 /* Specific to QCOM */
444 struct arm_smmu_impl_def_reg *impl_def_attach_registers;
445 unsigned int num_impl_def_attach_registers;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -0800446
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700447 struct arm_smmu_power_resources *pwr;
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700448
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -0800449 spinlock_t atos_lock;
Patrick Dalyc190d932016-08-30 17:23:28 -0700450
451 /* protects idr */
452 struct mutex idr_mutex;
453 struct idr asid_idr;
Patrick Dalyd7476202016-09-08 18:23:28 -0700454
455 struct arm_smmu_arch_ops *arch_ops;
456 void *archdata;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100457};
458
Robin Murphy7602b872016-04-28 17:12:09 +0100459enum arm_smmu_context_fmt {
460 ARM_SMMU_CTX_FMT_NONE,
461 ARM_SMMU_CTX_FMT_AARCH64,
462 ARM_SMMU_CTX_FMT_AARCH32_L,
463 ARM_SMMU_CTX_FMT_AARCH32_S,
Will Deacon45ae7cf2013-06-24 18:31:25 +0100464};
465
466struct arm_smmu_cfg {
Will Deacon45ae7cf2013-06-24 18:31:25 +0100467 u8 cbndx;
468 u8 irptndx;
469 u32 cbar;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600470 u32 procid;
471 u16 asid;
Robin Murphy7602b872016-04-28 17:12:09 +0100472 enum arm_smmu_context_fmt fmt;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100473};
Dan Carpenterfaea13b72013-08-21 09:33:30 +0100474#define INVALID_IRPTNDX 0xff
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600475#define INVALID_CBNDX 0xff
476#define INVALID_ASID 0xffff
Patrick Dalyc190d932016-08-30 17:23:28 -0700477/*
478 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
479 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
480 */
481#define MAX_ASID 0xff
Will Deacon45ae7cf2013-06-24 18:31:25 +0100482
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600483#define ARM_SMMU_CB_ASID(smmu, cfg) ((cfg)->asid)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800484#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
Will Deaconecfadb62013-07-31 19:21:28 +0100485
Will Deaconc752ce42014-06-25 22:46:31 +0100486enum arm_smmu_domain_stage {
487 ARM_SMMU_DOMAIN_S1 = 0,
488 ARM_SMMU_DOMAIN_S2,
489 ARM_SMMU_DOMAIN_NESTED,
490};
491
Patrick Dalyc11d1082016-09-01 15:52:44 -0700492struct arm_smmu_pte_info {
493 void *virt_addr;
494 size_t size;
495 struct list_head entry;
496};
497
Will Deacon45ae7cf2013-06-24 18:31:25 +0100498struct arm_smmu_domain {
Will Deacon44680ee2014-06-25 11:29:12 +0100499 struct arm_smmu_device *smmu;
Patrick Dalyea63baa2017-02-13 17:11:33 -0800500 struct device *dev;
Will Deacon518f7132014-11-14 17:17:54 +0000501 struct io_pgtable_ops *pgtbl_ops;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -0700502 struct io_pgtable_cfg pgtbl_cfg;
Will Deacon518f7132014-11-14 17:17:54 +0000503 spinlock_t pgtbl_lock;
Will Deacon44680ee2014-06-25 11:29:12 +0100504 struct arm_smmu_cfg cfg;
Will Deaconc752ce42014-06-25 22:46:31 +0100505 enum arm_smmu_domain_stage stage;
Will Deacon518f7132014-11-14 17:17:54 +0000506 struct mutex init_mutex; /* Protects smmu pointer */
Patrick Dalyc190d932016-08-30 17:23:28 -0700507 u32 attributes;
Patrick Dalyc11d1082016-09-01 15:52:44 -0700508 u32 secure_vmid;
509 struct list_head pte_info_list;
510 struct list_head unassign_list;
Patrick Dalye271f212016-10-04 13:24:49 -0700511 struct mutex assign_lock;
Patrick Dalyb7dfda72016-10-04 14:42:58 -0700512 struct list_head secure_pool_list;
Joerg Roedel1d672632015-03-26 13:43:10 +0100513 struct iommu_domain domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100514};
515
Patrick Daly8e3371a2017-02-13 22:14:53 -0800516static DEFINE_SPINLOCK(arm_smmu_devices_lock);
517static LIST_HEAD(arm_smmu_devices);
518
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000519struct arm_smmu_option_prop {
520 u32 opt;
521 const char *prop;
522};
523
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800524static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
525
Robin Murphy7e96c742016-09-14 15:26:46 +0100526static bool using_legacy_binding, using_generic_binding;
527
Mitchel Humpherys29073202014-07-08 09:52:18 -0700528static struct arm_smmu_option_prop arm_smmu_options[] = {
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000529 { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -0800530 { ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -0800531 { ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
Patrick Dalyc190d932016-08-30 17:23:28 -0700532 { ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
Patrick Daly4423d3e2017-05-04 18:17:51 -0700533 { ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000534 { 0, NULL},
535};
536
Mitchel Humpherysb8be4132015-02-06 14:25:10 -0800537static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
538 dma_addr_t iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -0700539static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
540 dma_addr_t iova);
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -0600541static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -0800542
Patrick Dalyc11d1082016-09-01 15:52:44 -0700543static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
544static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
Patrick Dalye271f212016-10-04 13:24:49 -0700545static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -0700546static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);
547
Patrick Dalyd7476202016-09-08 18:23:28 -0700548static int arm_smmu_arch_init(struct arm_smmu_device *smmu);
549static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu);
550
Patrick Dalyef6c1dc2016-11-16 14:35:23 -0800551static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);
552
Joerg Roedel1d672632015-03-26 13:43:10 +0100553static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
554{
555 return container_of(dom, struct arm_smmu_domain, domain);
556}
557
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000558static void parse_driver_options(struct arm_smmu_device *smmu)
559{
560 int i = 0;
Mitchel Humpherys29073202014-07-08 09:52:18 -0700561
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000562 do {
563 if (of_property_read_bool(smmu->dev->of_node,
564 arm_smmu_options[i].prop)) {
565 smmu->options |= arm_smmu_options[i].opt;
Mitchel Humpherysba822582015-10-20 11:37:41 -0700566 dev_dbg(smmu->dev, "option %s\n",
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000567 arm_smmu_options[i].prop);
568 }
569 } while (arm_smmu_options[++i].opt);
570}
571
Patrick Dalyc190d932016-08-30 17:23:28 -0700572static bool is_dynamic_domain(struct iommu_domain *domain)
573{
574 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
575
576 return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
577}
578
Liam Mark53cf2342016-12-20 11:36:07 -0800579static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
580{
581 if (smmu_domain->attributes &
582 (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT))
583 return true;
584 else if (smmu_domain->smmu && smmu_domain->smmu->dev)
585 return smmu_domain->smmu->dev->archdata.dma_coherent;
586 else
587 return false;
588}
589
Patrick Dalye271f212016-10-04 13:24:49 -0700590static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
591{
592 return (smmu_domain->secure_vmid != VMID_INVAL);
593}
594
595static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
596{
597 if (arm_smmu_is_domain_secure(smmu_domain))
598 mutex_lock(&smmu_domain->assign_lock);
599}
600
601static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
602{
603 if (arm_smmu_is_domain_secure(smmu_domain))
604 mutex_unlock(&smmu_domain->assign_lock);
605}
606
Will Deacon8f68f8e2014-07-15 11:27:08 +0100607static struct device_node *dev_get_dev_node(struct device *dev)
Will Deacona9a1b0b2014-05-01 18:05:08 +0100608{
609 if (dev_is_pci(dev)) {
610 struct pci_bus *bus = to_pci_dev(dev)->bus;
Mitchel Humpherys29073202014-07-08 09:52:18 -0700611
Will Deacona9a1b0b2014-05-01 18:05:08 +0100612 while (!pci_is_root_bus(bus))
613 bus = bus->parent;
Robin Murphyd5b41782016-09-14 15:21:39 +0100614 return of_node_get(bus->bridge->parent->of_node);
Will Deacona9a1b0b2014-05-01 18:05:08 +0100615 }
616
Robin Murphyd5b41782016-09-14 15:21:39 +0100617 return of_node_get(dev->of_node);
Will Deacona9a1b0b2014-05-01 18:05:08 +0100618}
619
Robin Murphyd5b41782016-09-14 15:21:39 +0100620static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100621{
Robin Murphyd5b41782016-09-14 15:21:39 +0100622 *((__be32 *)data) = cpu_to_be32(alias);
623 return 0; /* Continue walking */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100624}
625
Robin Murphyd5b41782016-09-14 15:21:39 +0100626static int __find_legacy_master_phandle(struct device *dev, void *data)
Will Deacona9a1b0b2014-05-01 18:05:08 +0100627{
Robin Murphyd5b41782016-09-14 15:21:39 +0100628 struct of_phandle_iterator *it = *(void **)data;
629 struct device_node *np = it->node;
630 int err;
Will Deacona9a1b0b2014-05-01 18:05:08 +0100631
Robin Murphyd5b41782016-09-14 15:21:39 +0100632 of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
633 "#stream-id-cells", 0)
634 if (it->node == np) {
635 *(void **)data = dev;
636 return 1;
Olav Haugan3c8766d2014-08-22 17:12:32 -0700637 }
Robin Murphyd5b41782016-09-14 15:21:39 +0100638 it->node = np;
639 return err == -ENOENT ? 0 : err;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100640}
641
Robin Murphyfe52d4f2016-09-12 17:13:52 +0100642static struct platform_driver arm_smmu_driver;
Robin Murphy06e393e2016-09-12 17:13:55 +0100643static struct iommu_ops arm_smmu_ops;
Robin Murphyfe52d4f2016-09-12 17:13:52 +0100644
Robin Murphy06e393e2016-09-12 17:13:55 +0100645static int arm_smmu_register_legacy_master(struct device *dev,
646 struct arm_smmu_device **smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100647{
Robin Murphy06e393e2016-09-12 17:13:55 +0100648 struct device *smmu_dev;
Robin Murphyd5b41782016-09-14 15:21:39 +0100649 struct device_node *np;
650 struct of_phandle_iterator it;
651 void *data = &it;
Robin Murphy06e393e2016-09-12 17:13:55 +0100652 u32 *sids;
Robin Murphyd5b41782016-09-14 15:21:39 +0100653 __be32 pci_sid;
654 int err = 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100655
Stephen Boydfecdeef2017-03-01 16:53:19 -0800656 memset(&it, 0, sizeof(it));
Robin Murphyd5b41782016-09-14 15:21:39 +0100657 np = dev_get_dev_node(dev);
658 if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
659 of_node_put(np);
660 return -ENODEV;
661 }
662
663 it.node = np;
Robin Murphyfe52d4f2016-09-12 17:13:52 +0100664 err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
665 __find_legacy_master_phandle);
Robin Murphy06e393e2016-09-12 17:13:55 +0100666 smmu_dev = data;
Robin Murphyd5b41782016-09-14 15:21:39 +0100667 of_node_put(np);
668 if (err == 0)
669 return -ENODEV;
670 if (err < 0)
671 return err;
Will Deacon44680ee2014-06-25 11:29:12 +0100672
Robin Murphyd5b41782016-09-14 15:21:39 +0100673 if (dev_is_pci(dev)) {
674 /* "mmu-masters" assumes Stream ID == Requester ID */
675 pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
676 &pci_sid);
677 it.cur = &pci_sid;
678 it.cur_count = 1;
679 }
680
Robin Murphy06e393e2016-09-12 17:13:55 +0100681 err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
682 &arm_smmu_ops);
683 if (err)
684 return err;
685
686 sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
687 if (!sids)
Robin Murphyd5b41782016-09-14 15:21:39 +0100688 return -ENOMEM;
689
Robin Murphy06e393e2016-09-12 17:13:55 +0100690 *smmu = dev_get_drvdata(smmu_dev);
691 of_phandle_iterator_args(&it, sids, it.cur_count);
692 err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
693 kfree(sids);
694 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100695}
696
697static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
698{
699 int idx;
700
701 do {
702 idx = find_next_zero_bit(map, end, start);
703 if (idx == end)
704 return -ENOSPC;
705 } while (test_and_set_bit(idx, map));
706
707 return idx;
708}
709
710static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
711{
712 clear_bit(idx, map);
713}
714
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700715static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700716{
717 int i, ret = 0;
718
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700719 for (i = 0; i < pwr->num_clocks; ++i) {
720 ret = clk_prepare(pwr->clocks[i]);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700721 if (ret) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700722 dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700723 while (i--)
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700724 clk_unprepare(pwr->clocks[i]);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700725 break;
726 }
727 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700728 return ret;
729}
730
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700731static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700732{
733 int i;
734
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700735 for (i = pwr->num_clocks; i; --i)
736 clk_unprepare(pwr->clocks[i - 1]);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700737}
738
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700739static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -0700740{
741 int i, ret = 0;
Patrick Daly8befb662016-08-17 20:03:28 -0700742
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700743 for (i = 0; i < pwr->num_clocks; ++i) {
744 ret = clk_enable(pwr->clocks[i]);
Patrick Daly8befb662016-08-17 20:03:28 -0700745 if (ret) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700746 dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
Patrick Daly8befb662016-08-17 20:03:28 -0700747 while (i--)
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700748 clk_disable(pwr->clocks[i]);
Patrick Daly8befb662016-08-17 20:03:28 -0700749 break;
750 }
751 }
752
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700753 return ret;
754}
Patrick Daly8befb662016-08-17 20:03:28 -0700755
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700756static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
757{
758 int i;
759
760 for (i = pwr->num_clocks; i; --i)
761 clk_disable(pwr->clocks[i - 1]);
762}
763
764static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
765{
766 if (!pwr->bus_client)
767 return 0;
768 return msm_bus_scale_client_update_request(pwr->bus_client, 1);
769}
770
771static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
772{
773 if (!pwr->bus_client)
774 return;
775 WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
776}
777
778/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
779static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
780{
781 int ret = 0;
782 unsigned long flags;
783
784 spin_lock_irqsave(&pwr->clock_refs_lock, flags);
785 if (pwr->clock_refs_count > 0) {
786 pwr->clock_refs_count++;
787 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
788 return 0;
789 }
790
791 ret = arm_smmu_enable_clocks(pwr);
792 if (!ret)
793 pwr->clock_refs_count = 1;
794
795 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
Patrick Daly8befb662016-08-17 20:03:28 -0700796 return ret;
797}
798
799/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700800static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -0700801{
Patrick Daly8befb662016-08-17 20:03:28 -0700802 unsigned long flags;
803
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700804 spin_lock_irqsave(&pwr->clock_refs_lock, flags);
805 if (pwr->clock_refs_count == 0) {
806 WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
807 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
808 return;
809
810 } else if (pwr->clock_refs_count > 1) {
811 pwr->clock_refs_count--;
812 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
Patrick Daly8befb662016-08-17 20:03:28 -0700813 return;
814 }
815
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700816 arm_smmu_disable_clocks(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700817
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700818 pwr->clock_refs_count = 0;
819 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
Patrick Daly8befb662016-08-17 20:03:28 -0700820}
821
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700822static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700823{
824 int ret;
825
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700826 mutex_lock(&pwr->power_lock);
827 if (pwr->power_count > 0) {
828 pwr->power_count += 1;
829 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700830 return 0;
831 }
832
Patrick Daly8e2aa1a2017-04-13 17:09:43 -0700833 ret = arm_smmu_request_bus(pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -0700834 if (ret)
835 goto out_unlock;
836
Patrick Daly8e2aa1a2017-04-13 17:09:43 -0700837 ret = regulator_bulk_enable(pwr->num_gdscs, pwr->gdscs);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -0700838 if (ret)
Patrick Daly8e2aa1a2017-04-13 17:09:43 -0700839 goto out_disable_bus;
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700840
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700841 ret = arm_smmu_prepare_clocks(pwr);
Patrick Daly2764f952016-09-06 19:22:44 -0700842 if (ret)
Patrick Daly8e2aa1a2017-04-13 17:09:43 -0700843 goto out_disable_regulators;
Patrick Daly2764f952016-09-06 19:22:44 -0700844
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700845 pwr->power_count = 1;
846 mutex_unlock(&pwr->power_lock);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -0700847 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700848
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -0700849out_disable_regulators:
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700850 regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
Patrick Daly8e2aa1a2017-04-13 17:09:43 -0700851out_disable_bus:
852 arm_smmu_unrequest_bus(pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -0700853out_unlock:
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700854 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700855 return ret;
856}
857
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700858static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700859{
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700860 mutex_lock(&pwr->power_lock);
861 if (pwr->power_count == 0) {
862 WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
863 mutex_unlock(&pwr->power_lock);
864 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700865
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700866 } else if (pwr->power_count > 1) {
867 pwr->power_count--;
868 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700869 return;
870 }
871
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700872 arm_smmu_unprepare_clocks(pwr);
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700873 regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
Patrick Daly8e2aa1a2017-04-13 17:09:43 -0700874 arm_smmu_unrequest_bus(pwr);
Patrick Daly2e3471e2017-04-13 16:24:33 -0700875 pwr->power_count = 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700876 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700877}
878
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700879static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -0700880{
881 int ret;
882
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700883 ret = arm_smmu_power_on_slow(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700884 if (ret)
885 return ret;
886
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700887 ret = arm_smmu_power_on_atomic(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700888 if (ret)
889 goto out_disable;
890
891 return 0;
892
893out_disable:
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700894 arm_smmu_power_off_slow(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700895 return ret;
896}
897
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700898static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -0700899{
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700900 arm_smmu_power_off_atomic(pwr);
901 arm_smmu_power_off_slow(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700902}
903
904/*
905 * Must be used instead of arm_smmu_power_on if it may be called from
906 * atomic context
907 */
908static int arm_smmu_domain_power_on(struct iommu_domain *domain,
909 struct arm_smmu_device *smmu)
910{
911 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
912 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
913
914 if (atomic_domain)
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700915 return arm_smmu_power_on_atomic(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700916
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700917 return arm_smmu_power_on(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700918}
919
920/*
921 * Must be used instead of arm_smmu_power_on if it may be called from
922 * atomic context
923 */
924static void arm_smmu_domain_power_off(struct iommu_domain *domain,
925 struct arm_smmu_device *smmu)
926{
927 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
928 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
929
930 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700931 arm_smmu_power_off_atomic(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700932 return;
933 }
934
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700935 arm_smmu_power_off(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700936}
937
Will Deacon45ae7cf2013-06-24 18:31:25 +0100938/* Wait for any pending TLB invalidations to complete */
Mitchel Humpherysf3007992015-06-19 15:00:14 -0700939static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
940 int cbndx)
941{
942 void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
943 u32 val;
944
945 writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
946 if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
947 !(val & TLBSTATUS_SACTIVE),
Mitchel Humpherys9b1b8942015-06-25 18:17:15 -0700948 0, TLB_LOOP_TIMEOUT))
Mitchel Humpherysf3007992015-06-19 15:00:14 -0700949 dev_err(smmu->dev, "TLBSYNC timeout!\n");
950}
951
Will Deacon518f7132014-11-14 17:17:54 +0000952static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100953{
954 int count = 0;
955 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
956
957 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
958 while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
959 & sTLBGSTATUS_GSACTIVE) {
960 cpu_relax();
961 if (++count == TLB_LOOP_TIMEOUT) {
962 dev_err_ratelimited(smmu->dev,
963 "TLB sync timed out -- SMMU may be deadlocked\n");
964 return;
965 }
966 udelay(1);
967 }
968}
969
Will Deacon518f7132014-11-14 17:17:54 +0000970static void arm_smmu_tlb_sync(void *cookie)
Will Deacon1463fe42013-07-31 19:21:27 +0100971{
Will Deacon518f7132014-11-14 17:17:54 +0000972 struct arm_smmu_domain *smmu_domain = cookie;
Mitchel Humpherysf3007992015-06-19 15:00:14 -0700973 arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
Will Deacon518f7132014-11-14 17:17:54 +0000974}
975
Patrick Daly8befb662016-08-17 20:03:28 -0700976/* Must be called with clocks/regulators enabled */
Will Deacon518f7132014-11-14 17:17:54 +0000977static void arm_smmu_tlb_inv_context(void *cookie)
978{
979 struct arm_smmu_domain *smmu_domain = cookie;
Will Deacon44680ee2014-06-25 11:29:12 +0100980 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
981 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon1463fe42013-07-31 19:21:27 +0100982 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
Will Deacon518f7132014-11-14 17:17:54 +0000983 void __iomem *base;
Will Deacon1463fe42013-07-31 19:21:27 +0100984
985 if (stage1) {
986 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800987 writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +0100988 base + ARM_SMMU_CB_S1_TLBIASID);
Mitchel Humpherysf3007992015-06-19 15:00:14 -0700989 arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +0100990 } else {
991 base = ARM_SMMU_GR0(smmu);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800992 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +0100993 base + ARM_SMMU_GR0_TLBIVMID);
Mitchel Humpherysf3007992015-06-19 15:00:14 -0700994 __arm_smmu_tlb_sync(smmu);
Will Deacon1463fe42013-07-31 19:21:27 +0100995 }
Will Deacon1463fe42013-07-31 19:21:27 +0100996}
997
Will Deacon518f7132014-11-14 17:17:54 +0000998static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
Robin Murphy06c610e2015-12-07 18:18:53 +0000999 size_t granule, bool leaf, void *cookie)
Will Deacon518f7132014-11-14 17:17:54 +00001000{
1001 struct arm_smmu_domain *smmu_domain = cookie;
1002 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1003 struct arm_smmu_device *smmu = smmu_domain->smmu;
1004 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1005 void __iomem *reg;
1006
1007 if (stage1) {
1008 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1009 reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
1010
Robin Murphy7602b872016-04-28 17:12:09 +01001011 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001012 iova &= ~12UL;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001013 iova |= ARM_SMMU_CB_ASID(smmu, cfg);
Robin Murphy75df1382015-12-07 18:18:52 +00001014 do {
1015 writel_relaxed(iova, reg);
1016 iova += granule;
1017 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001018 } else {
1019 iova >>= 12;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001020 iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
Robin Murphy75df1382015-12-07 18:18:52 +00001021 do {
1022 writeq_relaxed(iova, reg);
1023 iova += granule >> 12;
1024 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001025 }
Will Deacon518f7132014-11-14 17:17:54 +00001026 } else if (smmu->version == ARM_SMMU_V2) {
1027 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1028 reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
1029 ARM_SMMU_CB_S2_TLBIIPAS2;
Robin Murphy75df1382015-12-07 18:18:52 +00001030 iova >>= 12;
1031 do {
Robin Murphyf9a05f02016-04-13 18:13:01 +01001032 smmu_write_atomic_lq(iova, reg);
Robin Murphy75df1382015-12-07 18:18:52 +00001033 iova += granule >> 12;
1034 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001035 } else {
1036 reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001037 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
Will Deacon518f7132014-11-14 17:17:54 +00001038 }
1039}
1040
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001041struct arm_smmu_secure_pool_chunk {
1042 void *addr;
1043 size_t size;
1044 struct list_head list;
1045};
1046
1047static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
1048 size_t size)
1049{
1050 struct arm_smmu_secure_pool_chunk *it;
1051
1052 list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
1053 if (it->size == size) {
1054 void *addr = it->addr;
1055
1056 list_del(&it->list);
1057 kfree(it);
1058 return addr;
1059 }
1060 }
1061
1062 return NULL;
1063}
1064
1065static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
1066 void *addr, size_t size)
1067{
1068 struct arm_smmu_secure_pool_chunk *chunk;
1069
1070 chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
1071 if (!chunk)
1072 return -ENOMEM;
1073
1074 chunk->addr = addr;
1075 chunk->size = size;
1076 memset(addr, 0, size);
1077 list_add(&chunk->list, &smmu_domain->secure_pool_list);
1078
1079 return 0;
1080}
1081
1082static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
1083{
1084 struct arm_smmu_secure_pool_chunk *it, *i;
1085
1086 list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
1087 arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
1088 /* pages will be freed later (after being unassigned) */
1089 kfree(it);
1090 }
1091}
1092
Patrick Dalyc11d1082016-09-01 15:52:44 -07001093static void *arm_smmu_alloc_pages_exact(void *cookie,
1094 size_t size, gfp_t gfp_mask)
1095{
1096 int ret;
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001097 void *page;
1098 struct arm_smmu_domain *smmu_domain = cookie;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001099
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001100 if (!arm_smmu_is_domain_secure(smmu_domain))
1101 return alloc_pages_exact(size, gfp_mask);
1102
1103 page = arm_smmu_secure_pool_remove(smmu_domain, size);
1104 if (page)
1105 return page;
1106
1107 page = alloc_pages_exact(size, gfp_mask);
1108 if (page) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07001109 ret = arm_smmu_prepare_pgtable(page, cookie);
1110 if (ret) {
1111 free_pages_exact(page, size);
1112 return NULL;
1113 }
1114 }
1115
1116 return page;
1117}
1118
1119static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
1120{
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001121 struct arm_smmu_domain *smmu_domain = cookie;
1122
1123 if (!arm_smmu_is_domain_secure(smmu_domain)) {
1124 free_pages_exact(virt, size);
1125 return;
1126 }
1127
1128 if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
1129 arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001130}
1131
Will Deacon518f7132014-11-14 17:17:54 +00001132static struct iommu_gather_ops arm_smmu_gather_ops = {
1133 .tlb_flush_all = arm_smmu_tlb_inv_context,
1134 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
1135 .tlb_sync = arm_smmu_tlb_sync,
Patrick Dalyc11d1082016-09-01 15:52:44 -07001136 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
1137 .free_pages_exact = arm_smmu_free_pages_exact,
Will Deacon518f7132014-11-14 17:17:54 +00001138};
1139
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001140static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
1141 dma_addr_t iova, u32 fsr)
1142{
1143 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001144 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001145 phys_addr_t phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07001146 phys_addr_t phys_post_tlbiall;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001147
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001148 phys = arm_smmu_iova_to_phys_hard(domain, iova);
1149 arm_smmu_tlb_inv_context(smmu_domain);
1150 phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001151
Patrick Dalyad441dd2016-09-15 15:50:46 -07001152 if (phys != phys_post_tlbiall) {
1153 dev_err(smmu->dev,
1154 "ATOS results differed across TLBIALL...\n"
1155 "Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
1156 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001157
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001158 return (phys == 0 ? phys_post_tlbiall : phys);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001159}
1160
Will Deacon45ae7cf2013-06-24 18:31:25 +01001161static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
1162{
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001163 int flags, ret, tmp;
Patrick Daly5ba28112016-08-30 19:18:52 -07001164 u32 fsr, fsynr, resume;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001165 unsigned long iova;
1166 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +01001167 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001168 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1169 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001170 void __iomem *cb_base;
Shalaj Jain04059c52015-03-03 13:34:59 -08001171 void __iomem *gr1_base;
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001172 bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001173 phys_addr_t phys_soft;
Shalaj Jain04059c52015-03-03 13:34:59 -08001174 u32 frsynra;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001175 bool non_fatal_fault = !!(smmu_domain->attributes &
1176 DOMAIN_ATTR_NON_FATAL_FAULTS);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001177
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001178 static DEFINE_RATELIMIT_STATE(_rs,
1179 DEFAULT_RATELIMIT_INTERVAL,
1180 DEFAULT_RATELIMIT_BURST);
1181
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001182 ret = arm_smmu_power_on(smmu->pwr);
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001183 if (ret)
1184 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001185
Shalaj Jain04059c52015-03-03 13:34:59 -08001186 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001187 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001188 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
1189
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001190 if (!(fsr & FSR_FAULT)) {
1191 ret = IRQ_NONE;
1192 goto out_power_off;
1193 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001194
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001195 if (fatal_asf && (fsr & FSR_ASF)) {
1196 dev_err(smmu->dev,
1197 "Took an address size fault. Refusing to recover.\n");
1198 BUG();
1199 }
1200
Will Deacon45ae7cf2013-06-24 18:31:25 +01001201 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
Patrick Daly5ba28112016-08-30 19:18:52 -07001202 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001203 if (fsr & FSR_TF)
1204 flags |= IOMMU_FAULT_TRANSLATION;
1205 if (fsr & FSR_PF)
1206 flags |= IOMMU_FAULT_PERMISSION;
Mitchel Humpherysdd75e332015-08-19 11:02:33 -07001207 if (fsr & FSR_EF)
1208 flags |= IOMMU_FAULT_EXTERNAL;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001209 if (fsr & FSR_SS)
1210 flags |= IOMMU_FAULT_TRANSACTION_STALLED;
Patrick Daly5ba28112016-08-30 19:18:52 -07001211
Robin Murphyf9a05f02016-04-13 18:13:01 +01001212 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001213 phys_soft = arm_smmu_iova_to_phys(domain, iova);
Shalaj Jain04059c52015-03-03 13:34:59 -08001214 frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
1215 frsynra &= CBFRSYNRA_SID_MASK;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001216 tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
1217 if (!tmp || (tmp == -EBUSY)) {
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001218 dev_dbg(smmu->dev,
1219 "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1220 iova, fsr, fsynr, cfg->cbndx);
1221 dev_dbg(smmu->dev,
1222 "soft iova-to-phys=%pa\n", &phys_soft);
Patrick Daly5ba28112016-08-30 19:18:52 -07001223 ret = IRQ_HANDLED;
Shrenuj Bansald5083c02015-09-18 14:59:09 -07001224 resume = RESUME_TERMINATE;
Patrick Daly5ba28112016-08-30 19:18:52 -07001225 } else {
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001226 phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
1227 fsr);
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001228 if (__ratelimit(&_rs)) {
1229 dev_err(smmu->dev,
1230 "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1231 iova, fsr, fsynr, cfg->cbndx);
1232 dev_err(smmu->dev, "FAR = %016lx\n",
1233 (unsigned long)iova);
1234 dev_err(smmu->dev,
1235 "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
1236 fsr,
1237 (fsr & 0x02) ? "TF " : "",
1238 (fsr & 0x04) ? "AFF " : "",
1239 (fsr & 0x08) ? "PF " : "",
1240 (fsr & 0x10) ? "EF " : "",
1241 (fsr & 0x20) ? "TLBMCF " : "",
1242 (fsr & 0x40) ? "TLBLKF " : "",
1243 (fsr & 0x80) ? "MHF " : "",
1244 (fsr & 0x40000000) ? "SS " : "",
1245 (fsr & 0x80000000) ? "MULTI " : "");
1246 dev_err(smmu->dev,
1247 "soft iova-to-phys=%pa\n", &phys_soft);
Mitchel Humpherysd03b65d2015-11-05 11:50:29 -08001248 if (!phys_soft)
1249 dev_err(smmu->dev,
1250 "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
1251 dev_name(smmu->dev));
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001252 if (phys_atos)
1253 dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
1254 &phys_atos);
1255 else
1256 dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001257 dev_err(smmu->dev, "SID=0x%x\n", frsynra);
1258 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001259 ret = IRQ_NONE;
1260 resume = RESUME_TERMINATE;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001261 if (!non_fatal_fault) {
1262 dev_err(smmu->dev,
1263 "Unhandled arm-smmu context fault!\n");
1264 BUG();
1265 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001266 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001267
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001268 /*
1269 * If the client returns -EBUSY, do not clear FSR and do not RESUME
1270 * if stalled. This is required to keep the IOMMU client stalled on
1271 * the outstanding fault. This gives the client a chance to take any
1272 * debug action and then terminate the stalled transaction.
1273 * So, the sequence in case of stall on fault should be:
1274 * 1) Do not clear FSR or write to RESUME here
1275 * 2) Client takes any debug action
1276 * 3) Client terminates the stalled transaction and resumes the IOMMU
1277 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1278 * not before so that the fault remains outstanding. This ensures
1279 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1280 * need to be terminated.
1281 */
1282 if (tmp != -EBUSY) {
1283 /* Clear the faulting FSR */
1284 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001285
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001286 /*
1287 * Barrier required to ensure that the FSR is cleared
1288 * before resuming SMMU operation
1289 */
1290 wmb();
1291
1292 /* Retry or terminate any stalled transactions */
1293 if (fsr & FSR_SS)
1294 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1295 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001296
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001297out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001298 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001299
Patrick Daly5ba28112016-08-30 19:18:52 -07001300 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001301}
1302
1303static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1304{
1305 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1306 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001307 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001308
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001309 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001310 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001311
Will Deacon45ae7cf2013-06-24 18:31:25 +01001312 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1313 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1314 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1315 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1316
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001317 if (!gfsr) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001318 arm_smmu_power_off(smmu->pwr);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001319 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001320 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001321
Will Deacon45ae7cf2013-06-24 18:31:25 +01001322 dev_err_ratelimited(smmu->dev,
1323 "Unexpected global fault, this could be serious\n");
1324 dev_err_ratelimited(smmu->dev,
1325 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1326 gfsr, gfsynr0, gfsynr1, gfsynr2);
1327
1328 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001329 arm_smmu_power_off(smmu->pwr);
Will Deaconadaba322013-07-31 19:21:26 +01001330 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001331}
1332
Will Deacon518f7132014-11-14 17:17:54 +00001333static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1334 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001335{
Robin Murphyb94df6f2016-08-11 17:44:06 +01001336 u32 reg, reg2;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001337 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001338 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +01001339 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1340 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001341 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001342
Will Deacon45ae7cf2013-06-24 18:31:25 +01001343 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001344 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1345 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001346
Will Deacon4a1c93c2015-03-04 12:21:03 +00001347 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001348 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1349 reg = CBA2R_RW64_64BIT;
1350 else
1351 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001352 /* 16-bit VMIDs live in CBA2R */
1353 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001354 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001355
Will Deacon4a1c93c2015-03-04 12:21:03 +00001356 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
1357 }
1358
Will Deacon45ae7cf2013-06-24 18:31:25 +01001359 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001360 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001361 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001362 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001363
Will Deacon57ca90f2014-02-06 14:59:05 +00001364 /*
1365 * Use the weakest shareability/memory types, so they are
1366 * overridden by the ttbcr/pte.
1367 */
1368 if (stage1) {
1369 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1370 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001371 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1372 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001373 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001374 }
Will Deacon44680ee2014-06-25 11:29:12 +01001375 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001376
Will Deacon518f7132014-11-14 17:17:54 +00001377 /* TTBRs */
1378 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001379 u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001380
Robin Murphyb94df6f2016-08-11 17:44:06 +01001381 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1382 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
1383 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
1384 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
1385 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
1386 writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
1387 } else {
1388 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1389 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1390 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
1391 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
1392 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1393 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
1394 }
Will Deacon518f7132014-11-14 17:17:54 +00001395 } else {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001396 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001397 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +00001398 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001399
Will Deacon518f7132014-11-14 17:17:54 +00001400 /* TTBCR */
1401 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001402 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1403 reg = pgtbl_cfg->arm_v7s_cfg.tcr;
1404 reg2 = 0;
1405 } else {
1406 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1407 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
1408 reg2 |= TTBCR2_SEP_UPSTREAM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001409 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001410 if (smmu->version > ARM_SMMU_V1)
1411 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001412 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001413 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001414 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001415 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001416
Will Deacon518f7132014-11-14 17:17:54 +00001417 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001418 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001419 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1420 reg = pgtbl_cfg->arm_v7s_cfg.prrr;
1421 reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
1422 } else {
1423 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1424 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1425 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001426 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001427 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001428 }
1429
Will Deacon45ae7cf2013-06-24 18:31:25 +01001430 /* SCTLR */
Robin Murphyb94df6f2016-08-11 17:44:06 +01001431 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001432
1433 if ((!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
1434 !(smmu_domain->attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) ||
1435 !stage1)
Patrick Dalye62d3362016-03-15 18:58:28 -07001436 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001437 if (stage1)
1438 reg |= SCTLR_S1_ASIDPNE;
1439#ifdef __BIG_ENDIAN
1440 reg |= SCTLR_E;
1441#endif
Will Deacon25724842013-08-21 13:49:53 +01001442 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001443}
1444
Patrick Dalyc190d932016-08-30 17:23:28 -07001445static int arm_smmu_init_asid(struct iommu_domain *domain,
1446 struct arm_smmu_device *smmu)
1447{
1448 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1449 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1450 bool dynamic = is_dynamic_domain(domain);
1451 int ret;
1452
1453 if (!dynamic) {
1454 cfg->asid = cfg->cbndx + 1;
1455 } else {
1456 mutex_lock(&smmu->idr_mutex);
1457 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1458 smmu->num_context_banks + 2,
1459 MAX_ASID + 1, GFP_KERNEL);
1460
1461 mutex_unlock(&smmu->idr_mutex);
1462 if (ret < 0) {
1463 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1464 ret);
1465 return ret;
1466 }
1467 cfg->asid = ret;
1468 }
1469 return 0;
1470}
1471
1472static void arm_smmu_free_asid(struct iommu_domain *domain)
1473{
1474 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1475 struct arm_smmu_device *smmu = smmu_domain->smmu;
1476 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1477 bool dynamic = is_dynamic_domain(domain);
1478
1479 if (cfg->asid == INVALID_ASID || !dynamic)
1480 return;
1481
1482 mutex_lock(&smmu->idr_mutex);
1483 idr_remove(&smmu->asid_idr, cfg->asid);
1484 mutex_unlock(&smmu->idr_mutex);
1485}
1486
Will Deacon45ae7cf2013-06-24 18:31:25 +01001487static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Patrick Dalyea63baa2017-02-13 17:11:33 -08001488 struct arm_smmu_device *smmu,
1489 struct device *dev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001490{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001491 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001492 unsigned long ias, oas;
1493 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001494 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001495 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001496 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001497 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyce6786f2016-11-09 14:19:23 -08001498 unsigned long quirks = 0;
Patrick Dalyc190d932016-08-30 17:23:28 -07001499 bool dynamic;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001500
Will Deacon518f7132014-11-14 17:17:54 +00001501 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001502 if (smmu_domain->smmu)
1503 goto out_unlock;
1504
Patrick Dalyc190d932016-08-30 17:23:28 -07001505 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1506 smmu_domain->cfg.asid = INVALID_ASID;
1507
Patrick Dalyc190d932016-08-30 17:23:28 -07001508 dynamic = is_dynamic_domain(domain);
1509 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1510 dev_err(smmu->dev, "dynamic domains not supported\n");
1511 ret = -EPERM;
1512 goto out_unlock;
1513 }
1514
Will Deaconc752ce42014-06-25 22:46:31 +01001515 /*
1516 * Mapping the requested stage onto what we support is surprisingly
1517 * complicated, mainly because the spec allows S1+S2 SMMUs without
1518 * support for nested translation. That means we end up with the
1519 * following table:
1520 *
1521 * Requested Supported Actual
1522 * S1 N S1
1523 * S1 S1+S2 S1
1524 * S1 S2 S2
1525 * S1 S1 S1
1526 * N N N
1527 * N S1+S2 S2
1528 * N S2 S2
1529 * N S1 S1
1530 *
1531 * Note that you can't actually request stage-2 mappings.
1532 */
1533 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1534 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1535 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1536 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1537
Robin Murphy7602b872016-04-28 17:12:09 +01001538 /*
1539 * Choosing a suitable context format is even more fiddly. Until we
1540 * grow some way for the caller to express a preference, and/or move
1541 * the decision into the io-pgtable code where it arguably belongs,
1542 * just aim for the closest thing to the rest of the system, and hope
1543 * that the hardware isn't esoteric enough that we can't assume AArch64
1544 * support to be a superset of AArch32 support...
1545 */
1546 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1547 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001548 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
1549 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
1550 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
1551 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
1552 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +01001553 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1554 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1555 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1556 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1557 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1558
1559 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1560 ret = -EINVAL;
1561 goto out_unlock;
1562 }
1563
Will Deaconc752ce42014-06-25 22:46:31 +01001564 switch (smmu_domain->stage) {
1565 case ARM_SMMU_DOMAIN_S1:
1566 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1567 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001568 ias = smmu->va_size;
1569 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001570 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001571 fmt = ARM_64_LPAE_S1;
Patrick Daly4423d3e2017-05-04 18:17:51 -07001572 if (smmu->options & ARM_SMMU_OPT_3LVL_TABLES)
1573 ias = min(ias, 39UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001574 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +00001575 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001576 ias = min(ias, 32UL);
1577 oas = min(oas, 40UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001578 } else {
1579 fmt = ARM_V7S;
1580 ias = min(ias, 32UL);
1581 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +01001582 }
Will Deaconc752ce42014-06-25 22:46:31 +01001583 break;
1584 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001585 /*
1586 * We will likely want to change this if/when KVM gets
1587 * involved.
1588 */
Will Deaconc752ce42014-06-25 22:46:31 +01001589 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001590 cfg->cbar = CBAR_TYPE_S2_TRANS;
1591 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001592 ias = smmu->ipa_size;
1593 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001594 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001595 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001596 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001597 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001598 ias = min(ias, 40UL);
1599 oas = min(oas, 40UL);
1600 }
Will Deaconc752ce42014-06-25 22:46:31 +01001601 break;
1602 default:
1603 ret = -EINVAL;
1604 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001605 }
1606
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001607 if (is_fast)
1608 fmt = ARM_V8L_FAST;
1609
Patrick Dalyce6786f2016-11-09 14:19:23 -08001610 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
1611 quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
Liam Mark53cf2342016-12-20 11:36:07 -08001612 if (is_iommu_pt_coherent(smmu_domain))
1613 quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001614
Patrick Dalyc190d932016-08-30 17:23:28 -07001615 /* Dynamic domains must set cbndx through domain attribute */
1616 if (!dynamic) {
1617 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
Will Deacon45ae7cf2013-06-24 18:31:25 +01001618 smmu->num_context_banks);
Patrick Dalyc190d932016-08-30 17:23:28 -07001619 if (ret < 0)
1620 goto out_unlock;
1621 cfg->cbndx = ret;
1622 }
Robin Murphyb7862e32016-04-13 18:13:03 +01001623 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001624 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1625 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001626 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001627 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001628 }
1629
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001630 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
Patrick Dalyce6786f2016-11-09 14:19:23 -08001631 .quirks = quirks,
Robin Murphyd5466352016-05-09 17:20:09 +01001632 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +00001633 .ias = ias,
1634 .oas = oas,
1635 .tlb = &arm_smmu_gather_ops,
Robin Murphy2df7a252015-07-29 19:46:06 +01001636 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +00001637 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001638
Will Deacon518f7132014-11-14 17:17:54 +00001639 smmu_domain->smmu = smmu;
Patrick Dalyea63baa2017-02-13 17:11:33 -08001640 smmu_domain->dev = dev;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001641 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1642 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001643 if (!pgtbl_ops) {
1644 ret = -ENOMEM;
1645 goto out_clear_smmu;
1646 }
1647
Patrick Dalyc11d1082016-09-01 15:52:44 -07001648 /*
1649 * assign any page table memory that might have been allocated
1650 * during alloc_io_pgtable_ops
1651 */
Patrick Dalye271f212016-10-04 13:24:49 -07001652 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001653 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001654 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001655
Robin Murphyd5466352016-05-09 17:20:09 +01001656 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001657 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Robin Murphyd7a8d042016-09-12 17:13:58 +01001658 domain->geometry.aperture_end = (1UL << ias) - 1;
1659 domain->geometry.force_aperture = true;
Will Deacon518f7132014-11-14 17:17:54 +00001660
Patrick Dalyc190d932016-08-30 17:23:28 -07001661 /* Assign an asid */
1662 ret = arm_smmu_init_asid(domain, smmu);
1663 if (ret)
1664 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001665
Patrick Dalyc190d932016-08-30 17:23:28 -07001666 if (!dynamic) {
1667 /* Initialise the context bank with our page table cfg */
1668 arm_smmu_init_context_bank(smmu_domain,
1669 &smmu_domain->pgtbl_cfg);
1670
1671 /*
1672 * Request context fault interrupt. Do this last to avoid the
1673 * handler seeing a half-initialised domain state.
1674 */
1675 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1676 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001677 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1678 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001679 if (ret < 0) {
1680 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1681 cfg->irptndx, irq);
1682 cfg->irptndx = INVALID_IRPTNDX;
1683 goto out_clear_smmu;
1684 }
1685 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001686 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001687 }
Will Deacon518f7132014-11-14 17:17:54 +00001688 mutex_unlock(&smmu_domain->init_mutex);
1689
1690 /* Publish page table ops for map/unmap */
1691 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001692 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001693
Will Deacon518f7132014-11-14 17:17:54 +00001694out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001695 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001696 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001697out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001698 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001699 return ret;
1700}
1701
Patrick Daly77db4f92016-10-14 15:34:10 -07001702static void arm_smmu_domain_reinit(struct arm_smmu_domain *smmu_domain)
1703{
1704 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1705 smmu_domain->cfg.cbndx = INVALID_CBNDX;
1706 smmu_domain->secure_vmid = VMID_INVAL;
1707}
1708
Will Deacon45ae7cf2013-06-24 18:31:25 +01001709static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1710{
Joerg Roedel1d672632015-03-26 13:43:10 +01001711 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001712 struct arm_smmu_device *smmu = smmu_domain->smmu;
1713 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01001714 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001715 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07001716 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001717 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001718
Robin Murphy7e96c742016-09-14 15:26:46 +01001719 if (!smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001720 return;
1721
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001722 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001723 if (ret) {
1724 WARN_ONCE(ret, "Woops, powering on smmu %p failed. Leaking context bank\n",
1725 smmu);
1726 return;
1727 }
1728
Patrick Dalyc190d932016-08-30 17:23:28 -07001729 dynamic = is_dynamic_domain(domain);
1730 if (dynamic) {
1731 arm_smmu_free_asid(domain);
1732 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001733 arm_smmu_power_off(smmu->pwr);
Patrick Dalye271f212016-10-04 13:24:49 -07001734 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001735 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001736 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001737 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly77db4f92016-10-14 15:34:10 -07001738 arm_smmu_domain_reinit(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001739 return;
1740 }
1741
Will Deacon518f7132014-11-14 17:17:54 +00001742 /*
1743 * Disable the context bank and free the page tables before freeing
1744 * it.
1745 */
Will Deacon44680ee2014-06-25 11:29:12 +01001746 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001747 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01001748
Will Deacon44680ee2014-06-25 11:29:12 +01001749 if (cfg->irptndx != INVALID_IRPTNDX) {
1750 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08001751 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001752 }
1753
Markus Elfring44830b02015-11-06 18:32:41 +01001754 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07001755 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001756 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001757 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001758 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001759 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001760
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001761 arm_smmu_power_off(smmu->pwr);
Patrick Daly77db4f92016-10-14 15:34:10 -07001762 arm_smmu_domain_reinit(smmu_domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001763}
1764
Joerg Roedel1d672632015-03-26 13:43:10 +01001765static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001766{
1767 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001768
Patrick Daly09801312016-08-29 17:02:52 -07001769 /* Do not support DOMAIN_DMA for now */
1770 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01001771 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001772 /*
1773 * Allocate the domain and initialise some of its data structures.
1774 * We can't really do anything meaningful until we've added a
1775 * master.
1776 */
1777 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1778 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001779 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001780
Robin Murphy7e96c742016-09-14 15:26:46 +01001781 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
1782 iommu_get_dma_cookie(&smmu_domain->domain))) {
Robin Murphy9adb9592016-01-26 18:06:36 +00001783 kfree(smmu_domain);
1784 return NULL;
1785 }
1786
Will Deacon518f7132014-11-14 17:17:54 +00001787 mutex_init(&smmu_domain->init_mutex);
1788 spin_lock_init(&smmu_domain->pgtbl_lock);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001789 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
1790 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07001791 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001792 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Patrick Daly77db4f92016-10-14 15:34:10 -07001793 arm_smmu_domain_reinit(smmu_domain);
Joerg Roedel1d672632015-03-26 13:43:10 +01001794
1795 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001796}
1797
Joerg Roedel1d672632015-03-26 13:43:10 +01001798static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001799{
Joerg Roedel1d672632015-03-26 13:43:10 +01001800 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01001801
1802 /*
1803 * Free the domain resources. We assume that all devices have
1804 * already been detached.
1805 */
Robin Murphy9adb9592016-01-26 18:06:36 +00001806 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001807 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001808 kfree(smmu_domain);
1809}
1810
Robin Murphy468f4942016-09-12 17:13:49 +01001811static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
1812{
1813 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphyd5b41782016-09-14 15:21:39 +01001814 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
Robin Murphy468f4942016-09-12 17:13:49 +01001815
1816 if (smr->valid)
1817 reg |= SMR_VALID;
1818 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
1819}
1820
Robin Murphya754fd12016-09-12 17:13:50 +01001821static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
1822{
1823 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
1824 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
1825 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
1826 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
1827
1828 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
1829}
1830
1831static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
1832{
1833 arm_smmu_write_s2cr(smmu, idx);
1834 if (smmu->smrs)
1835 arm_smmu_write_smr(smmu, idx);
1836}
1837
Robin Murphy6668f692016-09-12 17:13:54 +01001838static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy468f4942016-09-12 17:13:49 +01001839{
1840 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy6668f692016-09-12 17:13:54 +01001841 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001842
Robin Murphy6668f692016-09-12 17:13:54 +01001843 /* Stream indexing is blissfully easy */
1844 if (!smrs)
1845 return id;
Robin Murphy468f4942016-09-12 17:13:49 +01001846
Robin Murphy6668f692016-09-12 17:13:54 +01001847 /* Validating SMRs is... less so */
1848 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1849 if (!smrs[i].valid) {
1850 /*
1851 * Note the first free entry we come across, which
1852 * we'll claim in the end if nothing else matches.
1853 */
1854 if (free_idx < 0)
1855 free_idx = i;
Robin Murphy468f4942016-09-12 17:13:49 +01001856 continue;
1857 }
Robin Murphy6668f692016-09-12 17:13:54 +01001858 /*
1859 * If the new entry is _entirely_ matched by an existing entry,
1860 * then reuse that, with the guarantee that there also cannot
1861 * be any subsequent conflicting entries. In normal use we'd
1862 * expect simply identical entries for this case, but there's
1863 * no harm in accommodating the generalisation.
1864 */
1865 if ((mask & smrs[i].mask) == mask &&
1866 !((id ^ smrs[i].id) & ~smrs[i].mask))
1867 return i;
1868 /*
1869 * If the new entry has any other overlap with an existing one,
1870 * though, then there always exists at least one stream ID
1871 * which would cause a conflict, and we can't allow that risk.
1872 */
1873 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
1874 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001875 }
1876
Robin Murphy6668f692016-09-12 17:13:54 +01001877 return free_idx;
1878}
1879
1880static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1881{
1882 if (--smmu->s2crs[idx].count)
1883 return false;
1884
1885 smmu->s2crs[idx] = s2cr_init_val;
1886 if (smmu->smrs)
1887 smmu->smrs[idx].valid = false;
1888
1889 return true;
1890}
1891
1892static int arm_smmu_master_alloc_smes(struct device *dev)
1893{
Robin Murphy06e393e2016-09-12 17:13:55 +01001894 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1895 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy6668f692016-09-12 17:13:54 +01001896 struct arm_smmu_device *smmu = cfg->smmu;
1897 struct arm_smmu_smr *smrs = smmu->smrs;
1898 struct iommu_group *group;
1899 int i, idx, ret;
1900
1901 mutex_lock(&smmu->stream_map_mutex);
1902 /* Figure out a viable stream map entry allocation */
Robin Murphy06e393e2016-09-12 17:13:55 +01001903 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy7e96c742016-09-14 15:26:46 +01001904 u16 sid = fwspec->ids[i];
1905 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1906
Robin Murphy6668f692016-09-12 17:13:54 +01001907 if (idx != INVALID_SMENDX) {
1908 ret = -EEXIST;
1909 goto out_err;
1910 }
1911
Robin Murphy7e96c742016-09-14 15:26:46 +01001912 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy6668f692016-09-12 17:13:54 +01001913 if (ret < 0)
1914 goto out_err;
1915
1916 idx = ret;
1917 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy7e96c742016-09-14 15:26:46 +01001918 smrs[idx].id = sid;
1919 smrs[idx].mask = mask;
Robin Murphy6668f692016-09-12 17:13:54 +01001920 smrs[idx].valid = true;
1921 }
1922 smmu->s2crs[idx].count++;
1923 cfg->smendx[i] = (s16)idx;
1924 }
1925
1926 group = iommu_group_get_for_dev(dev);
1927 if (!group)
1928 group = ERR_PTR(-ENOMEM);
1929 if (IS_ERR(group)) {
1930 ret = PTR_ERR(group);
1931 goto out_err;
1932 }
1933 iommu_group_put(group);
Robin Murphy468f4942016-09-12 17:13:49 +01001934
Will Deacon45ae7cf2013-06-24 18:31:25 +01001935 /* It worked! Now, poke the actual hardware */
Robin Murphy06e393e2016-09-12 17:13:55 +01001936 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01001937 arm_smmu_write_sme(smmu, idx);
1938 smmu->s2crs[idx].group = group;
1939 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001940
Robin Murphy6668f692016-09-12 17:13:54 +01001941 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001942 return 0;
1943
Robin Murphy6668f692016-09-12 17:13:54 +01001944out_err:
Robin Murphy468f4942016-09-12 17:13:49 +01001945 while (i--) {
Robin Murphy6668f692016-09-12 17:13:54 +01001946 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy468f4942016-09-12 17:13:49 +01001947 cfg->smendx[i] = INVALID_SMENDX;
1948 }
Robin Murphy6668f692016-09-12 17:13:54 +01001949 mutex_unlock(&smmu->stream_map_mutex);
1950 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001951}
1952
Robin Murphy06e393e2016-09-12 17:13:55 +01001953static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001954{
Robin Murphy06e393e2016-09-12 17:13:55 +01001955 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1956 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy1fb519a2016-09-12 17:13:53 +01001957 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01001958
Robin Murphy6668f692016-09-12 17:13:54 +01001959 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01001960 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01001961 if (arm_smmu_free_sme(smmu, idx))
1962 arm_smmu_write_sme(smmu, idx);
Robin Murphy468f4942016-09-12 17:13:49 +01001963 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001964 }
Robin Murphy6668f692016-09-12 17:13:54 +01001965 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001966}
1967
Will Deacon45ae7cf2013-06-24 18:31:25 +01001968static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphy06e393e2016-09-12 17:13:55 +01001969 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001970{
Will Deacon44680ee2014-06-25 11:29:12 +01001971 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01001972 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
1973 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
1974 u8 cbndx = smmu_domain->cfg.cbndx;
Robin Murphy6668f692016-09-12 17:13:54 +01001975 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001976
Robin Murphy06e393e2016-09-12 17:13:55 +01001977 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphya754fd12016-09-12 17:13:50 +01001978 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy6668f692016-09-12 17:13:54 +01001979 continue;
Robin Murphya754fd12016-09-12 17:13:50 +01001980
1981 s2cr[idx].type = type;
1982 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
1983 s2cr[idx].cbndx = cbndx;
1984 arm_smmu_write_s2cr(smmu, idx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001985 }
1986
1987 return 0;
1988}
1989
Patrick Daly09801312016-08-29 17:02:52 -07001990static void arm_smmu_detach_dev(struct iommu_domain *domain,
1991 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001992{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001993 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07001994 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Daly09801312016-08-29 17:02:52 -07001995 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07001996 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07001997
1998 if (dynamic)
1999 return;
2000
Patrick Daly09801312016-08-29 17:02:52 -07002001 if (!smmu) {
2002 dev_err(dev, "Domain not attached; cannot detach!\n");
2003 return;
2004 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002005
Patrick Daly8befb662016-08-17 20:03:28 -07002006 /* Remove additional vote for atomic power */
2007 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002008 WARN_ON(arm_smmu_power_on_atomic(smmu->pwr));
2009 arm_smmu_power_off(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07002010 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002011}
2012
Patrick Dalye271f212016-10-04 13:24:49 -07002013static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07002014{
Patrick Dalye271f212016-10-04 13:24:49 -07002015 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002016 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2017 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
2018 int source_vmid = VMID_HLOS;
2019 struct arm_smmu_pte_info *pte_info, *temp;
2020
Patrick Dalye271f212016-10-04 13:24:49 -07002021 if (!arm_smmu_is_domain_secure(smmu_domain))
2022 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002023
Patrick Dalye271f212016-10-04 13:24:49 -07002024 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002025 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2026 PAGE_SIZE, &source_vmid, 1,
2027 dest_vmids, dest_perms, 2);
2028 if (WARN_ON(ret))
2029 break;
2030 }
2031
2032 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2033 entry) {
2034 list_del(&pte_info->entry);
2035 kfree(pte_info);
2036 }
Patrick Dalye271f212016-10-04 13:24:49 -07002037 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002038}
2039
2040static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2041{
2042 int ret;
2043 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002044 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002045 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2046 struct arm_smmu_pte_info *pte_info, *temp;
2047
Patrick Dalye271f212016-10-04 13:24:49 -07002048 if (!arm_smmu_is_domain_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002049 return;
2050
2051 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2052 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2053 PAGE_SIZE, source_vmlist, 2,
2054 &dest_vmids, &dest_perms, 1);
2055 if (WARN_ON(ret))
2056 break;
2057 free_pages_exact(pte_info->virt_addr, pte_info->size);
2058 }
2059
2060 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2061 entry) {
2062 list_del(&pte_info->entry);
2063 kfree(pte_info);
2064 }
2065}
2066
2067static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2068{
2069 struct arm_smmu_domain *smmu_domain = cookie;
2070 struct arm_smmu_pte_info *pte_info;
2071
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002072 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002073
2074 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2075 if (!pte_info)
2076 return;
2077
2078 pte_info->virt_addr = addr;
2079 pte_info->size = size;
2080 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2081}
2082
2083static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2084{
2085 struct arm_smmu_domain *smmu_domain = cookie;
2086 struct arm_smmu_pte_info *pte_info;
2087
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002088 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002089
2090 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2091 if (!pte_info)
2092 return -ENOMEM;
2093 pte_info->virt_addr = addr;
2094 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2095 return 0;
2096}
2097
Will Deacon45ae7cf2013-06-24 18:31:25 +01002098static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2099{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002100 int ret;
Robin Murphy06e393e2016-09-12 17:13:55 +01002101 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Will Deacon518f7132014-11-14 17:17:54 +00002102 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +01002103 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly8befb662016-08-17 20:03:28 -07002104 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002105
Robin Murphy06e393e2016-09-12 17:13:55 +01002106 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002107 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2108 return -ENXIO;
2109 }
Robin Murphy06e393e2016-09-12 17:13:55 +01002110
Robin Murphy4f79b142016-10-17 12:06:21 +01002111 /*
2112 * FIXME: The arch/arm DMA API code tries to attach devices to its own
2113 * domains between of_xlate() and add_device() - we have no way to cope
2114 * with that, so until ARM gets converted to rely on groups and default
2115 * domains, just say no (but more politely than by dereferencing NULL).
2116 * This should be at least a WARN_ON once that's sorted.
2117 */
2118 if (!fwspec->iommu_priv)
2119 return -ENODEV;
2120
Robin Murphy06e393e2016-09-12 17:13:55 +01002121 smmu = fwspec_smmu(fwspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002122
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002123 /* Enable Clocks and Power */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002124 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002125 if (ret)
2126 return ret;
2127
Will Deacon518f7132014-11-14 17:17:54 +00002128 /* Ensure that the domain is finalised */
Patrick Dalyea63baa2017-02-13 17:11:33 -08002129 ret = arm_smmu_init_domain_context(domain, smmu, dev);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002130 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002131 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002132
Patrick Dalyc190d932016-08-30 17:23:28 -07002133 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002134 if (is_dynamic_domain(domain)) {
2135 ret = 0;
2136 goto out_power_off;
2137 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002138
Will Deacon45ae7cf2013-06-24 18:31:25 +01002139 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002140 * Sanity check the domain. We don't support domains across
2141 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002142 */
Robin Murphy06e393e2016-09-12 17:13:55 +01002143 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002144 dev_err(dev,
2145 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002146 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002147 ret = -EINVAL;
2148 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002149 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002150
2151 /* Looks ok, so add the device to the domain */
Robin Murphy06e393e2016-09-12 17:13:55 +01002152 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002153
2154out_power_off:
Patrick Daly466237d2016-10-14 15:59:14 -07002155 /*
2156 * Keep an additional vote for non-atomic power until domain is
2157 * detached
2158 */
2159 if (!ret && atomic_domain) {
2160 WARN_ON(arm_smmu_power_on(smmu->pwr));
2161 arm_smmu_power_off_atomic(smmu->pwr);
2162 }
2163
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002164 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002165
Will Deacon45ae7cf2013-06-24 18:31:25 +01002166 return ret;
2167}
2168
Will Deacon45ae7cf2013-06-24 18:31:25 +01002169static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002170 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002171{
Will Deacon518f7132014-11-14 17:17:54 +00002172 int ret;
2173 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002174 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002175 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002176
Will Deacon518f7132014-11-14 17:17:54 +00002177 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002178 return -ENODEV;
2179
Patrick Dalye271f212016-10-04 13:24:49 -07002180 arm_smmu_secure_domain_lock(smmu_domain);
2181
Will Deacon518f7132014-11-14 17:17:54 +00002182 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2183 ret = ops->map(ops, iova, paddr, size, prot);
2184 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002185
2186 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002187 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002188
Will Deacon518f7132014-11-14 17:17:54 +00002189 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002190}
2191
2192static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2193 size_t size)
2194{
Will Deacon518f7132014-11-14 17:17:54 +00002195 size_t ret;
2196 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002197 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002198 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002199
Will Deacon518f7132014-11-14 17:17:54 +00002200 if (!ops)
2201 return 0;
2202
Patrick Daly8befb662016-08-17 20:03:28 -07002203 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002204 if (ret)
2205 return ret;
2206
Patrick Dalye271f212016-10-04 13:24:49 -07002207 arm_smmu_secure_domain_lock(smmu_domain);
2208
Will Deacon518f7132014-11-14 17:17:54 +00002209 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2210 ret = ops->unmap(ops, iova, size);
2211 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002212
Patrick Daly8befb662016-08-17 20:03:28 -07002213 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002214 /*
2215 * While splitting up block mappings, we might allocate page table
2216 * memory during unmap, so the vmids needs to be assigned to the
2217 * memory here as well.
2218 */
2219 arm_smmu_assign_table(smmu_domain);
2220 /* Also unassign any pages that were free'd during unmap */
2221 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002222 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002223 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002224}
2225
Patrick Daly88d321d2017-02-09 18:02:13 -08002226#define MAX_MAP_SG_BATCH_SIZE (SZ_4M)
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002227static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2228 struct scatterlist *sg, unsigned int nents, int prot)
2229{
2230 int ret;
Patrick Daly88d321d2017-02-09 18:02:13 -08002231 size_t size, batch_size, size_to_unmap = 0;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002232 unsigned long flags;
2233 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2234 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Patrick Daly88d321d2017-02-09 18:02:13 -08002235 unsigned int idx_start, idx_end;
2236 struct scatterlist *sg_start, *sg_end;
2237 unsigned long __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002238
2239 if (!ops)
2240 return -ENODEV;
2241
Patrick Daly8befb662016-08-17 20:03:28 -07002242 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002243 if (ret)
2244 return ret;
2245
Patrick Daly88d321d2017-02-09 18:02:13 -08002246 __saved_iova_start = iova;
2247 idx_start = idx_end = 0;
2248 sg_start = sg_end = sg;
2249 while (idx_end < nents) {
2250 batch_size = sg_end->length;
2251 sg_end = sg_next(sg_end);
2252 idx_end++;
2253 while ((idx_end < nents) &&
2254 (batch_size + sg_end->length < MAX_MAP_SG_BATCH_SIZE)) {
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002255
Patrick Daly88d321d2017-02-09 18:02:13 -08002256 batch_size += sg_end->length;
2257 sg_end = sg_next(sg_end);
2258 idx_end++;
2259 }
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002260
Patrick Daly88d321d2017-02-09 18:02:13 -08002261 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2262 ret = ops->map_sg(ops, iova, sg_start, idx_end - idx_start,
2263 prot, &size);
2264 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2265 /* Returns 0 on error */
2266 if (!ret) {
2267 size_to_unmap = iova + size - __saved_iova_start;
2268 goto out;
2269 }
2270
2271 iova += batch_size;
2272 idx_start = idx_end;
2273 sg_start = sg_end;
2274 }
2275
2276out:
Patrick Dalyc11d1082016-09-01 15:52:44 -07002277 arm_smmu_assign_table(smmu_domain);
2278
Patrick Daly88d321d2017-02-09 18:02:13 -08002279 if (size_to_unmap) {
2280 arm_smmu_unmap(domain, __saved_iova_start, size_to_unmap);
2281 iova = __saved_iova_start;
2282 }
2283 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
2284 return iova - __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002285}
2286
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002287static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyad441dd2016-09-15 15:50:46 -07002288 dma_addr_t iova)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002289{
Joerg Roedel1d672632015-03-26 13:43:10 +01002290 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002291 struct arm_smmu_device *smmu = smmu_domain->smmu;
2292 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2293 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
2294 struct device *dev = smmu->dev;
2295 void __iomem *cb_base;
2296 u32 tmp;
2297 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002298 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002299
2300 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2301
Robin Murphy661d9622015-05-27 17:09:34 +01002302 /* ATS1 registers can only be written atomically */
2303 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002304 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002305 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2306 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002307 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002308
2309 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2310 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002311 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002312 dev_err(dev,
2313 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2314 &iova, &phys);
2315 phys = 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002316 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002317 }
2318
Robin Murphyf9a05f02016-04-13 18:13:01 +01002319 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002320 if (phys & CB_PAR_F) {
2321 dev_err(dev, "translation fault!\n");
2322 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002323 phys = 0;
2324 } else {
2325 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002326 }
Patrick Dalyad441dd2016-09-15 15:50:46 -07002327
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002328 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002329}
2330
Will Deacon45ae7cf2013-06-24 18:31:25 +01002331static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002332 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002333{
Will Deacon518f7132014-11-14 17:17:54 +00002334 phys_addr_t ret;
2335 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002336 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002337 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002338
Will Deacon518f7132014-11-14 17:17:54 +00002339 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002340 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002341
Will Deacon518f7132014-11-14 17:17:54 +00002342 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002343 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002344 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002345
Will Deacon518f7132014-11-14 17:17:54 +00002346 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002347}
2348
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002349/*
2350 * This function can sleep, and cannot be called from atomic context. Will
2351 * power on register block if required. This restriction does not apply to the
2352 * original iova_to_phys() op.
2353 */
2354static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2355 dma_addr_t iova)
2356{
2357 phys_addr_t ret = 0;
2358 unsigned long flags;
2359 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002360
Patrick Dalyad441dd2016-09-15 15:50:46 -07002361 if (smmu_domain->smmu->arch_ops &&
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002362 smmu_domain->smmu->arch_ops->iova_to_phys_hard) {
2363 ret = smmu_domain->smmu->arch_ops->iova_to_phys_hard(
Patrick Dalyad441dd2016-09-15 15:50:46 -07002364 domain, iova);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002365 return ret;
2366 }
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002367
2368 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2369 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2370 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyad441dd2016-09-15 15:50:46 -07002371 ret = __arm_smmu_iova_to_phys_hard(domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002372
2373 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2374
2375 return ret;
2376}
2377
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002378static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002379{
Will Deacond0948942014-06-24 17:30:10 +01002380 switch (cap) {
2381 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002382 /*
2383 * Return true here as the SMMU can always send out coherent
2384 * requests.
2385 */
2386 return true;
Will Deacond0948942014-06-24 17:30:10 +01002387 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002388 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002389 case IOMMU_CAP_NOEXEC:
2390 return true;
Will Deacond0948942014-06-24 17:30:10 +01002391 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002392 return false;
Will Deacond0948942014-06-24 17:30:10 +01002393 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002394}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002395
Patrick Daly8e3371a2017-02-13 22:14:53 -08002396static struct arm_smmu_device *arm_smmu_get_by_list(struct device_node *np)
2397{
2398 struct arm_smmu_device *smmu;
2399 unsigned long flags;
2400
2401 spin_lock_irqsave(&arm_smmu_devices_lock, flags);
2402 list_for_each_entry(smmu, &arm_smmu_devices, list) {
2403 if (smmu->dev->of_node == np) {
2404 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2405 return smmu;
2406 }
2407 }
2408 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2409 return NULL;
2410}
2411
Robin Murphy7e96c742016-09-14 15:26:46 +01002412static int arm_smmu_match_node(struct device *dev, void *data)
2413{
2414 return dev->of_node == data;
2415}
2416
2417static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
2418{
2419 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
2420 np, arm_smmu_match_node);
2421 put_device(dev);
Patrick Daly8e3371a2017-02-13 22:14:53 -08002422 return dev ? dev_get_drvdata(dev) : arm_smmu_get_by_list(np);
Robin Murphy7e96c742016-09-14 15:26:46 +01002423}
2424
Will Deacon03edb222015-01-19 14:27:33 +00002425static int arm_smmu_add_device(struct device *dev)
2426{
Robin Murphy06e393e2016-09-12 17:13:55 +01002427 struct arm_smmu_device *smmu;
Robin Murphyd5b41782016-09-14 15:21:39 +01002428 struct arm_smmu_master_cfg *cfg;
Robin Murphy7e96c742016-09-14 15:26:46 +01002429 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyd5b41782016-09-14 15:21:39 +01002430 int i, ret;
2431
Robin Murphy7e96c742016-09-14 15:26:46 +01002432 if (using_legacy_binding) {
2433 ret = arm_smmu_register_legacy_master(dev, &smmu);
2434 fwspec = dev->iommu_fwspec;
2435 if (ret)
2436 goto out_free;
Robin Murphy22e6f6c2016-11-02 17:31:32 +00002437 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002438 smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
2439 if (!smmu)
2440 return -ENODEV;
2441 } else {
2442 return -ENODEV;
2443 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002444
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002445 ret = arm_smmu_power_on(smmu->pwr);
2446 if (ret)
2447 goto out_free;
2448
Robin Murphyd5b41782016-09-14 15:21:39 +01002449 ret = -EINVAL;
Robin Murphy06e393e2016-09-12 17:13:55 +01002450 for (i = 0; i < fwspec->num_ids; i++) {
2451 u16 sid = fwspec->ids[i];
Robin Murphy7e96c742016-09-14 15:26:46 +01002452 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyd5b41782016-09-14 15:21:39 +01002453
Robin Murphy06e393e2016-09-12 17:13:55 +01002454 if (sid & ~smmu->streamid_mask) {
Robin Murphyd5b41782016-09-14 15:21:39 +01002455 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002456 sid, smmu->streamid_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002457 goto out_pwr_off;
Robin Murphyd5b41782016-09-14 15:21:39 +01002458 }
Robin Murphy7e96c742016-09-14 15:26:46 +01002459 if (mask & ~smmu->smr_mask_mask) {
2460 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
2461 sid, smmu->smr_mask_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002462 goto out_pwr_off;
Robin Murphy7e96c742016-09-14 15:26:46 +01002463 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002464 }
Will Deacon03edb222015-01-19 14:27:33 +00002465
Robin Murphy06e393e2016-09-12 17:13:55 +01002466 ret = -ENOMEM;
2467 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
2468 GFP_KERNEL);
2469 if (!cfg)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002470 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002471
2472 cfg->smmu = smmu;
2473 fwspec->iommu_priv = cfg;
2474 while (i--)
2475 cfg->smendx[i] = INVALID_SMENDX;
2476
Robin Murphy6668f692016-09-12 17:13:54 +01002477 ret = arm_smmu_master_alloc_smes(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002478 if (ret)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002479 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002480
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002481 arm_smmu_power_off(smmu->pwr);
Robin Murphy06e393e2016-09-12 17:13:55 +01002482 return 0;
Robin Murphyd5b41782016-09-14 15:21:39 +01002483
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002484out_pwr_off:
2485 arm_smmu_power_off(smmu->pwr);
Robin Murphyd5b41782016-09-14 15:21:39 +01002486out_free:
Robin Murphy06e393e2016-09-12 17:13:55 +01002487 if (fwspec)
2488 kfree(fwspec->iommu_priv);
2489 iommu_fwspec_free(dev);
Robin Murphyd5b41782016-09-14 15:21:39 +01002490 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00002491}
2492
Will Deacon45ae7cf2013-06-24 18:31:25 +01002493static void arm_smmu_remove_device(struct device *dev)
2494{
Robin Murphy06e393e2016-09-12 17:13:55 +01002495 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002496 struct arm_smmu_device *smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002497
Robin Murphy06e393e2016-09-12 17:13:55 +01002498 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyd5b41782016-09-14 15:21:39 +01002499 return;
Robin Murphya754fd12016-09-12 17:13:50 +01002500
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002501 smmu = fwspec_smmu(fwspec);
2502 if (arm_smmu_power_on(smmu->pwr)) {
2503 WARN_ON(1);
2504 return;
2505 }
2506
Robin Murphy06e393e2016-09-12 17:13:55 +01002507 arm_smmu_master_free_smes(fwspec);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002508 iommu_group_remove_device(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002509 kfree(fwspec->iommu_priv);
2510 iommu_fwspec_free(dev);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002511 arm_smmu_power_off(smmu->pwr);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002512}
2513
Joerg Roedelaf659932015-10-21 23:51:41 +02002514static struct iommu_group *arm_smmu_device_group(struct device *dev)
2515{
Robin Murphy06e393e2016-09-12 17:13:55 +01002516 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2517 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy6668f692016-09-12 17:13:54 +01002518 struct iommu_group *group = NULL;
2519 int i, idx;
2520
Robin Murphy06e393e2016-09-12 17:13:55 +01002521 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002522 if (group && smmu->s2crs[idx].group &&
2523 group != smmu->s2crs[idx].group)
2524 return ERR_PTR(-EINVAL);
2525
2526 group = smmu->s2crs[idx].group;
2527 }
2528
2529 if (group)
2530 return group;
Joerg Roedelaf659932015-10-21 23:51:41 +02002531
2532 if (dev_is_pci(dev))
2533 group = pci_device_group(dev);
2534 else
2535 group = generic_device_group(dev);
2536
Joerg Roedelaf659932015-10-21 23:51:41 +02002537 return group;
2538}
2539
Will Deaconc752ce42014-06-25 22:46:31 +01002540static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
2541 enum iommu_attr attr, void *data)
2542{
Joerg Roedel1d672632015-03-26 13:43:10 +01002543 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002544 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01002545
2546 switch (attr) {
2547 case DOMAIN_ATTR_NESTING:
2548 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
2549 return 0;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08002550 case DOMAIN_ATTR_PT_BASE_ADDR:
2551 *((phys_addr_t *)data) =
2552 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2553 return 0;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002554 case DOMAIN_ATTR_CONTEXT_BANK:
2555 /* context bank index isn't valid until we are attached */
2556 if (smmu_domain->smmu == NULL)
2557 return -ENODEV;
2558
2559 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
2560 ret = 0;
2561 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002562 case DOMAIN_ATTR_TTBR0: {
2563 u64 val;
2564 struct arm_smmu_device *smmu = smmu_domain->smmu;
2565 /* not valid until we are attached */
2566 if (smmu == NULL)
2567 return -ENODEV;
2568
2569 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2570 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
2571 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
2572 << (TTBRn_ASID_SHIFT);
2573 *((u64 *)data) = val;
2574 ret = 0;
2575 break;
2576 }
2577 case DOMAIN_ATTR_CONTEXTIDR:
2578 /* not valid until attached */
2579 if (smmu_domain->smmu == NULL)
2580 return -ENODEV;
2581 *((u32 *)data) = smmu_domain->cfg.procid;
2582 ret = 0;
2583 break;
2584 case DOMAIN_ATTR_PROCID:
2585 *((u32 *)data) = smmu_domain->cfg.procid;
2586 ret = 0;
2587 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002588 case DOMAIN_ATTR_DYNAMIC:
2589 *((int *)data) = !!(smmu_domain->attributes
2590 & (1 << DOMAIN_ATTR_DYNAMIC));
2591 ret = 0;
2592 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002593 case DOMAIN_ATTR_NON_FATAL_FAULTS:
2594 *((int *)data) = !!(smmu_domain->attributes
2595 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
2596 ret = 0;
2597 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07002598 case DOMAIN_ATTR_S1_BYPASS:
2599 *((int *)data) = !!(smmu_domain->attributes
2600 & (1 << DOMAIN_ATTR_S1_BYPASS));
2601 ret = 0;
2602 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002603 case DOMAIN_ATTR_SECURE_VMID:
2604 *((int *)data) = smmu_domain->secure_vmid;
2605 ret = 0;
2606 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08002607 case DOMAIN_ATTR_PGTBL_INFO: {
2608 struct iommu_pgtbl_info *info = data;
2609
2610 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
2611 ret = -ENODEV;
2612 break;
2613 }
2614 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
2615 ret = 0;
2616 break;
2617 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002618 case DOMAIN_ATTR_FAST:
2619 *((int *)data) = !!(smmu_domain->attributes
2620 & (1 << DOMAIN_ATTR_FAST));
2621 ret = 0;
2622 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002623 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2624 *((int *)data) = !!(smmu_domain->attributes &
2625 (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
2626 ret = 0;
2627 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08002628 case DOMAIN_ATTR_EARLY_MAP:
2629 *((int *)data) = !!(smmu_domain->attributes
2630 & (1 << DOMAIN_ATTR_EARLY_MAP));
2631 ret = 0;
2632 break;
Mitchel Humpherys05314f32016-06-07 16:04:40 -07002633 case DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
2634 if (!smmu_domain->smmu)
2635 return -ENODEV;
Liam Mark53cf2342016-12-20 11:36:07 -08002636 *((int *)data) = is_iommu_pt_coherent(smmu_domain);
2637 ret = 0;
2638 break;
2639 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
2640 *((int *)data) = !!(smmu_domain->attributes
2641 & (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
Mitchel Humpherys05314f32016-06-07 16:04:40 -07002642 ret = 0;
2643 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002644 default:
2645 return -ENODEV;
2646 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002647 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002648}
2649
2650static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
2651 enum iommu_attr attr, void *data)
2652{
Will Deacon518f7132014-11-14 17:17:54 +00002653 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01002654 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01002655
Will Deacon518f7132014-11-14 17:17:54 +00002656 mutex_lock(&smmu_domain->init_mutex);
2657
Will Deaconc752ce42014-06-25 22:46:31 +01002658 switch (attr) {
2659 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00002660 if (smmu_domain->smmu) {
2661 ret = -EPERM;
2662 goto out_unlock;
2663 }
2664
Will Deaconc752ce42014-06-25 22:46:31 +01002665 if (*(int *)data)
2666 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2667 else
2668 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2669
Will Deacon518f7132014-11-14 17:17:54 +00002670 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002671 case DOMAIN_ATTR_PROCID:
2672 if (smmu_domain->smmu != NULL) {
2673 dev_err(smmu_domain->smmu->dev,
2674 "cannot change procid attribute while attached\n");
2675 ret = -EBUSY;
2676 break;
2677 }
2678 smmu_domain->cfg.procid = *((u32 *)data);
2679 ret = 0;
2680 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002681 case DOMAIN_ATTR_DYNAMIC: {
2682 int dynamic = *((int *)data);
2683
2684 if (smmu_domain->smmu != NULL) {
2685 dev_err(smmu_domain->smmu->dev,
2686 "cannot change dynamic attribute while attached\n");
2687 ret = -EBUSY;
2688 break;
2689 }
2690
2691 if (dynamic)
2692 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
2693 else
2694 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
2695 ret = 0;
2696 break;
2697 }
2698 case DOMAIN_ATTR_CONTEXT_BANK:
2699 /* context bank can't be set while attached */
2700 if (smmu_domain->smmu != NULL) {
2701 ret = -EBUSY;
2702 break;
2703 }
2704 /* ... and it can only be set for dynamic contexts. */
2705 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
2706 ret = -EINVAL;
2707 break;
2708 }
2709
2710 /* this will be validated during attach */
2711 smmu_domain->cfg.cbndx = *((unsigned int *)data);
2712 ret = 0;
2713 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002714 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
2715 u32 non_fatal_faults = *((int *)data);
2716
2717 if (non_fatal_faults)
2718 smmu_domain->attributes |=
2719 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
2720 else
2721 smmu_domain->attributes &=
2722 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
2723 ret = 0;
2724 break;
2725 }
Patrick Dalye62d3362016-03-15 18:58:28 -07002726 case DOMAIN_ATTR_S1_BYPASS: {
2727 int bypass = *((int *)data);
2728
2729 /* bypass can't be changed while attached */
2730 if (smmu_domain->smmu != NULL) {
2731 ret = -EBUSY;
2732 break;
2733 }
2734 if (bypass)
2735 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
2736 else
2737 smmu_domain->attributes &=
2738 ~(1 << DOMAIN_ATTR_S1_BYPASS);
2739
2740 ret = 0;
2741 break;
2742 }
Patrick Daly8befb662016-08-17 20:03:28 -07002743 case DOMAIN_ATTR_ATOMIC:
2744 {
2745 int atomic_ctx = *((int *)data);
2746
2747 /* can't be changed while attached */
2748 if (smmu_domain->smmu != NULL) {
2749 ret = -EBUSY;
2750 break;
2751 }
2752 if (atomic_ctx)
2753 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
2754 else
2755 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
2756 break;
2757 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002758 case DOMAIN_ATTR_SECURE_VMID:
2759 if (smmu_domain->secure_vmid != VMID_INVAL) {
2760 ret = -ENODEV;
2761 WARN(1, "secure vmid already set!");
2762 break;
2763 }
2764 smmu_domain->secure_vmid = *((int *)data);
2765 break;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002766 case DOMAIN_ATTR_FAST:
2767 if (*((int *)data))
2768 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
2769 ret = 0;
2770 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002771 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2772 /* can't be changed while attached */
2773 if (smmu_domain->smmu != NULL) {
2774 ret = -EBUSY;
2775 break;
2776 }
2777 if (*((int *)data))
2778 smmu_domain->attributes |=
2779 1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
2780 ret = 0;
2781 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08002782 case DOMAIN_ATTR_EARLY_MAP: {
2783 int early_map = *((int *)data);
2784
2785 ret = 0;
2786 if (early_map) {
2787 smmu_domain->attributes |=
2788 1 << DOMAIN_ATTR_EARLY_MAP;
2789 } else {
2790 if (smmu_domain->smmu)
2791 ret = arm_smmu_enable_s1_translations(
2792 smmu_domain);
2793
2794 if (!ret)
2795 smmu_domain->attributes &=
2796 ~(1 << DOMAIN_ATTR_EARLY_MAP);
2797 }
2798 break;
2799 }
Liam Mark53cf2342016-12-20 11:36:07 -08002800 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT: {
2801 int force_coherent = *((int *)data);
2802
2803 if (smmu_domain->smmu != NULL) {
2804 dev_err(smmu_domain->smmu->dev,
2805 "cannot change force coherent attribute while attached\n");
2806 ret = -EBUSY;
2807 break;
2808 }
2809
2810 if (force_coherent)
2811 smmu_domain->attributes |=
2812 1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
2813 else
2814 smmu_domain->attributes &=
2815 ~(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT);
2816
2817 ret = 0;
2818 break;
2819 }
2820
Will Deaconc752ce42014-06-25 22:46:31 +01002821 default:
Will Deacon518f7132014-11-14 17:17:54 +00002822 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01002823 }
Will Deacon518f7132014-11-14 17:17:54 +00002824
2825out_unlock:
2826 mutex_unlock(&smmu_domain->init_mutex);
2827 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002828}
2829
Robin Murphy7e96c742016-09-14 15:26:46 +01002830static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
2831{
2832 u32 fwid = 0;
2833
2834 if (args->args_count > 0)
2835 fwid |= (u16)args->args[0];
2836
2837 if (args->args_count > 1)
2838 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
2839
2840 return iommu_fwspec_add_ids(dev, &fwid, 1);
2841}
2842
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08002843static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain)
2844{
2845 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2846 struct arm_smmu_device *smmu = smmu_domain->smmu;
2847 void __iomem *cb_base;
2848 u32 reg;
2849 int ret;
2850
2851 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2852 ret = arm_smmu_power_on(smmu->pwr);
2853 if (ret)
2854 return ret;
2855
2856 reg = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
2857 reg |= SCTLR_M;
2858
2859 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
2860 arm_smmu_power_off(smmu->pwr);
2861 return ret;
2862}
2863
Liam Mark3ba41cf2016-12-09 14:39:04 -08002864static bool arm_smmu_is_iova_coherent(struct iommu_domain *domain,
2865 dma_addr_t iova)
2866{
2867 bool ret;
2868 unsigned long flags;
2869 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2870 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2871
2872 if (!ops)
2873 return false;
2874
2875 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2876 ret = ops->is_iova_coherent(ops, iova);
2877 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2878 return ret;
2879}
2880
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002881static void arm_smmu_trigger_fault(struct iommu_domain *domain,
2882 unsigned long flags)
2883{
2884 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2885 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2886 struct arm_smmu_device *smmu;
2887 void __iomem *cb_base;
2888
2889 if (!smmu_domain->smmu) {
2890 pr_err("Can't trigger faults on non-attached domains\n");
2891 return;
2892 }
2893
2894 smmu = smmu_domain->smmu;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002895 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002896 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002897
2898 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2899 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
2900 flags, cfg->cbndx);
2901 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07002902 /* give the interrupt time to fire... */
2903 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002904
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002905 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002906}
2907
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002908static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
2909 unsigned long offset)
2910{
2911 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2912 struct arm_smmu_device *smmu;
2913 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2914 void __iomem *cb_base;
2915 unsigned long val;
2916
2917 if (offset >= SZ_4K) {
2918 pr_err("Invalid offset: 0x%lx\n", offset);
2919 return 0;
2920 }
2921
2922 smmu = smmu_domain->smmu;
2923 if (!smmu) {
2924 WARN(1, "Can't read registers of a detached domain\n");
2925 val = 0;
2926 return val;
2927 }
2928
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002929 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002930 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002931
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002932 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2933 val = readl_relaxed(cb_base + offset);
2934
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002935 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002936 return val;
2937}
2938
2939static void arm_smmu_reg_write(struct iommu_domain *domain,
2940 unsigned long offset, unsigned long val)
2941{
2942 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2943 struct arm_smmu_device *smmu;
2944 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2945 void __iomem *cb_base;
2946
2947 if (offset >= SZ_4K) {
2948 pr_err("Invalid offset: 0x%lx\n", offset);
2949 return;
2950 }
2951
2952 smmu = smmu_domain->smmu;
2953 if (!smmu) {
2954 WARN(1, "Can't read registers of a detached domain\n");
2955 return;
2956 }
2957
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002958 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002959 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002960
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002961 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2962 writel_relaxed(val, cb_base + offset);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002963
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002964 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002965}
2966
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08002967static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
2968{
2969 arm_smmu_tlb_inv_context(to_smmu_domain(domain));
2970}
2971
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002972static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
2973{
2974 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2975
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002976 return arm_smmu_power_on(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002977}
2978
2979static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
2980{
2981 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2982
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002983 arm_smmu_power_off(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002984}
2985
Will Deacon518f7132014-11-14 17:17:54 +00002986static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01002987 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01002988 .domain_alloc = arm_smmu_domain_alloc,
2989 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01002990 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07002991 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01002992 .map = arm_smmu_map,
2993 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002994 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01002995 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002996 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01002997 .add_device = arm_smmu_add_device,
2998 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02002999 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01003000 .domain_get_attr = arm_smmu_domain_get_attr,
3001 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy7e96c742016-09-14 15:26:46 +01003002 .of_xlate = arm_smmu_of_xlate,
Will Deacon518f7132014-11-14 17:17:54 +00003003 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003004 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherysfd557002015-08-21 14:07:59 -07003005 .reg_read = arm_smmu_reg_read,
3006 .reg_write = arm_smmu_reg_write,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003007 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003008 .enable_config_clocks = arm_smmu_enable_config_clocks,
3009 .disable_config_clocks = arm_smmu_disable_config_clocks,
Liam Mark3ba41cf2016-12-09 14:39:04 -08003010 .is_iova_coherent = arm_smmu_is_iova_coherent,
Will Deacon45ae7cf2013-06-24 18:31:25 +01003011};
3012
Patrick Dalyad441dd2016-09-15 15:50:46 -07003013#define IMPL_DEF1_MICRO_MMU_CTRL 0
3014#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
3015#define MICRO_MMU_CTRL_IDLE (1 << 3)
3016
3017/* Definitions for implementation-defined registers */
3018#define ACTLR_QCOM_OSH_SHIFT 28
3019#define ACTLR_QCOM_OSH 1
3020
3021#define ACTLR_QCOM_ISH_SHIFT 29
3022#define ACTLR_QCOM_ISH 1
3023
3024#define ACTLR_QCOM_NSH_SHIFT 30
3025#define ACTLR_QCOM_NSH 1
3026
3027static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003028{
3029 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003030 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003031
3032 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
3033 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
3034 0, 30000)) {
3035 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
3036 return -EBUSY;
3037 }
3038
3039 return 0;
3040}
3041
Patrick Dalyad441dd2016-09-15 15:50:46 -07003042static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003043{
3044 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3045 u32 reg;
3046
3047 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3048 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
3049 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3050
Patrick Dalyad441dd2016-09-15 15:50:46 -07003051 return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003052}
3053
Patrick Dalyad441dd2016-09-15 15:50:46 -07003054static int qsmmuv2_halt(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003055{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003056 return __qsmmuv2_halt(smmu, true);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003057}
3058
Patrick Dalyad441dd2016-09-15 15:50:46 -07003059static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003060{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003061 return __qsmmuv2_halt(smmu, false);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003062}
3063
Patrick Dalyad441dd2016-09-15 15:50:46 -07003064static void qsmmuv2_resume(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003065{
3066 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3067 u32 reg;
3068
3069 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3070 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
3071 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3072}
3073
Patrick Dalyad441dd2016-09-15 15:50:46 -07003074static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003075{
3076 int i;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003077 u32 val;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003078 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003079 void __iomem *cb_base;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003080
Patrick Dalyad441dd2016-09-15 15:50:46 -07003081 /*
3082 * SCTLR.M must be disabled here per ARM SMMUv2 spec
3083 * to prevent table walks with an inconsistent state.
3084 */
3085 for (i = 0; i < smmu->num_context_banks; ++i) {
3086 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3087 val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
3088 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
3089 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
3090 writel_relaxed(val, cb_base + ARM_SMMU_CB_ACTLR);
3091 }
3092
3093 /* Program implementation defined registers */
3094 qsmmuv2_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003095 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
3096 writel_relaxed(regs[i].value,
3097 ARM_SMMU_GR0(smmu) + regs[i].offset);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003098 qsmmuv2_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003099}
3100
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003101static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
3102 dma_addr_t iova)
Patrick Dalyad441dd2016-09-15 15:50:46 -07003103{
3104 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3105 struct arm_smmu_device *smmu = smmu_domain->smmu;
3106 int ret;
3107 phys_addr_t phys = 0;
3108 unsigned long flags;
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003109 u32 sctlr, sctlr_orig, fsr;
3110 void __iomem *cb_base;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003111
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003112 ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003113 if (ret)
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003114 return ret;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003115
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003116 spin_lock_irqsave(&smmu->atos_lock, flags);
3117 cb_base = ARM_SMMU_CB_BASE(smmu) +
3118 ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003119
3120 qsmmuv2_halt_nowait(smmu);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003121 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003122 qsmmuv2_wait_for_halt(smmu);
3123
3124 /* clear FSR to allow ATOS to log any faults */
3125 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
3126 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
3127
3128 /* disable stall mode momentarily */
3129 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3130 sctlr = sctlr_orig & ~SCTLR_CFCFG;
3131 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
3132
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003133 phys = __arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003134
3135 /* restore SCTLR */
3136 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
3137
3138 qsmmuv2_resume(smmu);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003139 spin_unlock_irqrestore(&smmu->atos_lock, flags);
3140
3141 arm_smmu_power_off(smmu_domain->smmu->pwr);
3142 return phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003143}
3144
3145struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
3146 .device_reset = qsmmuv2_device_reset,
3147 .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
Patrick Dalyad441dd2016-09-15 15:50:46 -07003148};
3149
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003150static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003151{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003152 int i;
3153 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003154 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003155 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003156
Peng Fan3ca37122016-05-03 21:50:30 +08003157 /*
3158 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
3159 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
3160 * bit is only present in MMU-500r2 onwards.
3161 */
3162 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
3163 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
3164 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
3165 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
3166 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
3167 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
3168 }
3169
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003170 /* Make sure all context banks are disabled and clear CB_FSR */
3171 for (i = 0; i < smmu->num_context_banks; ++i) {
3172 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3173 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
3174 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003175 /*
3176 * Disable MMU-500's not-particularly-beneficial next-page
3177 * prefetcher for the sake of errata #841119 and #826419.
3178 */
3179 if (smmu->model == ARM_MMU500) {
3180 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
3181 reg &= ~ARM_MMU500_ACTLR_CPRE;
3182 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
3183 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003184 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003185}
3186
3187static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
3188{
3189 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy468f4942016-09-12 17:13:49 +01003190 int i;
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003191 u32 reg;
3192
3193 /* clear global FSR */
3194 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3195 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3196
Robin Murphy468f4942016-09-12 17:13:49 +01003197 /*
3198 * Reset stream mapping groups: Initial values mark all SMRn as
3199 * invalid and all S2CRn as bypass unless overridden.
3200 */
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003201 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
Robin Murphya754fd12016-09-12 17:13:50 +01003202 for (i = 0; i < smmu->num_mapping_groups; ++i)
3203 arm_smmu_write_sme(smmu, i);
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003204
3205 arm_smmu_context_bank_reset(smmu);
3206 }
Will Deacon1463fe42013-07-31 19:21:27 +01003207
Will Deacon45ae7cf2013-06-24 18:31:25 +01003208 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003209 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
3210 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
3211
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003212 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003213
Will Deacon45ae7cf2013-06-24 18:31:25 +01003214 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003215 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003216
3217 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003218 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003219
Robin Murphy25a1c962016-02-10 14:25:33 +00003220 /* Enable client access, handling unmatched streams as appropriate */
3221 reg &= ~sCR0_CLIENTPD;
3222 if (disable_bypass)
3223 reg |= sCR0_USFCFG;
3224 else
3225 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003226
3227 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003228 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003229
3230 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003231 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003232
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003233 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3234 reg |= sCR0_VMID16EN;
3235
Will Deacon45ae7cf2013-06-24 18:31:25 +01003236 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003237 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003238 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Dalyd7476202016-09-08 18:23:28 -07003239
3240 /* Manage any implementation defined features */
3241 arm_smmu_arch_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003242}
3243
3244static int arm_smmu_id_size_to_bits(int size)
3245{
3246 switch (size) {
3247 case 0:
3248 return 32;
3249 case 1:
3250 return 36;
3251 case 2:
3252 return 40;
3253 case 3:
3254 return 42;
3255 case 4:
3256 return 44;
3257 case 5:
3258 default:
3259 return 48;
3260 }
3261}
3262
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003263static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
3264{
3265 struct device *dev = smmu->dev;
3266 int i, ntuples, ret;
3267 u32 *tuples;
3268 struct arm_smmu_impl_def_reg *regs, *regit;
3269
3270 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
3271 return 0;
3272
3273 ntuples /= sizeof(u32);
3274 if (ntuples % 2) {
3275 dev_err(dev,
3276 "Invalid number of attach-impl-defs registers: %d\n",
3277 ntuples);
3278 return -EINVAL;
3279 }
3280
3281 regs = devm_kmalloc(
3282 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
3283 GFP_KERNEL);
3284 if (!regs)
3285 return -ENOMEM;
3286
3287 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
3288 if (!tuples)
3289 return -ENOMEM;
3290
3291 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
3292 tuples, ntuples);
3293 if (ret)
3294 return ret;
3295
3296 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
3297 regit->offset = tuples[i];
3298 regit->value = tuples[i + 1];
3299 }
3300
3301 devm_kfree(dev, tuples);
3302
3303 smmu->impl_def_attach_registers = regs;
3304 smmu->num_impl_def_attach_registers = ntuples / 2;
3305
3306 return 0;
3307}
3308
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003309
3310static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003311{
3312 const char *cname;
3313 struct property *prop;
3314 int i;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003315 struct device *dev = pwr->dev;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003316
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003317 pwr->num_clocks =
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003318 of_property_count_strings(dev->of_node, "clock-names");
3319
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003320 if (pwr->num_clocks < 1) {
3321 pwr->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003322 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07003323 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003324
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003325 pwr->clocks = devm_kzalloc(
3326 dev, sizeof(*pwr->clocks) * pwr->num_clocks,
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003327 GFP_KERNEL);
3328
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003329 if (!pwr->clocks)
3330 return -ENOMEM;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003331
3332 i = 0;
3333 of_property_for_each_string(dev->of_node, "clock-names",
3334 prop, cname) {
3335 struct clk *c = devm_clk_get(dev, cname);
3336
3337 if (IS_ERR(c)) {
3338 dev_err(dev, "Couldn't get clock: %s",
3339 cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07003340 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003341 }
3342
3343 if (clk_get_rate(c) == 0) {
3344 long rate = clk_round_rate(c, 1000);
3345
3346 clk_set_rate(c, rate);
3347 }
3348
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003349 pwr->clocks[i] = c;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003350
3351 ++i;
3352 }
3353 return 0;
3354}
3355
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003356static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003357{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003358 const char *cname;
3359 struct property *prop;
3360 int i, ret = 0;
3361 struct device *dev = pwr->dev;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003362
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003363 pwr->num_gdscs =
3364 of_property_count_strings(dev->of_node, "qcom,regulator-names");
3365
3366 if (pwr->num_gdscs < 1) {
3367 pwr->num_gdscs = 0;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003368 return 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003369 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003370
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003371 pwr->gdscs = devm_kzalloc(
3372 dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
3373
3374 if (!pwr->gdscs)
3375 return -ENOMEM;
3376
3377 i = 0;
3378 of_property_for_each_string(dev->of_node, "qcom,regulator-names",
3379 prop, cname)
Patrick Daly86396be2017-04-17 18:08:45 -07003380 pwr->gdscs[i++].supply = cname;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003381
3382 ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
3383 return ret;
3384}
3385
3386static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
3387{
3388 struct device *dev = pwr->dev;
3389
3390 /* We don't want the bus APIs to print an error message */
3391 if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
3392 dev_dbg(dev, "No bus scaling info\n");
3393 return 0;
3394 }
3395
3396 pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
3397 if (!pwr->bus_dt_data) {
3398 dev_err(dev, "Unable to read bus-scaling from devicetree\n");
3399 return -EINVAL;
3400 }
3401
3402 pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
3403 if (!pwr->bus_client) {
3404 dev_err(dev, "Bus client registration failed\n");
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003405 return -EINVAL;
3406 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003407
3408 return 0;
3409}
3410
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003411/*
3412 * Cleanup done by devm. Any non-devm resources must clean up themselves.
3413 */
3414static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
3415 struct platform_device *pdev)
Patrick Daly2764f952016-09-06 19:22:44 -07003416{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003417 struct arm_smmu_power_resources *pwr;
3418 int ret;
Patrick Daly2764f952016-09-06 19:22:44 -07003419
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003420 pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
3421 if (!pwr)
3422 return ERR_PTR(-ENOMEM);
Patrick Daly2764f952016-09-06 19:22:44 -07003423
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003424 pwr->dev = &pdev->dev;
3425 pwr->pdev = pdev;
3426 mutex_init(&pwr->power_lock);
3427 spin_lock_init(&pwr->clock_refs_lock);
Patrick Daly2764f952016-09-06 19:22:44 -07003428
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003429 ret = arm_smmu_init_clocks(pwr);
3430 if (ret)
3431 return ERR_PTR(ret);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003432
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003433 ret = arm_smmu_init_regulators(pwr);
3434 if (ret)
3435 return ERR_PTR(ret);
3436
3437 ret = arm_smmu_init_bus_scaling(pwr);
3438 if (ret)
3439 return ERR_PTR(ret);
3440
3441 return pwr;
Patrick Daly2764f952016-09-06 19:22:44 -07003442}
3443
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003444/*
Patrick Dalyabeee952017-04-13 18:14:59 -07003445 * Bus APIs are devm-safe.
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003446 */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003447static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003448{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003449 msm_bus_scale_unregister_client(pwr->bus_client);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003450}
3451
Will Deacon45ae7cf2013-06-24 18:31:25 +01003452static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
3453{
3454 unsigned long size;
3455 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
3456 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003457 bool cttw_dt, cttw_reg;
Robin Murphya754fd12016-09-12 17:13:50 +01003458 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003459
Mitchel Humpherysba822582015-10-20 11:37:41 -07003460 dev_dbg(smmu->dev, "probing hardware configuration...\n");
3461 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01003462 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003463
3464 /* ID0 */
3465 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01003466
3467 /* Restrict available stages based on module parameter */
3468 if (force_stage == 1)
3469 id &= ~(ID0_S2TS | ID0_NTS);
3470 else if (force_stage == 2)
3471 id &= ~(ID0_S1TS | ID0_NTS);
3472
Will Deacon45ae7cf2013-06-24 18:31:25 +01003473 if (id & ID0_S1TS) {
3474 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003475 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003476 }
3477
3478 if (id & ID0_S2TS) {
3479 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003480 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003481 }
3482
3483 if (id & ID0_NTS) {
3484 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003485 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003486 }
3487
3488 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01003489 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003490 dev_err(smmu->dev, "\tno translation support!\n");
3491 return -ENODEV;
3492 }
3493
Robin Murphyb7862e32016-04-13 18:13:03 +01003494 if ((id & ID0_S1TS) &&
3495 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003496 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003497 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003498 }
3499
Robin Murphybae2c2d2015-07-29 19:46:05 +01003500 /*
3501 * In order for DMA API calls to work properly, we must defer to what
3502 * the DT says about coherency, regardless of what the hardware claims.
3503 * Fortunately, this also opens up a workaround for systems where the
3504 * ID register value has ended up configured incorrectly.
3505 */
3506 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
3507 cttw_reg = !!(id & ID0_CTTW);
3508 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003509 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003510 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003511 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01003512 cttw_dt ? "" : "non-");
3513 if (cttw_dt != cttw_reg)
3514 dev_notice(smmu->dev,
3515 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003516
Robin Murphy53867802016-09-12 17:13:48 +01003517 /* Max. number of entries we have for stream matching/indexing */
3518 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
3519 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003520 if (id & ID0_SMS) {
Robin Murphy53867802016-09-12 17:13:48 +01003521 u32 smr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003522
3523 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy53867802016-09-12 17:13:48 +01003524 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
3525 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003526 dev_err(smmu->dev,
3527 "stream-matching supported, but no SMRs present!\n");
3528 return -ENODEV;
3529 }
3530
Robin Murphy53867802016-09-12 17:13:48 +01003531 /*
3532 * SMR.ID bits may not be preserved if the corresponding MASK
3533 * bits are set, so check each one separately. We can reject
3534 * masters later if they try to claim IDs outside these masks.
3535 */
3536 smr = smmu->streamid_mask << SMR_ID_SHIFT;
3537 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
3538 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
3539 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003540
Robin Murphy53867802016-09-12 17:13:48 +01003541 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
3542 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
3543 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
3544 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
Dhaval Patel031d7462015-05-09 14:47:29 -07003545
Robin Murphy468f4942016-09-12 17:13:49 +01003546 /* Zero-initialised to mark as invalid */
3547 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
3548 GFP_KERNEL);
3549 if (!smmu->smrs)
3550 return -ENOMEM;
3551
Robin Murphy53867802016-09-12 17:13:48 +01003552 dev_notice(smmu->dev,
3553 "\tstream matching with %lu register groups, mask 0x%x",
3554 size, smmu->smr_mask_mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003555 }
Robin Murphya754fd12016-09-12 17:13:50 +01003556 /* s2cr->type == 0 means translation, so initialise explicitly */
3557 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
3558 GFP_KERNEL);
3559 if (!smmu->s2crs)
3560 return -ENOMEM;
3561 for (i = 0; i < size; i++)
3562 smmu->s2crs[i] = s2cr_init_val;
3563
Robin Murphy53867802016-09-12 17:13:48 +01003564 smmu->num_mapping_groups = size;
Robin Murphy6668f692016-09-12 17:13:54 +01003565 mutex_init(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003566
Robin Murphy7602b872016-04-28 17:12:09 +01003567 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
3568 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
3569 if (!(id & ID0_PTFS_NO_AARCH32S))
3570 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
3571 }
3572
Will Deacon45ae7cf2013-06-24 18:31:25 +01003573 /* ID1 */
3574 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01003575 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003576
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003577 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00003578 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01003579 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003580 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07003581 dev_warn(smmu->dev,
3582 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
3583 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003584
Will Deacon518f7132014-11-14 17:17:54 +00003585 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003586 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
3587 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
3588 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
3589 return -ENODEV;
3590 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07003591 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01003592 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01003593 /*
3594 * Cavium CN88xx erratum #27704.
3595 * Ensure ASID and VMID allocation is unique across all SMMUs in
3596 * the system.
3597 */
3598 if (smmu->model == CAVIUM_SMMUV2) {
3599 smmu->cavium_id_base =
3600 atomic_add_return(smmu->num_context_banks,
3601 &cavium_smmu_context_count);
3602 smmu->cavium_id_base -= smmu->num_context_banks;
3603 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003604
3605 /* ID2 */
3606 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
3607 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003608 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003609
Will Deacon518f7132014-11-14 17:17:54 +00003610 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003611 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003612 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003613
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003614 if (id & ID2_VMID16)
3615 smmu->features |= ARM_SMMU_FEAT_VMID16;
3616
Robin Murphyf1d84542015-03-04 16:41:05 +00003617 /*
3618 * What the page table walker can address actually depends on which
3619 * descriptor format is in use, but since a) we don't know that yet,
3620 * and b) it can vary per context bank, this will have to do...
3621 */
3622 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
3623 dev_warn(smmu->dev,
3624 "failed to set DMA mask for table walker\n");
3625
Robin Murphyb7862e32016-04-13 18:13:03 +01003626 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00003627 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01003628 if (smmu->version == ARM_SMMU_V1_64K)
3629 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003630 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003631 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00003632 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00003633 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01003634 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00003635 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01003636 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00003637 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01003638 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003639 }
3640
Robin Murphy7602b872016-04-28 17:12:09 +01003641 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01003642 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01003643 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01003644 if (smmu->features &
3645 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01003646 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01003647 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01003648 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01003649 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01003650 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01003651
Robin Murphyd5466352016-05-09 17:20:09 +01003652 if (arm_smmu_ops.pgsize_bitmap == -1UL)
3653 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
3654 else
3655 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003656 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01003657 smmu->pgsize_bitmap);
3658
Will Deacon518f7132014-11-14 17:17:54 +00003659
Will Deacon28d60072014-09-01 16:24:48 +01003660 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003661 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
3662 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003663
3664 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003665 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
3666 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003667
Will Deacon45ae7cf2013-06-24 18:31:25 +01003668 return 0;
3669}
3670
Patrick Dalyd7476202016-09-08 18:23:28 -07003671static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
3672{
3673 if (!smmu->arch_ops)
3674 return 0;
3675 if (!smmu->arch_ops->init)
3676 return 0;
3677 return smmu->arch_ops->init(smmu);
3678}
3679
3680static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
3681{
3682 if (!smmu->arch_ops)
3683 return;
3684 if (!smmu->arch_ops->device_reset)
3685 return;
3686 return smmu->arch_ops->device_reset(smmu);
3687}
3688
Robin Murphy67b65a32016-04-13 18:12:57 +01003689struct arm_smmu_match_data {
3690 enum arm_smmu_arch_version version;
3691 enum arm_smmu_implementation model;
Patrick Dalyd7476202016-09-08 18:23:28 -07003692 struct arm_smmu_arch_ops *arch_ops;
Robin Murphy67b65a32016-04-13 18:12:57 +01003693};
3694
Patrick Dalyd7476202016-09-08 18:23:28 -07003695#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops) \
3696static struct arm_smmu_match_data name = { \
3697.version = ver, \
3698.model = imp, \
3699.arch_ops = ops, \
3700} \
Robin Murphy67b65a32016-04-13 18:12:57 +01003701
Patrick Daly1f8a2882016-09-12 17:32:05 -07003702struct arm_smmu_arch_ops qsmmuv500_arch_ops;
3703
Patrick Dalyd7476202016-09-08 18:23:28 -07003704ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
3705ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
3706ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
3707ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
3708ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003709ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
Patrick Daly1f8a2882016-09-12 17:32:05 -07003710ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
3711 &qsmmuv500_arch_ops);
Robin Murphy67b65a32016-04-13 18:12:57 +01003712
Joerg Roedel09b52692014-10-02 12:24:45 +02003713static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01003714 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
3715 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
3716 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01003717 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003718 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01003719 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07003720 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Patrick Daly1f8a2882016-09-12 17:32:05 -07003721 { .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
Robin Murphy09360402014-08-28 17:51:59 +01003722 { },
3723};
3724MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
3725
Patrick Dalyc47dcd42017-02-09 23:09:57 -08003726
3727static int arm_smmu_of_iommu_configure_fixup(struct device *dev, void *data)
3728{
3729 if (!dev->iommu_fwspec)
3730 of_iommu_configure(dev, dev->of_node);
3731 return 0;
3732}
3733
Patrick Daly000a2f22017-02-13 22:18:12 -08003734static int arm_smmu_add_device_fixup(struct device *dev, void *data)
3735{
3736 struct iommu_ops *ops = data;
3737
3738 ops->add_device(dev);
3739 return 0;
3740}
3741
Patrick Daly1f8a2882016-09-12 17:32:05 -07003742static int qsmmuv500_tbu_register(struct device *dev, void *data);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003743static int arm_smmu_device_dt_probe(struct platform_device *pdev)
3744{
Robin Murphy67b65a32016-04-13 18:12:57 +01003745 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003746 struct resource *res;
3747 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003748 struct device *dev = &pdev->dev;
Robin Murphyd5b41782016-09-14 15:21:39 +01003749 int num_irqs, i, err;
Robin Murphy7e96c742016-09-14 15:26:46 +01003750 bool legacy_binding;
3751
3752 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
3753 if (legacy_binding && !using_generic_binding) {
3754 if (!using_legacy_binding)
3755 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
3756 using_legacy_binding = true;
3757 } else if (!legacy_binding && !using_legacy_binding) {
3758 using_generic_binding = true;
3759 } else {
3760 dev_err(dev, "not probing due to mismatched DT properties\n");
3761 return -ENODEV;
3762 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003763
3764 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
3765 if (!smmu) {
3766 dev_err(dev, "failed to allocate arm_smmu_device\n");
3767 return -ENOMEM;
3768 }
3769 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08003770 spin_lock_init(&smmu->atos_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07003771 idr_init(&smmu->asid_idr);
3772 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003773
Robin Murphyfe52d4f2016-09-12 17:13:52 +01003774 data = of_device_get_match_data(dev);
Robin Murphy67b65a32016-04-13 18:12:57 +01003775 smmu->version = data->version;
3776 smmu->model = data->model;
Patrick Dalyd7476202016-09-08 18:23:28 -07003777 smmu->arch_ops = data->arch_ops;
Robin Murphy09360402014-08-28 17:51:59 +01003778
Will Deacon45ae7cf2013-06-24 18:31:25 +01003779 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01003780 smmu->base = devm_ioremap_resource(dev, res);
3781 if (IS_ERR(smmu->base))
3782 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003783 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003784
3785 if (of_property_read_u32(dev->of_node, "#global-interrupts",
3786 &smmu->num_global_irqs)) {
3787 dev_err(dev, "missing #global-interrupts property\n");
3788 return -ENODEV;
3789 }
3790
3791 num_irqs = 0;
3792 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
3793 num_irqs++;
3794 if (num_irqs > smmu->num_global_irqs)
3795 smmu->num_context_irqs++;
3796 }
3797
Andreas Herrmann44a08de2013-10-01 13:39:07 +01003798 if (!smmu->num_context_irqs) {
3799 dev_err(dev, "found %d interrupts but expected at least %d\n",
3800 num_irqs, smmu->num_global_irqs + 1);
3801 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003802 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003803
3804 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
3805 GFP_KERNEL);
3806 if (!smmu->irqs) {
3807 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
3808 return -ENOMEM;
3809 }
3810
3811 for (i = 0; i < num_irqs; ++i) {
3812 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07003813
Will Deacon45ae7cf2013-06-24 18:31:25 +01003814 if (irq < 0) {
3815 dev_err(dev, "failed to get irq index %d\n", i);
3816 return -ENODEV;
3817 }
3818 smmu->irqs[i] = irq;
3819 }
3820
Dhaval Patel031d7462015-05-09 14:47:29 -07003821 parse_driver_options(smmu);
3822
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003823 smmu->pwr = arm_smmu_init_power_resources(pdev);
3824 if (IS_ERR(smmu->pwr))
3825 return PTR_ERR(smmu->pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003826
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003827 err = arm_smmu_power_on(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07003828 if (err)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003829 goto out_exit_power_resources;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003830
3831 err = arm_smmu_device_cfg_probe(smmu);
3832 if (err)
3833 goto out_power_off;
3834
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003835 err = arm_smmu_parse_impl_def_registers(smmu);
3836 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01003837 goto out_power_off;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003838
Robin Murphyb7862e32016-04-13 18:13:03 +01003839 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01003840 smmu->num_context_banks != smmu->num_context_irqs) {
3841 dev_err(dev,
Mitchel Humpherys73230ce2014-12-11 17:18:02 -08003842 "found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
3843 smmu->num_context_irqs, smmu->num_context_banks,
3844 smmu->num_context_banks);
3845 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003846 }
3847
Will Deacon45ae7cf2013-06-24 18:31:25 +01003848 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08003849 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
3850 NULL, arm_smmu_global_fault,
3851 IRQF_ONESHOT | IRQF_SHARED,
3852 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003853 if (err) {
3854 dev_err(dev, "failed to request global IRQ %d (%u)\n",
3855 i, smmu->irqs[i]);
Robin Murphyd5b41782016-09-14 15:21:39 +01003856 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003857 }
3858 }
3859
Patrick Dalyd7476202016-09-08 18:23:28 -07003860 err = arm_smmu_arch_init(smmu);
3861 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01003862 goto out_power_off;
Patrick Dalyd7476202016-09-08 18:23:28 -07003863
Robin Murphy06e393e2016-09-12 17:13:55 +01003864 of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
Robin Murphyfe52d4f2016-09-12 17:13:52 +01003865 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01003866 arm_smmu_device_reset(smmu);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003867 arm_smmu_power_off(smmu->pwr);
Patrick Dalyd7476202016-09-08 18:23:28 -07003868
Patrick Daly8e3371a2017-02-13 22:14:53 -08003869 INIT_LIST_HEAD(&smmu->list);
3870 spin_lock(&arm_smmu_devices_lock);
3871 list_add(&smmu->list, &arm_smmu_devices);
3872 spin_unlock(&arm_smmu_devices_lock);
3873
Patrick Dalyc47dcd42017-02-09 23:09:57 -08003874 /* bus_set_iommu depends on this. */
3875 bus_for_each_dev(&platform_bus_type, NULL, NULL,
3876 arm_smmu_of_iommu_configure_fixup);
3877
Robin Murphy7e96c742016-09-14 15:26:46 +01003878 /* Oh, for a proper bus abstraction */
3879 if (!iommu_present(&platform_bus_type))
3880 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
Patrick Daly000a2f22017-02-13 22:18:12 -08003881 else
3882 bus_for_each_dev(&platform_bus_type, NULL, &arm_smmu_ops,
3883 arm_smmu_add_device_fixup);
Robin Murphy7e96c742016-09-14 15:26:46 +01003884#ifdef CONFIG_ARM_AMBA
3885 if (!iommu_present(&amba_bustype))
3886 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
3887#endif
3888#ifdef CONFIG_PCI
3889 if (!iommu_present(&pci_bus_type)) {
3890 pci_request_acs();
3891 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
3892 }
3893#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01003894 return 0;
3895
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003896out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003897 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003898
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003899out_exit_power_resources:
3900 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003901
Will Deacon45ae7cf2013-06-24 18:31:25 +01003902 return err;
3903}
3904
3905static int arm_smmu_device_remove(struct platform_device *pdev)
3906{
Robin Murphyfe52d4f2016-09-12 17:13:52 +01003907 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003908
3909 if (!smmu)
3910 return -ENODEV;
3911
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003912 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003913 return -EINVAL;
3914
Will Deaconecfadb62013-07-31 19:21:28 +01003915 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyfe52d4f2016-09-12 17:13:52 +01003916 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003917
Patrick Dalyc190d932016-08-30 17:23:28 -07003918 idr_destroy(&smmu->asid_idr);
3919
Will Deacon45ae7cf2013-06-24 18:31:25 +01003920 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07003921 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003922 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003923
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003924 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07003925
Will Deacon45ae7cf2013-06-24 18:31:25 +01003926 return 0;
3927}
3928
Will Deacon45ae7cf2013-06-24 18:31:25 +01003929static struct platform_driver arm_smmu_driver = {
3930 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003931 .name = "arm-smmu",
3932 .of_match_table = of_match_ptr(arm_smmu_of_match),
3933 },
3934 .probe = arm_smmu_device_dt_probe,
3935 .remove = arm_smmu_device_remove,
3936};
3937
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08003938static struct platform_driver qsmmuv500_tbu_driver;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003939static int __init arm_smmu_init(void)
3940{
Robin Murphy7e96c742016-09-14 15:26:46 +01003941 static bool registered;
3942 int ret = 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003943
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08003944 if (registered)
3945 return 0;
3946
3947 ret = platform_driver_register(&qsmmuv500_tbu_driver);
3948 if (ret)
3949 return ret;
3950
3951 ret = platform_driver_register(&arm_smmu_driver);
3952 registered = !ret;
Robin Murphy7e96c742016-09-14 15:26:46 +01003953 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003954}
3955
3956static void __exit arm_smmu_exit(void)
3957{
3958 return platform_driver_unregister(&arm_smmu_driver);
3959}
3960
Andreas Herrmannb1950b22013-10-01 13:39:05 +01003961subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003962module_exit(arm_smmu_exit);
3963
Robin Murphy7e96c742016-09-14 15:26:46 +01003964static int __init arm_smmu_of_init(struct device_node *np)
3965{
3966 int ret = arm_smmu_init();
3967
3968 if (ret)
3969 return ret;
3970
3971 if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
3972 return -ENODEV;
3973
3974 return 0;
3975}
3976IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
3977IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
3978IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
3979IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
3980IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
3981IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
Robin Murphy7e96c742016-09-14 15:26:46 +01003982
Patrick Dalya0fddb62017-03-27 19:26:59 -07003983#define TCU_HW_VERSION_HLOS1 (0x18)
3984
Patrick Daly1f8a2882016-09-12 17:32:05 -07003985#define DEBUG_SID_HALT_REG 0x0
3986#define DEBUG_SID_HALT_VAL (0x1 << 16)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07003987#define DEBUG_SID_HALT_SID_MASK 0x3ff
3988
3989#define DEBUG_VA_ADDR_REG 0x8
3990
3991#define DEBUG_TXN_TRIGG_REG 0x18
3992#define DEBUG_TXN_AXPROT_SHIFT 6
3993#define DEBUG_TXN_AXCACHE_SHIFT 2
3994#define DEBUG_TRX_WRITE (0x1 << 1)
3995#define DEBUG_TXN_READ (0x0 << 1)
3996#define DEBUG_TXN_TRIGGER 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07003997
3998#define DEBUG_SR_HALT_ACK_REG 0x20
3999#define DEBUG_SR_HALT_ACK_VAL (0x1 << 1)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004000#define DEBUG_SR_ECATS_RUNNING_VAL (0x1 << 0)
4001
4002#define DEBUG_PAR_REG 0x28
4003#define DEBUG_PAR_PA_MASK ((0x1ULL << 36) - 1)
4004#define DEBUG_PAR_PA_SHIFT 12
4005#define DEBUG_PAR_FAULT_VAL 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004006
4007#define TBU_DBG_TIMEOUT_US 30000
4008
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004009struct qsmmuv500_archdata {
4010 struct list_head tbus;
Patrick Dalya0fddb62017-03-27 19:26:59 -07004011 void __iomem *tcu_base;
4012 u32 version;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004013};
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004014#define get_qsmmuv500_archdata(smmu) \
4015 ((struct qsmmuv500_archdata *)(smmu->archdata))
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004016
Patrick Daly1f8a2882016-09-12 17:32:05 -07004017struct qsmmuv500_tbu_device {
4018 struct list_head list;
4019 struct device *dev;
4020 struct arm_smmu_device *smmu;
4021 void __iomem *base;
4022 void __iomem *status_reg;
4023
4024 struct arm_smmu_power_resources *pwr;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004025 u32 sid_start;
4026 u32 num_sids;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004027
4028 /* Protects halt count */
4029 spinlock_t halt_lock;
4030 u32 halt_count;
4031};
4032
4033static int qsmmuv500_tbu_power_on_all(struct arm_smmu_device *smmu)
4034{
4035 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004036 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004037 int ret = 0;
4038
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004039 list_for_each_entry(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004040 ret = arm_smmu_power_on(tbu->pwr);
4041 if (ret)
4042 break;
4043 }
4044 if (!ret)
4045 return 0;
4046
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004047 list_for_each_entry_continue_reverse(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004048 arm_smmu_power_off(tbu->pwr);
4049 }
4050 return ret;
4051}
4052
4053static void qsmmuv500_tbu_power_off_all(struct arm_smmu_device *smmu)
4054{
4055 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004056 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004057
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004058 list_for_each_entry_reverse(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004059 arm_smmu_power_off(tbu->pwr);
4060 }
4061}
4062
4063static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu)
4064{
4065 unsigned long flags;
4066 u32 val;
4067 void __iomem *base;
4068
4069 spin_lock_irqsave(&tbu->halt_lock, flags);
4070 if (tbu->halt_count) {
4071 tbu->halt_count++;
4072 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4073 return 0;
4074 }
4075
4076 base = tbu->base;
4077 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
4078 val |= DEBUG_SID_HALT_VAL;
4079 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
4080
4081 if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG,
4082 val, (val & DEBUG_SR_HALT_ACK_VAL),
4083 0, TBU_DBG_TIMEOUT_US)) {
4084 dev_err(tbu->dev, "Couldn't halt TBU!\n");
4085 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4086 return -ETIMEDOUT;
4087 }
4088
4089 tbu->halt_count = 1;
4090 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4091 return 0;
4092}
4093
4094static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
4095{
4096 unsigned long flags;
4097 u32 val;
4098 void __iomem *base;
4099
4100 spin_lock_irqsave(&tbu->halt_lock, flags);
4101 if (!tbu->halt_count) {
4102 WARN(1, "%s: bad tbu->halt_count", dev_name(tbu->dev));
4103 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4104 return;
4105
4106 } else if (tbu->halt_count > 1) {
4107 tbu->halt_count--;
4108 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4109 return;
4110 }
4111
4112 base = tbu->base;
4113 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
4114 val &= ~DEBUG_SID_HALT_VAL;
4115 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
4116
4117 tbu->halt_count = 0;
4118 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4119}
4120
4121static int qsmmuv500_halt_all(struct arm_smmu_device *smmu)
4122{
4123 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004124 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004125 int ret = 0;
4126
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004127 list_for_each_entry(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004128 ret = qsmmuv500_tbu_halt(tbu);
4129 if (ret)
4130 break;
4131 }
4132
4133 if (!ret)
4134 return 0;
4135
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004136 list_for_each_entry_continue_reverse(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004137 qsmmuv500_tbu_resume(tbu);
4138 }
4139 return ret;
4140}
4141
4142static void qsmmuv500_resume_all(struct arm_smmu_device *smmu)
4143{
4144 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004145 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004146
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004147 list_for_each_entry(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004148 qsmmuv500_tbu_resume(tbu);
4149 }
4150}
4151
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004152static struct qsmmuv500_tbu_device *qsmmuv500_find_tbu(
4153 struct arm_smmu_device *smmu, u32 sid)
4154{
4155 struct qsmmuv500_tbu_device *tbu = NULL;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004156 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004157
4158 list_for_each_entry(tbu, &data->tbus, list) {
4159 if (tbu->sid_start <= sid &&
4160 sid < tbu->sid_start + tbu->num_sids)
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07004161 return tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004162 }
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07004163 return NULL;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004164}
4165
Patrick Daly1f8a2882016-09-12 17:32:05 -07004166static void qsmmuv500_device_reset(struct arm_smmu_device *smmu)
4167{
4168 int i, ret;
4169 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
4170
4171 ret = qsmmuv500_tbu_power_on_all(smmu);
4172 if (ret)
4173 return;
4174
4175 /* Program implementation defined registers */
4176 qsmmuv500_halt_all(smmu);
4177 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
4178 writel_relaxed(regs[i].value,
4179 ARM_SMMU_GR0(smmu) + regs[i].offset);
4180 qsmmuv500_resume_all(smmu);
4181 qsmmuv500_tbu_power_off_all(smmu);
4182}
4183
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004184static int qsmmuv500_ecats_lock(struct arm_smmu_domain *smmu_domain,
4185 struct qsmmuv500_tbu_device *tbu,
4186 unsigned long *flags)
4187{
4188 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004189 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004190 u32 val;
4191
4192 spin_lock_irqsave(&smmu->atos_lock, *flags);
4193 /* The status register is not accessible on version 1.0 */
4194 if (data->version == 0x01000000)
4195 return 0;
4196
4197 if (readl_poll_timeout_atomic(tbu->status_reg,
4198 val, (val == 0x1), 0,
4199 TBU_DBG_TIMEOUT_US)) {
4200 dev_err(tbu->dev, "ECATS hw busy!\n");
4201 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4202 return -ETIMEDOUT;
4203 }
4204
4205 return 0;
4206}
4207
4208static void qsmmuv500_ecats_unlock(struct arm_smmu_domain *smmu_domain,
4209 struct qsmmuv500_tbu_device *tbu,
4210 unsigned long *flags)
4211{
4212 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004213 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004214
4215 /* The status register is not accessible on version 1.0 */
4216 if (data->version != 0x01000000)
4217 writel_relaxed(0, tbu->status_reg);
4218 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4219}
4220
4221/*
4222 * Zero means failure.
4223 */
4224static phys_addr_t qsmmuv500_iova_to_phys(
4225 struct iommu_domain *domain, dma_addr_t iova, u32 sid)
4226{
4227 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
4228 struct arm_smmu_device *smmu = smmu_domain->smmu;
4229 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
4230 struct qsmmuv500_tbu_device *tbu;
4231 int ret;
4232 phys_addr_t phys = 0;
4233 u64 val, fsr;
4234 unsigned long flags;
4235 void __iomem *cb_base;
4236 u32 sctlr_orig, sctlr;
4237 int needs_redo = 0;
4238
4239 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
4240 tbu = qsmmuv500_find_tbu(smmu, sid);
4241 if (!tbu)
4242 return 0;
4243
4244 ret = arm_smmu_power_on(tbu->pwr);
4245 if (ret)
4246 return 0;
4247
4248 /*
4249 * Disable client transactions & wait for existing operations to
4250 * complete.
4251 */
4252 ret = qsmmuv500_tbu_halt(tbu);
4253 if (ret)
4254 goto out_power_off;
4255
4256 /* Only one concurrent atos operation */
4257 ret = qsmmuv500_ecats_lock(smmu_domain, tbu, &flags);
4258 if (ret)
4259 goto out_resume;
4260
4261 /*
4262 * We can be called from an interrupt handler with FSR already set
4263 * so terminate the faulting transaction prior to starting ecats.
4264 * No new racing faults can occur since we in the halted state.
4265 * ECATS can trigger the fault interrupt, so disable it temporarily
4266 * and check for an interrupt manually.
4267 */
4268 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4269 if (fsr & FSR_FAULT) {
4270 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
4271 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4272 }
4273 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
4274 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
4275 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
4276
4277redo:
4278 /* Set address and stream-id */
4279 val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
4280 val |= sid & DEBUG_SID_HALT_SID_MASK;
4281 writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
4282 writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
4283
4284 /*
4285 * Write-back Read and Write-Allocate
4286 * Priviledged, nonsecure, data transaction
4287 * Read operation.
4288 */
4289 val = 0xF << DEBUG_TXN_AXCACHE_SHIFT;
4290 val |= 0x3 << DEBUG_TXN_AXPROT_SHIFT;
4291 val |= DEBUG_TXN_TRIGGER;
4292 writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);
4293
4294 ret = 0;
4295 if (readl_poll_timeout_atomic(tbu->base + DEBUG_SR_HALT_ACK_REG,
4296 val, !(val & DEBUG_SR_ECATS_RUNNING_VAL),
4297 0, TBU_DBG_TIMEOUT_US)) {
4298 dev_err(tbu->dev, "ECATS translation timed out!\n");
4299 }
4300
4301 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4302 if (fsr & FSR_FAULT) {
4303 dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx\n",
4304 val);
4305 ret = -EINVAL;
4306
4307 writel_relaxed(val, cb_base + ARM_SMMU_CB_FSR);
4308 /*
4309 * Clear pending interrupts
4310 * Barrier required to ensure that the FSR is cleared
4311 * before resuming SMMU operation
4312 */
4313 wmb();
4314 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4315 }
4316
4317 val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
4318 if (val & DEBUG_PAR_FAULT_VAL) {
4319 dev_err(tbu->dev, "ECATS translation failed! PAR = %llx\n",
4320 val);
4321 ret = -EINVAL;
4322 }
4323
4324 phys = (val >> DEBUG_PAR_PA_SHIFT) & DEBUG_PAR_PA_MASK;
4325 if (ret < 0)
4326 phys = 0;
4327
4328 /* Reset hardware */
4329 writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
4330 writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
4331
4332 /*
4333 * After a failed translation, the next successful translation will
4334 * incorrectly be reported as a failure.
4335 */
4336 if (!phys && needs_redo++ < 2)
4337 goto redo;
4338
4339 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
4340 qsmmuv500_ecats_unlock(smmu_domain, tbu, &flags);
4341
4342out_resume:
4343 qsmmuv500_tbu_resume(tbu);
4344
4345out_power_off:
4346 arm_smmu_power_off(tbu->pwr);
4347
4348 return phys;
4349}
4350
4351static phys_addr_t qsmmuv500_iova_to_phys_hard(
4352 struct iommu_domain *domain, dma_addr_t iova)
4353{
4354 u16 sid;
4355 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
4356 struct iommu_fwspec *fwspec;
4357
4358 /* Select a sid */
4359 fwspec = smmu_domain->dev->iommu_fwspec;
4360 sid = (u16)fwspec->ids[0];
4361
4362 return qsmmuv500_iova_to_phys(domain, iova, sid);
4363}
4364
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004365static int qsmmuv500_tbu_register(struct device *dev, void *cookie)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004366{
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004367 struct arm_smmu_device *smmu = cookie;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004368 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004369 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004370
4371 if (!dev->driver) {
4372 dev_err(dev, "TBU failed probe, QSMMUV500 cannot continue!\n");
4373 return -EINVAL;
4374 }
4375
4376 tbu = dev_get_drvdata(dev);
4377
4378 INIT_LIST_HEAD(&tbu->list);
4379 tbu->smmu = smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004380 list_add(&tbu->list, &data->tbus);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004381 return 0;
4382}
4383
4384static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
4385{
Patrick Dalya0fddb62017-03-27 19:26:59 -07004386 struct resource *res;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004387 struct device *dev = smmu->dev;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004388 struct qsmmuv500_archdata *data;
Patrick Dalya0fddb62017-03-27 19:26:59 -07004389 struct platform_device *pdev;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004390 int ret;
4391
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004392 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
4393 if (!data)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004394 return -ENOMEM;
4395
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004396 INIT_LIST_HEAD(&data->tbus);
Patrick Dalya0fddb62017-03-27 19:26:59 -07004397
4398 pdev = container_of(dev, struct platform_device, dev);
4399 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcu-base");
4400 data->tcu_base = devm_ioremap_resource(dev, res);
4401 if (IS_ERR(data->tcu_base))
4402 return PTR_ERR(data->tcu_base);
4403
4404 data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004405 smmu->archdata = data;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004406
4407 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
4408 if (ret)
4409 return ret;
4410
4411 /* Attempt to register child devices */
4412 ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
4413 if (ret)
Patrick Daly6ce54262017-04-12 21:24:06 -07004414 return -EPROBE_DEFER;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004415
4416 return 0;
4417}
4418
4419struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
4420 .init = qsmmuv500_arch_init,
4421 .device_reset = qsmmuv500_device_reset,
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004422 .iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
Patrick Daly1f8a2882016-09-12 17:32:05 -07004423};
4424
4425static const struct of_device_id qsmmuv500_tbu_of_match[] = {
4426 {.compatible = "qcom,qsmmuv500-tbu"},
4427 {}
4428};
4429
4430static int qsmmuv500_tbu_probe(struct platform_device *pdev)
4431{
4432 struct resource *res;
4433 struct device *dev = &pdev->dev;
4434 struct qsmmuv500_tbu_device *tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004435 const __be32 *cell;
4436 int len;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004437
4438 tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
4439 if (!tbu)
4440 return -ENOMEM;
4441
4442 INIT_LIST_HEAD(&tbu->list);
4443 tbu->dev = dev;
4444 spin_lock_init(&tbu->halt_lock);
4445
4446 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
4447 tbu->base = devm_ioremap_resource(dev, res);
4448 if (IS_ERR(tbu->base))
4449 return PTR_ERR(tbu->base);
4450
4451 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "status-reg");
4452 tbu->status_reg = devm_ioremap_resource(dev, res);
4453 if (IS_ERR(tbu->status_reg))
4454 return PTR_ERR(tbu->status_reg);
4455
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004456 cell = of_get_property(dev->of_node, "qcom,stream-id-range", &len);
4457 if (!cell || len < 8)
4458 return -EINVAL;
4459
4460 tbu->sid_start = of_read_number(cell, 1);
4461 tbu->num_sids = of_read_number(cell + 1, 1);
4462
Patrick Daly1f8a2882016-09-12 17:32:05 -07004463 tbu->pwr = arm_smmu_init_power_resources(pdev);
4464 if (IS_ERR(tbu->pwr))
4465 return PTR_ERR(tbu->pwr);
4466
4467 dev_set_drvdata(dev, tbu);
4468 return 0;
4469}
4470
4471static struct platform_driver qsmmuv500_tbu_driver = {
4472 .driver = {
4473 .name = "qsmmuv500-tbu",
4474 .of_match_table = of_match_ptr(qsmmuv500_tbu_of_match),
4475 },
4476 .probe = qsmmuv500_tbu_probe,
4477};
4478
Will Deacon45ae7cf2013-06-24 18:31:25 +01004479MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
4480MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
4481MODULE_LICENSE("GPL v2");