/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/of_platform.h>
#include <linux/msm-bus.h>
#include <dt-bindings/msm/msm-bus-ids.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS	128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)	((smmu)->base)
#define ARM_SMMU_GR1(smmu)	((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq	writeq_relaxed
#else
#define smmu_write_atomic_lq	writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0	0x0
#define sCR0_CLIENTPD		(1 << 0)
#define sCR0_GFRE		(1 << 1)
#define sCR0_GFIE		(1 << 2)
#define sCR0_GCFGFRE		(1 << 4)
#define sCR0_GCFGFIE		(1 << 5)
#define sCR0_USFCFG		(1 << 10)
#define sCR0_VMIDPNE		(1 << 11)
#define sCR0_PTM		(1 << 12)
#define sCR0_FB			(1 << 13)
#define sCR0_VMID16EN		(1 << 31)
#define sCR0_BSU_SHIFT		14
#define sCR0_BSU_MASK		0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR	0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0	0x20
#define ARM_SMMU_GR0_ID1	0x24
#define ARM_SMMU_GR0_ID2	0x28
#define ARM_SMMU_GR0_ID3	0x2c
#define ARM_SMMU_GR0_ID4	0x30
#define ARM_SMMU_GR0_ID5	0x34
#define ARM_SMMU_GR0_ID6	0x38
#define ARM_SMMU_GR0_ID7	0x3c
#define ARM_SMMU_GR0_sGFSR	0x48
#define ARM_SMMU_GR0_sGFSYNR0	0x50
#define ARM_SMMU_GR0_sGFSYNR1	0x54
#define ARM_SMMU_GR0_sGFSYNR2	0x58

#define ID0_S1TS		(1 << 30)
#define ID0_S2TS		(1 << 29)
#define ID0_NTS			(1 << 28)
#define ID0_SMS			(1 << 27)
#define ID0_ATOSNS		(1 << 26)
#define ID0_PTFS_NO_AARCH32	(1 << 25)
#define ID0_PTFS_NO_AARCH32S	(1 << 24)
#define ID0_CTTW		(1 << 14)
#define ID0_NUMIRPT_SHIFT	16
#define ID0_NUMIRPT_MASK	0xff
#define ID0_NUMSIDB_SHIFT	9
#define ID0_NUMSIDB_MASK	0xf
#define ID0_NUMSMRG_SHIFT	0
#define ID0_NUMSMRG_MASK	0xff

#define ID1_PAGESIZE		(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT	28
#define ID1_NUMPAGENDXB_MASK	7
#define ID1_NUMS2CB_SHIFT	16
#define ID1_NUMS2CB_MASK	0xff
#define ID1_NUMCB_SHIFT		0
#define ID1_NUMCB_MASK		0xff

#define ID2_OAS_SHIFT		4
#define ID2_OAS_MASK		0xf
#define ID2_IAS_SHIFT		0
#define ID2_IAS_MASK		0xf
#define ID2_UBS_SHIFT		8
#define ID2_UBS_MASK		0xf
#define ID2_PTFS_4K		(1 << 12)
#define ID2_PTFS_16K		(1 << 13)
#define ID2_PTFS_64K		(1 << 14)
#define ID2_VMID16		(1 << 15)

#define ID7_MAJOR_SHIFT		4
#define ID7_MAJOR_MASK		0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID	0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH	0x6c
#define ARM_SMMU_GR0_sTLBGSYNC	0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE	(1 << 0)
#define TLB_LOOP_TIMEOUT	500000	/* 500ms */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)	(0x800 + ((n) << 2))
#define SMR_VALID		(1 << 31)
#define SMR_MASK_SHIFT		16
#define SMR_ID_SHIFT		0

#define ARM_SMMU_GR0_S2CR(n)	(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT	0
#define S2CR_CBNDX_MASK		0xff
#define S2CR_TYPE_SHIFT		16
#define S2CR_TYPE_MASK		0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT	24
#define S2CR_PRIVCFG_MASK	0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)	(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT		0
#define CBAR_VMID_MASK		0xff
#define CBAR_S1_BPSHCFG_SHIFT	8
#define CBAR_S1_BPSHCFG_MASK	3
#define CBAR_S1_BPSHCFG_NSH	3
#define CBAR_S1_MEMATTR_SHIFT	12
#define CBAR_S1_MEMATTR_MASK	0xf
#define CBAR_S1_MEMATTR_WB	0xf
#define CBAR_TYPE_SHIFT		16
#define CBAR_TYPE_MASK		0x3
#define CBAR_TYPE_S2_TRANS	(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT	24
#define CBAR_IRPTNDX_MASK	0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK	(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)	(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT	(0 << 0)
#define CBA2R_RW64_64BIT	(1 << 0)
#define CBA2R_VMID_SHIFT	16
#define CBA2R_VMID_MASK		0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)	((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)	((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR	0x0
#define ARM_SMMU_CB_ACTLR	0x4
#define ARM_SMMU_CB_RESUME	0x8
#define ARM_SMMU_CB_TTBCR2	0x10
#define ARM_SMMU_CB_TTBR0	0x20
#define ARM_SMMU_CB_TTBR1	0x28
#define ARM_SMMU_CB_TTBCR	0x30
#define ARM_SMMU_CB_CONTEXTIDR	0x34
#define ARM_SMMU_CB_S1_MAIR0	0x38
#define ARM_SMMU_CB_S1_MAIR1	0x3c
#define ARM_SMMU_CB_PAR		0x50
#define ARM_SMMU_CB_FSR		0x58
#define ARM_SMMU_CB_FSRRESTORE	0x5c
#define ARM_SMMU_CB_FAR		0x60
#define ARM_SMMU_CB_FSYNR0	0x68
#define ARM_SMMU_CB_S1_TLBIVA	0x600
#define ARM_SMMU_CB_S1_TLBIASID	0x610
#define ARM_SMMU_CB_S1_TLBIVAL	0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC	0x7f0
#define ARM_SMMU_CB_TLBSTATUS	0x7f4
#define TLBSTATUS_SACTIVE	(1 << 0)
#define ARM_SMMU_CB_ATS1PR	0x800
#define ARM_SMMU_CB_ATSR	0x8f0

#define SCTLR_S1_ASIDPNE	(1 << 12)
#define SCTLR_CFCFG		(1 << 7)
#define SCTLR_CFIE		(1 << 6)
#define SCTLR_CFRE		(1 << 5)
#define SCTLR_E			(1 << 4)
#define SCTLR_AFE		(1 << 2)
#define SCTLR_TRE		(1 << 1)
#define SCTLR_M			(1 << 0)

#define ARM_MMU500_ACTLR_CPRE	(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define CB_PAR_F		(1 << 0)

#define ATSR_ACTIVE		(1 << 0)

#define RESUME_RETRY		(0 << 0)
#define RESUME_TERMINATE	(1 << 0)

#define TTBCR2_SEP_SHIFT	15
#define TTBCR2_SEP_UPSTREAM	(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT	48

#define FSR_MULTI		(1 << 31)
#define FSR_SS			(1 << 30)
#define FSR_UUT			(1 << 8)
#define FSR_ASF			(1 << 7)
#define FSR_TLBLKF		(1 << 6)
#define FSR_TLBMCF		(1 << 5)
#define FSR_EF			(1 << 4)
#define FSR_PF			(1 << 3)
#define FSR_AFF			(1 << 2)
#define FSR_TF			(1 << 1)

#define FSR_IGN			(FSR_AFF | FSR_ASF | \
				 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT		(FSR_MULTI | FSR_SS | FSR_UUT | \
				 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR		(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
	QCOM_SMMUV500,
};

struct arm_smmu_device;
struct arm_smmu_arch_ops {
	int (*init)(struct arm_smmu_device *smmu);
	void (*device_reset)(struct arm_smmu_device *smmu);
	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
					 dma_addr_t iova);
	void (*iova_to_phys_fault)(struct iommu_domain *domain,
				   dma_addr_t iova, phys_addr_t *phys1,
				   phys_addr_t *phys_post_tlbiall);
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_s2cr {
	struct iommu_group *group;
	int count;
	enum arm_smmu_s2cr_type type;
	enum arm_smmu_s2cr_privcfg privcfg;
	u8 cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16 mask;
	u16 id;
	bool valid;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device *smmu;
	s16 smendx[];
};
#define INVALID_SMENDX		-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

/*
 * Describes resources required for on/off power operation.
 * Separate reference count is provided for atomic/nonatomic
 * operations.
 */
struct arm_smmu_power_resources {
	struct platform_device *pdev;
	struct device *dev;

	struct clk **clocks;
	int num_clocks;

	struct regulator_bulk_data *gdscs;
	int num_gdscs;

	uint32_t bus_client;
	struct msm_bus_scale_pdata *bus_dt_data;

	/* Protects power_count */
	struct mutex power_lock;
	int power_count;

	/* Protects clock_refs_count */
	spinlock_t clock_refs_lock;
	int clock_refs_count;
};

struct arm_smmu_device {
	struct device *dev;

	void __iomem *base;
	unsigned long size;
	unsigned long pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32 features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
#define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
	u32 options;
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;

	u32 num_context_banks;
	u32 num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t irptndx;

	u32 num_mapping_groups;
	u16 streamid_mask;
	u16 smr_mask_mask;
	struct arm_smmu_smr *smrs;
	struct arm_smmu_s2cr *s2crs;
	struct mutex stream_map_mutex;

	unsigned long va_size;
	unsigned long ipa_size;
	unsigned long pa_size;
	unsigned long pgsize_bitmap;

	u32 num_global_irqs;
	u32 num_context_irqs;
	unsigned int *irqs;

	u32 cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg *impl_def_attach_registers;
	unsigned int num_impl_def_attach_registers;

	struct arm_smmu_power_resources *pwr;

	spinlock_t atos_lock;

	/* protects idr */
	struct mutex idr_mutex;
	struct idr asid_idr;

	struct arm_smmu_arch_ops *arch_ops;
	void *archdata;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8 cbndx;
	u8 irptndx;
	u32 cbar;
	u32 procid;
	u16 asid;
	enum arm_smmu_context_fmt fmt;
};
#define INVALID_IRPTNDX		0xff
#define INVALID_CBNDX		0xff
#define INVALID_ASID		0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID		0xff

#define ARM_SMMU_CB_ASID(smmu, cfg)	((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_pte_info {
	void *virt_addr;
	size_t size;
	struct list_head entry;
};

struct arm_smmu_domain {
	struct arm_smmu_device *smmu;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	spinlock_t pgtbl_lock;
	struct arm_smmu_cfg cfg;
	enum arm_smmu_domain_stage stage;
	struct mutex init_mutex; /* Protects smmu pointer */
	u32 attributes;
	u32 secure_vmid;
	struct list_head pte_info_list;
	struct list_head unassign_list;
	struct mutex assign_lock;
	struct list_head secure_pool_list;
	struct iommu_domain domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ 0, NULL},
};

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);

static int arm_smmu_arch_init(struct arm_smmu_device *smmu);
static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_dbg(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
{
	return (smmu_domain->secure_vmid != VMID_INVAL);
}

static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_lock(&smmu_domain->assign_lock);
}

static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_unlock(&smmu_domain->assign_lock);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err = 0;

	memset(&it, 0, sizeof(it));
	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

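/*
 * Atomically claim a free index in a resource bitmap (context banks, SMR
 * slots); returns -ENOSPC once every bit in [start, end) is taken.
 */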
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_prepare(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
			while (i--)
				clk_unprepare(pwr->clocks[i]);
			break;
		}
	}
	return ret;
}

static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_unprepare(pwr->clocks[i - 1]);
}

static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_enable(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
			while (i--)
				clk_disable(pwr->clocks[i]);
			break;
		}
	}

	return ret;
}

static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_disable(pwr->clocks[i - 1]);
}

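/* Vote for (and later drop) SMMU bus bandwidth via the MSM bus-scaling client */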
static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return 0;
	return msm_bus_scale_client_update_request(pwr->bus_client, 1);
}

static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return;
	WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
}

/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count > 0) {
		pwr->clock_refs_count++;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return 0;
	}

	ret = arm_smmu_enable_clocks(pwr);
	if (!ret)
		pwr->clock_refs_count = 1;

	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
	return ret;
}

/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
{
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count == 0) {
		WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;

	} else if (pwr->clock_refs_count > 1) {
		pwr->clock_refs_count--;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;
	}

	arm_smmu_disable_clocks(pwr);

	pwr->clock_refs_count = 0;
	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
}

static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
{
	int ret;

	mutex_lock(&pwr->power_lock);
	if (pwr->power_count > 0) {
		pwr->power_count += 1;
		mutex_unlock(&pwr->power_lock);
		return 0;
	}

	ret = regulator_bulk_enable(pwr->num_gdscs, pwr->gdscs);
	if (ret)
		goto out_unlock;

	ret = arm_smmu_request_bus(pwr);
	if (ret)
		goto out_disable_regulators;

	ret = arm_smmu_prepare_clocks(pwr);
	if (ret)
		goto out_disable_bus;

	pwr->power_count = 1;
	mutex_unlock(&pwr->power_lock);
	return 0;

out_disable_bus:
	arm_smmu_unrequest_bus(pwr);
out_disable_regulators:
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
out_unlock:
	mutex_unlock(&pwr->power_lock);
	return ret;
}

static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
{
	mutex_lock(&pwr->power_lock);
	if (pwr->power_count == 0) {
		WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
		mutex_unlock(&pwr->power_lock);
		return;

	} else if (pwr->power_count > 1) {
		pwr->power_count--;
		mutex_unlock(&pwr->power_lock);
		return;
	}

	arm_smmu_unprepare_clocks(pwr);
	arm_smmu_unrequest_bus(pwr);
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);

	mutex_unlock(&pwr->power_lock);
}

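/*
 * Full power-on: take the sleepable resources (regulators, bus vote, clock
 * prepare) first, then the atomic clock-enable reference.
 */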
static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
{
	int ret;

	ret = arm_smmu_power_on_slow(pwr);
	if (ret)
		return ret;

	ret = arm_smmu_power_on_atomic(pwr);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	arm_smmu_power_off_slow(pwr);
	return ret;
}

static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
{
	arm_smmu_power_off_atomic(pwr);
	arm_smmu_power_off_slow(pwr);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static int arm_smmu_domain_power_on(struct iommu_domain *domain,
				    struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain)
		return arm_smmu_power_on_atomic(smmu->pwr);

	return arm_smmu_power_on(smmu->pwr);
}

/*
 * Must be used instead of arm_smmu_power_off if it may be called from
 * atomic context
 */
static void arm_smmu_domain_power_off(struct iommu_domain *domain,
				      struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain) {
		arm_smmu_power_off_atomic(smmu->pwr);
		return;
	}

	arm_smmu_power_off(smmu->pwr);
}

/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
				 int cbndx)
{
	void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
	u32 val;

	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
				      !(val & TLBSTATUS_SACTIVE),
				      0, TLB_LOOP_TIMEOUT))
		dev_err(smmu->dev, "TLBSYNC timeout!\n");
}

static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
}

/* Must be called with clocks/regulators enabled */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
		__arm_smmu_tlb_sync(smmu);
	}
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

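/*
 * Secure domains keep a per-domain pool of already-prepared page-table
 * pages: pages released by the io-pgtable code are parked here and handed
 * back out by arm_smmu_alloc_pages_exact() so that
 * arm_smmu_prepare_pgtable()/arm_smmu_unprepare_pgtable() do not have to be
 * re-run on every allocation.
 */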
struct arm_smmu_secure_pool_chunk {
	void *addr;
	size_t size;
	struct list_head list;
};

static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
					 size_t size)
{
	struct arm_smmu_secure_pool_chunk *it;

	list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
		if (it->size == size) {
			void *addr = it->addr;

			list_del(&it->list);
			kfree(it);
			return addr;
		}
	}

	return NULL;
}

static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
				    void *addr, size_t size)
{
	struct arm_smmu_secure_pool_chunk *chunk;

	chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
	if (!chunk)
		return -ENOMEM;

	chunk->addr = addr;
	chunk->size = size;
	memset(addr, 0, size);
	list_add(&chunk->list, &smmu_domain->secure_pool_list);

	return 0;
}

static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_secure_pool_chunk *it, *i;

	list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
		arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
		/* pages will be freed later (after being unassigned) */
		kfree(it);
	}
}

static void *arm_smmu_alloc_pages_exact(void *cookie,
					size_t size, gfp_t gfp_mask)
{
	int ret;
	void *page;
	struct arm_smmu_domain *smmu_domain = cookie;

	if (!arm_smmu_is_domain_secure(smmu_domain))
		return alloc_pages_exact(size, gfp_mask);

	page = arm_smmu_secure_pool_remove(smmu_domain, size);
	if (page)
		return page;

	page = alloc_pages_exact(size, gfp_mask);
	if (page) {
		ret = arm_smmu_prepare_pgtable(page, cookie);
		if (ret) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return page;
}

static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	if (!arm_smmu_is_domain_secure(smmu_domain)) {
		free_pages_exact(virt, size);
		return;
	}

	if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
		arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
	.alloc_pages_exact = arm_smmu_alloc_pages_exact,
	.free_pages_exact = arm_smmu_free_pages_exact,
};

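/*
 * On an unhandled context fault, retry the hardware translation (ATOS)
 * before and after invalidating the TLB: a mismatch points at stale TLB
 * entries rather than bad page tables.
 */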
static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
					 dma_addr_t iova, u32 fsr)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	phys_addr_t phys;
	phys_addr_t phys_post_tlbiall;

	smmu = smmu_domain->smmu;

	if (smmu->arch_ops && smmu->arch_ops->iova_to_phys_fault) {
		smmu->arch_ops->iova_to_phys_fault(domain, iova, &phys,
						   &phys_post_tlbiall);
	} else {
		phys = arm_smmu_iova_to_phys_hard(domain, iova);
		arm_smmu_tlb_inv_context(smmu_domain);
		phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
	}

	if (phys != phys_post_tlbiall) {
		dev_err(smmu->dev,
			"ATOS results differed across TLBIALL...\n"
			"Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
	}
	if (!phys_post_tlbiall) {
		dev_err(smmu->dev,
			"ATOS still failed. If the page tables look good (check the software table walk) then hardware might be misbehaving.\n");
	}

	return phys_post_tlbiall;
}

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret, tmp;
	u32 fsr, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;
	void __iomem *gr1_base;
	bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
	phys_addr_t phys_soft;
	u32 frsynra;
	bool non_fatal_fault = !!(smmu_domain->attributes &
					DOMAIN_ATTR_NON_FATAL_FAULTS);

	static DEFINE_RATELIMIT_STATE(_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	ret = arm_smmu_power_on(smmu->pwr);
	if (ret)
		return IRQ_NONE;

	gr1_base = ARM_SMMU_GR1(smmu);
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT)) {
		ret = IRQ_NONE;
		goto out_power_off;
	}

	if (fatal_asf && (fsr & FSR_ASF)) {
		dev_err(smmu->dev,
			"Took an address size fault. Refusing to recover.\n");
		BUG();
	}

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
	if (fsr & FSR_TF)
		flags |= IOMMU_FAULT_TRANSLATION;
	if (fsr & FSR_PF)
		flags |= IOMMU_FAULT_PERMISSION;
	if (fsr & FSR_EF)
		flags |= IOMMU_FAULT_EXTERNAL;
	if (fsr & FSR_SS)
		flags |= IOMMU_FAULT_TRANSACTION_STALLED;

	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	phys_soft = arm_smmu_iova_to_phys(domain, iova);
	frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
	frsynra &= CBFRSYNRA_SID_MASK;
	tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
	if (!tmp || (tmp == -EBUSY)) {
		dev_dbg(smmu->dev,
			"Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
			iova, fsr, fsynr, cfg->cbndx);
		dev_dbg(smmu->dev,
			"soft iova-to-phys=%pa\n", &phys_soft);
		ret = IRQ_HANDLED;
		resume = RESUME_TERMINATE;
	} else {
		phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
							      fsr);
		if (__ratelimit(&_rs)) {
			dev_err(smmu->dev,
				"Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
				iova, fsr, fsynr, cfg->cbndx);
			dev_err(smmu->dev, "FAR = %016lx\n",
				(unsigned long)iova);
			dev_err(smmu->dev,
				"FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
				fsr,
				(fsr & 0x02) ? "TF " : "",
				(fsr & 0x04) ? "AFF " : "",
				(fsr & 0x08) ? "PF " : "",
				(fsr & 0x10) ? "EF " : "",
				(fsr & 0x20) ? "TLBMCF " : "",
				(fsr & 0x40) ? "TLBLKF " : "",
				(fsr & 0x80) ? "MHF " : "",
				(fsr & 0x40000000) ? "SS " : "",
				(fsr & 0x80000000) ? "MULTI " : "");
			dev_err(smmu->dev,
				"soft iova-to-phys=%pa\n", &phys_soft);
			if (!phys_soft)
				dev_err(smmu->dev,
					"SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
					dev_name(smmu->dev));
			dev_err(smmu->dev,
				"hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
			dev_err(smmu->dev, "SID=0x%x\n", frsynra);
		}
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
		if (!non_fatal_fault) {
			dev_err(smmu->dev,
				"Unhandled arm-smmu context fault!\n");
			BUG();
		}
	}

	/*
	 * If the client returns -EBUSY, do not clear FSR and do not RESUME
	 * if stalled. This is required to keep the IOMMU client stalled on
	 * the outstanding fault. This gives the client a chance to take any
	 * debug action and then terminate the stalled transaction.
	 * So, the sequence in case of stall on fault should be:
	 * 1) Do not clear FSR or write to RESUME here
	 * 2) Client takes any debug action
	 * 3) Client terminates the stalled transaction and resumes the IOMMU
	 * 4) Client clears FSR. The FSR should only be cleared after 3) and
	 * not before so that the fault remains outstanding. This ensures
	 * SCTLR.HUPCF has the desired effect if subsequent transactions also
	 * need to be terminated.
	 */
	if (tmp != -EBUSY) {
		/* Clear the faulting FSR */
		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);

		/*
		 * Barrier required to ensure that the FSR is cleared
		 * before resuming SMMU operation
		 */
		wmb();

		/* Retry or terminate any stalled transactions */
		if (fsr & FSR_SS)
			writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
	}

out_power_off:
	arm_smmu_power_off(smmu->pwr);

	return ret;
}
1292
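/*
 * Top-half handler for global (non-context) faults. The SMMU is powered on
 * for the duration of the handler, the global fault status and syndrome
 * registers are read through the (possibly secure-aliased) GR0 region, the
 * fault is logged, and sGFSR is written back to acknowledge it.
 */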
1293static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1294{
1295 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1296 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001297 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001298
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001299 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001300 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001301
Will Deacon45ae7cf2013-06-24 18:31:25 +01001302 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1303 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1304 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1305 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1306
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001307 if (!gfsr) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001308 arm_smmu_power_off(smmu->pwr);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001309 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001310 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001311
Will Deacon45ae7cf2013-06-24 18:31:25 +01001312 dev_err_ratelimited(smmu->dev,
1313 "Unexpected global fault, this could be serious\n");
1314 dev_err_ratelimited(smmu->dev,
1315 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1316 gfsr, gfsynr0, gfsynr1, gfsynr2);
1317
1318 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001319 arm_smmu_power_off(smmu->pwr);
Will Deaconadaba322013-07-31 19:21:26 +01001320 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001321}
1322
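/*
 * Program a context bank from the io-pgtable configuration: CBA2R/CBAR in
 * the GR1 region, then the TTBRs, TTBCR(2), MAIRs (or PRRR/NMRR for the
 * short-descriptor format) and finally SCTLR in the context bank itself,
 * taking care of the stage-1/stage-2 and AArch32/AArch64 differences.
 */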
Will Deacon518f7132014-11-14 17:17:54 +00001323static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1324 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001325{
Robin Murphyb94df6f2016-08-11 17:44:06 +01001326 u32 reg, reg2;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001327 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001328 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +01001329 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1330 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001331 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001332
Will Deacon45ae7cf2013-06-24 18:31:25 +01001333 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001334 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1335 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001336
Will Deacon4a1c93c2015-03-04 12:21:03 +00001337 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001338 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1339 reg = CBA2R_RW64_64BIT;
1340 else
1341 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001342 /* 16-bit VMIDs live in CBA2R */
1343 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001344 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001345
Will Deacon4a1c93c2015-03-04 12:21:03 +00001346 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
1347 }
1348
Will Deacon45ae7cf2013-06-24 18:31:25 +01001349 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001350 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001351 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001352 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001353
Will Deacon57ca90f2014-02-06 14:59:05 +00001354 /*
1355 * Use the weakest shareability/memory types, so they are
1356 * overridden by the ttbcr/pte.
1357 */
1358 if (stage1) {
1359 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1360 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001361 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1362 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001363 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001364 }
Will Deacon44680ee2014-06-25 11:29:12 +01001365 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001366
Will Deacon518f7132014-11-14 17:17:54 +00001367 /* TTBRs */
1368 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001369 u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001370
Robin Murphyb94df6f2016-08-11 17:44:06 +01001371 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1372 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
1373 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
1374 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
1375 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
1376 writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
1377 } else {
1378 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1379 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1380 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
1381 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
1382 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1383 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
1384 }
Will Deacon518f7132014-11-14 17:17:54 +00001385 } else {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001386 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001387 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +00001388 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001389
Will Deacon518f7132014-11-14 17:17:54 +00001390 /* TTBCR */
1391 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001392 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1393 reg = pgtbl_cfg->arm_v7s_cfg.tcr;
1394 reg2 = 0;
1395 } else {
1396 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1397 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
1398 reg2 |= TTBCR2_SEP_UPSTREAM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001399 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001400 if (smmu->version > ARM_SMMU_V1)
1401 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001402 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001403 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001404 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001405 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001406
Will Deacon518f7132014-11-14 17:17:54 +00001407 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001408 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001409 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1410 reg = pgtbl_cfg->arm_v7s_cfg.prrr;
1411 reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
1412 } else {
1413 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1414 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1415 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001416 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001417 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001418 }
1419
Will Deacon45ae7cf2013-06-24 18:31:25 +01001420 /* SCTLR */
Robin Murphyb94df6f2016-08-11 17:44:06 +01001421 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
Patrick Dalye62d3362016-03-15 18:58:28 -07001422 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) ||
1423 !stage1)
1424 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001425 if (stage1)
1426 reg |= SCTLR_S1_ASIDPNE;
1427#ifdef __BIG_ENDIAN
1428 reg |= SCTLR_E;
1429#endif
Will Deacon25724842013-08-21 13:49:53 +01001430 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001431}
1432
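/*
 * Assign an ASID to the domain. Normal domains derive it directly from the
 * context bank index, whereas dynamic domains allocate a unique value from
 * the IDR in a range that cannot collide with the static per-context-bank
 * ASIDs.
 */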
Patrick Dalyc190d932016-08-30 17:23:28 -07001433static int arm_smmu_init_asid(struct iommu_domain *domain,
1434 struct arm_smmu_device *smmu)
1435{
1436 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1437 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1438 bool dynamic = is_dynamic_domain(domain);
1439 int ret;
1440
1441 if (!dynamic) {
1442 cfg->asid = cfg->cbndx + 1;
1443 } else {
1444 mutex_lock(&smmu->idr_mutex);
1445 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1446 smmu->num_context_banks + 2,
1447 MAX_ASID + 1, GFP_KERNEL);
1448
1449 mutex_unlock(&smmu->idr_mutex);
1450 if (ret < 0) {
1451 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1452 ret);
1453 return ret;
1454 }
1455 cfg->asid = ret;
1456 }
1457 return 0;
1458}
1459
1460static void arm_smmu_free_asid(struct iommu_domain *domain)
1461{
1462 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1463 struct arm_smmu_device *smmu = smmu_domain->smmu;
1464 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1465 bool dynamic = is_dynamic_domain(domain);
1466
1467 if (cfg->asid == INVALID_ASID || !dynamic)
1468 return;
1469
1470 mutex_lock(&smmu->idr_mutex);
1471 idr_remove(&smmu->asid_idr, cfg->asid);
1472 mutex_unlock(&smmu->idr_mutex);
1473}
1474
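/*
 * Finalise a domain the first time it is attached: choose a translation
 * stage and context format, allocate a context bank (dynamic domains supply
 * their own via a domain attribute), build the io-pgtable, assign any secure
 * page-table memory, pick an ASID and, for non-dynamic domains, program the
 * context bank and request its fault interrupt.
 */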
Will Deacon45ae7cf2013-06-24 18:31:25 +01001475static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Will Deacon44680ee2014-06-25 11:29:12 +01001476 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001477{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001478 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001479 unsigned long ias, oas;
1480 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001481 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001482 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001483 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001484 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyce6786f2016-11-09 14:19:23 -08001485 unsigned long quirks = 0;
Patrick Dalyc190d932016-08-30 17:23:28 -07001486 bool dynamic;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001487
Will Deacon518f7132014-11-14 17:17:54 +00001488 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001489 if (smmu_domain->smmu)
1490 goto out_unlock;
1491
Patrick Dalyc190d932016-08-30 17:23:28 -07001492 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1493 smmu_domain->cfg.asid = INVALID_ASID;
1494
Patrick Dalyc190d932016-08-30 17:23:28 -07001495 dynamic = is_dynamic_domain(domain);
1496 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1497 dev_err(smmu->dev, "dynamic domains not supported\n");
1498 ret = -EPERM;
1499 goto out_unlock;
1500 }
1501
Will Deaconc752ce42014-06-25 22:46:31 +01001502 /*
1503 * Mapping the requested stage onto what we support is surprisingly
1504 * complicated, mainly because the spec allows S1+S2 SMMUs without
1505 * support for nested translation. That means we end up with the
1506 * following table:
1507 *
1508 * Requested Supported Actual
1509 * S1 N S1
1510 * S1 S1+S2 S1
1511 * S1 S2 S2
1512 * S1 S1 S1
1513 * N N N
1514 * N S1+S2 S2
1515 * N S2 S2
1516 * N S1 S1
1517 *
1518 * Note that you can't actually request stage-2 mappings.
1519 */
1520 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1521 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1522 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1523 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1524
Robin Murphy7602b872016-04-28 17:12:09 +01001525 /*
1526 * Choosing a suitable context format is even more fiddly. Until we
1527 * grow some way for the caller to express a preference, and/or move
1528 * the decision into the io-pgtable code where it arguably belongs,
1529 * just aim for the closest thing to the rest of the system, and hope
1530 * that the hardware isn't esoteric enough that we can't assume AArch64
1531 * support to be a superset of AArch32 support...
1532 */
1533 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1534 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001535 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
1536 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
1537 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
1538 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
1539 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +01001540 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1541 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1542 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1543 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1544 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1545
1546 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1547 ret = -EINVAL;
1548 goto out_unlock;
1549 }
1550
Will Deaconc752ce42014-06-25 22:46:31 +01001551 switch (smmu_domain->stage) {
1552 case ARM_SMMU_DOMAIN_S1:
1553 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1554 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001555 ias = smmu->va_size;
1556 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001557 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001558 fmt = ARM_64_LPAE_S1;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001559 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +00001560 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001561 ias = min(ias, 32UL);
1562 oas = min(oas, 40UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001563 } else {
1564 fmt = ARM_V7S;
1565 ias = min(ias, 32UL);
1566 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +01001567 }
Will Deaconc752ce42014-06-25 22:46:31 +01001568 break;
1569 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001570 /*
1571 * We will likely want to change this if/when KVM gets
1572 * involved.
1573 */
Will Deaconc752ce42014-06-25 22:46:31 +01001574 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001575 cfg->cbar = CBAR_TYPE_S2_TRANS;
1576 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001577 ias = smmu->ipa_size;
1578 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001579 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001580 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001581 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001582 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001583 ias = min(ias, 40UL);
1584 oas = min(oas, 40UL);
1585 }
Will Deaconc752ce42014-06-25 22:46:31 +01001586 break;
1587 default:
1588 ret = -EINVAL;
1589 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001590 }
1591
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001592 if (is_fast)
1593 fmt = ARM_V8L_FAST;
1594
Patrick Dalyce6786f2016-11-09 14:19:23 -08001595 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
1596 quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001597
Patrick Dalyc190d932016-08-30 17:23:28 -07001598 /* Dynamic domains must set cbndx through domain attribute */
1599 if (!dynamic) {
1600 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
Will Deacon45ae7cf2013-06-24 18:31:25 +01001601 smmu->num_context_banks);
Patrick Dalyc190d932016-08-30 17:23:28 -07001602 if (ret < 0)
1603 goto out_unlock;
1604 cfg->cbndx = ret;
1605 }
Robin Murphyb7862e32016-04-13 18:13:03 +01001606 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001607 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1608 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001609 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001610 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001611 }
1612
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001613 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
Patrick Dalyce6786f2016-11-09 14:19:23 -08001614 .quirks = quirks,
Robin Murphyd5466352016-05-09 17:20:09 +01001615 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +00001616 .ias = ias,
1617 .oas = oas,
1618 .tlb = &arm_smmu_gather_ops,
Robin Murphy2df7a252015-07-29 19:46:06 +01001619 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +00001620 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001621
Will Deacon518f7132014-11-14 17:17:54 +00001622 smmu_domain->smmu = smmu;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001623 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1624 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001625 if (!pgtbl_ops) {
1626 ret = -ENOMEM;
1627 goto out_clear_smmu;
1628 }
1629
Patrick Dalyc11d1082016-09-01 15:52:44 -07001630 /*
1631 * assign any page table memory that might have been allocated
1632 * during alloc_io_pgtable_ops
1633 */
Patrick Dalye271f212016-10-04 13:24:49 -07001634 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001635 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001636 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001637
Robin Murphyd5466352016-05-09 17:20:09 +01001638 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001639 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Robin Murphyd7a8d042016-09-12 17:13:58 +01001640 domain->geometry.aperture_end = (1UL << ias) - 1;
1641 domain->geometry.force_aperture = true;
Will Deacon518f7132014-11-14 17:17:54 +00001642
Patrick Dalyc190d932016-08-30 17:23:28 -07001643 /* Assign an asid */
1644 ret = arm_smmu_init_asid(domain, smmu);
1645 if (ret)
1646 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001647
Patrick Dalyc190d932016-08-30 17:23:28 -07001648 if (!dynamic) {
1649 /* Initialise the context bank with our page table cfg */
1650 arm_smmu_init_context_bank(smmu_domain,
1651 &smmu_domain->pgtbl_cfg);
1652
1653 /*
1654 * Request context fault interrupt. Do this last to avoid the
1655 * handler seeing a half-initialised domain state.
1656 */
1657 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1658 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001659 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1660 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001661 if (ret < 0) {
1662 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1663 cfg->irptndx, irq);
1664 cfg->irptndx = INVALID_IRPTNDX;
1665 goto out_clear_smmu;
1666 }
1667 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001668 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001669 }
Will Deacon518f7132014-11-14 17:17:54 +00001670 mutex_unlock(&smmu_domain->init_mutex);
1671
1672 /* Publish page table ops for map/unmap */
1673 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001674 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001675
Will Deacon518f7132014-11-14 17:17:54 +00001676out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001677 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001678 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001679out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001680 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001681 return ret;
1682}
1683
Patrick Daly77db4f92016-10-14 15:34:10 -07001684static void arm_smmu_domain_reinit(struct arm_smmu_domain *smmu_domain)
1685{
1686 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1687 smmu_domain->cfg.cbndx = INVALID_CBNDX;
1688 smmu_domain->secure_vmid = VMID_INVAL;
1689}
1690
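/*
 * Tear down a domain's context. Dynamic domains only release their ASID,
 * page tables and any secure table memory; normal domains additionally
 * disable the context bank via SCTLR, free the context fault IRQ and return
 * the context bank index to the allocator.
 */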
Will Deacon45ae7cf2013-06-24 18:31:25 +01001691static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1692{
Joerg Roedel1d672632015-03-26 13:43:10 +01001693 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001694 struct arm_smmu_device *smmu = smmu_domain->smmu;
1695 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01001696 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001697 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07001698 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001699 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001700
Robin Murphy7e96c742016-09-14 15:26:46 +01001701 if (!smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001702 return;
1703
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001704 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001705 if (ret) {
	1706		WARN_ONCE(ret, "Whoops, powering on smmu %p failed. Leaking context bank\n",
1707 smmu);
1708 return;
1709 }
1710
Patrick Dalyc190d932016-08-30 17:23:28 -07001711 dynamic = is_dynamic_domain(domain);
1712 if (dynamic) {
1713 arm_smmu_free_asid(domain);
1714 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001715 arm_smmu_power_off(smmu->pwr);
Patrick Dalye271f212016-10-04 13:24:49 -07001716 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001717 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001718 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001719 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly77db4f92016-10-14 15:34:10 -07001720 arm_smmu_domain_reinit(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001721 return;
1722 }
1723
Will Deacon518f7132014-11-14 17:17:54 +00001724 /*
1725 * Disable the context bank and free the page tables before freeing
1726 * it.
1727 */
Will Deacon44680ee2014-06-25 11:29:12 +01001728 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001729 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01001730
Will Deacon44680ee2014-06-25 11:29:12 +01001731 if (cfg->irptndx != INVALID_IRPTNDX) {
1732 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08001733 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001734 }
1735
Markus Elfring44830b02015-11-06 18:32:41 +01001736 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07001737 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001738 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001739 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001740 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001741 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001742
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001743 arm_smmu_power_off(smmu->pwr);
Patrick Daly77db4f92016-10-14 15:34:10 -07001744 arm_smmu_domain_reinit(smmu_domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001745}
1746
Joerg Roedel1d672632015-03-26 13:43:10 +01001747static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001748{
1749 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001750
Patrick Daly09801312016-08-29 17:02:52 -07001751 /* Do not support DOMAIN_DMA for now */
1752 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01001753 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001754 /*
1755 * Allocate the domain and initialise some of its data structures.
1756 * We can't really do anything meaningful until we've added a
1757 * master.
1758 */
1759 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1760 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001761 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001762
Robin Murphy7e96c742016-09-14 15:26:46 +01001763 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
1764 iommu_get_dma_cookie(&smmu_domain->domain))) {
Robin Murphy9adb9592016-01-26 18:06:36 +00001765 kfree(smmu_domain);
1766 return NULL;
1767 }
1768
Will Deacon518f7132014-11-14 17:17:54 +00001769 mutex_init(&smmu_domain->init_mutex);
1770 spin_lock_init(&smmu_domain->pgtbl_lock);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001771 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
1772 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07001773 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001774 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Patrick Daly77db4f92016-10-14 15:34:10 -07001775 arm_smmu_domain_reinit(smmu_domain);
Joerg Roedel1d672632015-03-26 13:43:10 +01001776
1777 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001778}
1779
Joerg Roedel1d672632015-03-26 13:43:10 +01001780static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001781{
Joerg Roedel1d672632015-03-26 13:43:10 +01001782 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01001783
1784 /*
1785 * Free the domain resources. We assume that all devices have
1786 * already been detached.
1787 */
Robin Murphy9adb9592016-01-26 18:06:36 +00001788 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001789 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001790 kfree(smmu_domain);
1791}
1792
Robin Murphy468f4942016-09-12 17:13:49 +01001793static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
1794{
1795 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphyd5b41782016-09-14 15:21:39 +01001796 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
Robin Murphy468f4942016-09-12 17:13:49 +01001797
1798 if (smr->valid)
1799 reg |= SMR_VALID;
1800 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
1801}
1802
Robin Murphya754fd12016-09-12 17:13:50 +01001803static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
1804{
1805 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
1806 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
1807 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
1808 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
1809
1810 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
1811}
1812
1813static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
1814{
1815 arm_smmu_write_s2cr(smmu, idx);
1816 if (smmu->smrs)
1817 arm_smmu_write_smr(smmu, idx);
1818}
1819
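/*
 * Find a stream mapping entry for the given ID/mask pair. With stream
 * indexing the ID is used directly; with stream matching an existing SMR
 * that entirely covers the new entry is reused, a partial overlap is
 * rejected, and otherwise the first free entry is claimed.
 */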
Robin Murphy6668f692016-09-12 17:13:54 +01001820static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy468f4942016-09-12 17:13:49 +01001821{
1822 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy6668f692016-09-12 17:13:54 +01001823 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001824
Robin Murphy6668f692016-09-12 17:13:54 +01001825 /* Stream indexing is blissfully easy */
1826 if (!smrs)
1827 return id;
Robin Murphy468f4942016-09-12 17:13:49 +01001828
Robin Murphy6668f692016-09-12 17:13:54 +01001829 /* Validating SMRs is... less so */
1830 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1831 if (!smrs[i].valid) {
1832 /*
1833 * Note the first free entry we come across, which
1834 * we'll claim in the end if nothing else matches.
1835 */
1836 if (free_idx < 0)
1837 free_idx = i;
Robin Murphy468f4942016-09-12 17:13:49 +01001838 continue;
1839 }
Robin Murphy6668f692016-09-12 17:13:54 +01001840 /*
1841 * If the new entry is _entirely_ matched by an existing entry,
1842 * then reuse that, with the guarantee that there also cannot
1843 * be any subsequent conflicting entries. In normal use we'd
1844 * expect simply identical entries for this case, but there's
1845 * no harm in accommodating the generalisation.
1846 */
1847 if ((mask & smrs[i].mask) == mask &&
1848 !((id ^ smrs[i].id) & ~smrs[i].mask))
1849 return i;
1850 /*
1851 * If the new entry has any other overlap with an existing one,
1852 * though, then there always exists at least one stream ID
1853 * which would cause a conflict, and we can't allow that risk.
1854 */
1855 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
1856 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001857 }
1858
Robin Murphy6668f692016-09-12 17:13:54 +01001859 return free_idx;
1860}
1861
1862static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1863{
1864 if (--smmu->s2crs[idx].count)
1865 return false;
1866
1867 smmu->s2crs[idx] = s2cr_init_val;
1868 if (smmu->smrs)
1869 smmu->smrs[idx].valid = false;
1870
1871 return true;
1872}
1873
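/*
 * Allocate stream mapping entries for every stream ID in the master's
 * fwspec, hook the device up to its IOMMU group and only then write the
 * SMRs/S2CRs to hardware; on failure, any entries claimed so far are
 * released again.
 */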
1874static int arm_smmu_master_alloc_smes(struct device *dev)
1875{
Robin Murphy06e393e2016-09-12 17:13:55 +01001876 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1877 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy6668f692016-09-12 17:13:54 +01001878 struct arm_smmu_device *smmu = cfg->smmu;
1879 struct arm_smmu_smr *smrs = smmu->smrs;
1880 struct iommu_group *group;
1881 int i, idx, ret;
1882
1883 mutex_lock(&smmu->stream_map_mutex);
1884 /* Figure out a viable stream map entry allocation */
Robin Murphy06e393e2016-09-12 17:13:55 +01001885 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy7e96c742016-09-14 15:26:46 +01001886 u16 sid = fwspec->ids[i];
1887 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1888
Robin Murphy6668f692016-09-12 17:13:54 +01001889 if (idx != INVALID_SMENDX) {
1890 ret = -EEXIST;
1891 goto out_err;
1892 }
1893
Robin Murphy7e96c742016-09-14 15:26:46 +01001894 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy6668f692016-09-12 17:13:54 +01001895 if (ret < 0)
1896 goto out_err;
1897
1898 idx = ret;
1899 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy7e96c742016-09-14 15:26:46 +01001900 smrs[idx].id = sid;
1901 smrs[idx].mask = mask;
Robin Murphy6668f692016-09-12 17:13:54 +01001902 smrs[idx].valid = true;
1903 }
1904 smmu->s2crs[idx].count++;
1905 cfg->smendx[i] = (s16)idx;
1906 }
1907
1908 group = iommu_group_get_for_dev(dev);
1909 if (!group)
1910 group = ERR_PTR(-ENOMEM);
1911 if (IS_ERR(group)) {
1912 ret = PTR_ERR(group);
1913 goto out_err;
1914 }
1915 iommu_group_put(group);
Robin Murphy468f4942016-09-12 17:13:49 +01001916
Will Deacon45ae7cf2013-06-24 18:31:25 +01001917 /* It worked! Now, poke the actual hardware */
Robin Murphy06e393e2016-09-12 17:13:55 +01001918 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01001919 arm_smmu_write_sme(smmu, idx);
1920 smmu->s2crs[idx].group = group;
1921 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001922
Robin Murphy6668f692016-09-12 17:13:54 +01001923 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001924 return 0;
1925
Robin Murphy6668f692016-09-12 17:13:54 +01001926out_err:
Robin Murphy468f4942016-09-12 17:13:49 +01001927 while (i--) {
Robin Murphy6668f692016-09-12 17:13:54 +01001928 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy468f4942016-09-12 17:13:49 +01001929 cfg->smendx[i] = INVALID_SMENDX;
1930 }
Robin Murphy6668f692016-09-12 17:13:54 +01001931 mutex_unlock(&smmu->stream_map_mutex);
1932 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001933}
1934
Robin Murphy06e393e2016-09-12 17:13:55 +01001935static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001936{
Robin Murphy06e393e2016-09-12 17:13:55 +01001937 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1938 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy1fb519a2016-09-12 17:13:53 +01001939 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01001940
Robin Murphy6668f692016-09-12 17:13:54 +01001941 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01001942 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01001943 if (arm_smmu_free_sme(smmu, idx))
1944 arm_smmu_write_sme(smmu, idx);
Robin Murphy468f4942016-09-12 17:13:49 +01001945 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001946 }
Robin Murphy6668f692016-09-12 17:13:54 +01001947 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001948}
1949
Will Deacon45ae7cf2013-06-24 18:31:25 +01001950static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphy06e393e2016-09-12 17:13:55 +01001951 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001952{
Will Deacon44680ee2014-06-25 11:29:12 +01001953 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01001954 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
1955 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
1956 u8 cbndx = smmu_domain->cfg.cbndx;
Robin Murphy6668f692016-09-12 17:13:54 +01001957 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001958
Robin Murphy06e393e2016-09-12 17:13:55 +01001959 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphya754fd12016-09-12 17:13:50 +01001960 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy6668f692016-09-12 17:13:54 +01001961 continue;
Robin Murphya754fd12016-09-12 17:13:50 +01001962
1963 s2cr[idx].type = type;
1964 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
1965 s2cr[idx].cbndx = cbndx;
1966 arm_smmu_write_s2cr(smmu, idx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001967 }
1968
1969 return 0;
1970}
1971
Patrick Daly09801312016-08-29 17:02:52 -07001972static void arm_smmu_detach_dev(struct iommu_domain *domain,
1973 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001974{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001975 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07001976 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Daly09801312016-08-29 17:02:52 -07001977 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07001978 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07001979
1980 if (dynamic)
1981 return;
1982
Patrick Daly09801312016-08-29 17:02:52 -07001983 if (!smmu) {
1984 dev_err(dev, "Domain not attached; cannot detach!\n");
1985 return;
1986 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001987
Patrick Daly8befb662016-08-17 20:03:28 -07001988 /* Remove additional vote for atomic power */
1989 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001990 WARN_ON(arm_smmu_power_on_atomic(smmu->pwr));
1991 arm_smmu_power_off(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001992 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001993}
1994
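/*
 * For secure domains, hand the page-table pages collected on pte_info_list
 * over to the secure VM with hyp_assign_phys(): HLOS keeps read/write
 * access while the domain's secure VMID is granted read-only access. The
 * bookkeeping entries are freed once the assignment is done.
 */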
Patrick Dalye271f212016-10-04 13:24:49 -07001995static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07001996{
Patrick Dalye271f212016-10-04 13:24:49 -07001997 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001998 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
1999 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
2000 int source_vmid = VMID_HLOS;
2001 struct arm_smmu_pte_info *pte_info, *temp;
2002
Patrick Dalye271f212016-10-04 13:24:49 -07002003 if (!arm_smmu_is_domain_secure(smmu_domain))
2004 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002005
Patrick Dalye271f212016-10-04 13:24:49 -07002006 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002007 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2008 PAGE_SIZE, &source_vmid, 1,
2009 dest_vmids, dest_perms, 2);
2010 if (WARN_ON(ret))
2011 break;
2012 }
2013
2014 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2015 entry) {
2016 list_del(&pte_info->entry);
2017 kfree(pte_info);
2018 }
Patrick Dalye271f212016-10-04 13:24:49 -07002019 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002020}
2021
2022static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2023{
2024 int ret;
2025 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002026 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002027 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2028 struct arm_smmu_pte_info *pte_info, *temp;
2029
Patrick Dalye271f212016-10-04 13:24:49 -07002030 if (!arm_smmu_is_domain_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002031 return;
2032
2033 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2034 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2035 PAGE_SIZE, source_vmlist, 2,
2036 &dest_vmids, &dest_perms, 1);
2037 if (WARN_ON(ret))
2038 break;
2039 free_pages_exact(pte_info->virt_addr, pte_info->size);
2040 }
2041
2042 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2043 entry) {
2044 list_del(&pte_info->entry);
2045 kfree(pte_info);
2046 }
2047}
2048
2049static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2050{
2051 struct arm_smmu_domain *smmu_domain = cookie;
2052 struct arm_smmu_pte_info *pte_info;
2053
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002054 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002055
2056 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2057 if (!pte_info)
2058 return;
2059
2060 pte_info->virt_addr = addr;
2061 pte_info->size = size;
2062 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2063}
2064
2065static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2066{
2067 struct arm_smmu_domain *smmu_domain = cookie;
2068 struct arm_smmu_pte_info *pte_info;
2069
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002070 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002071
2072 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2073 if (!pte_info)
2074 return -ENOMEM;
2075 pte_info->virt_addr = addr;
2076 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2077 return 0;
2078}
2079
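/*
 * Attach a master to a domain: power on the SMMU, finalise the domain
 * context if necessary and, for non-dynamic domains, point the master's
 * S2CRs at the domain's context bank. Atomic domains keep an extra
 * non-atomic power vote until they are detached.
 */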
Will Deacon45ae7cf2013-06-24 18:31:25 +01002080static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2081{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002082 int ret;
Robin Murphy06e393e2016-09-12 17:13:55 +01002083 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Will Deacon518f7132014-11-14 17:17:54 +00002084 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +01002085 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly8befb662016-08-17 20:03:28 -07002086 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002087
Robin Murphy06e393e2016-09-12 17:13:55 +01002088 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002089 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2090 return -ENXIO;
2091 }
Robin Murphy06e393e2016-09-12 17:13:55 +01002092
Robin Murphy4f79b142016-10-17 12:06:21 +01002093 /*
2094 * FIXME: The arch/arm DMA API code tries to attach devices to its own
2095 * domains between of_xlate() and add_device() - we have no way to cope
2096 * with that, so until ARM gets converted to rely on groups and default
2097 * domains, just say no (but more politely than by dereferencing NULL).
2098 * This should be at least a WARN_ON once that's sorted.
2099 */
2100 if (!fwspec->iommu_priv)
2101 return -ENODEV;
2102
Robin Murphy06e393e2016-09-12 17:13:55 +01002103 smmu = fwspec_smmu(fwspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002104
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002105 /* Enable Clocks and Power */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002106 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002107 if (ret)
2108 return ret;
2109
Will Deacon518f7132014-11-14 17:17:54 +00002110 /* Ensure that the domain is finalised */
Robin Murphy06e393e2016-09-12 17:13:55 +01002111 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002112 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002113 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002114
Patrick Dalyc190d932016-08-30 17:23:28 -07002115 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002116 if (is_dynamic_domain(domain)) {
2117 ret = 0;
2118 goto out_power_off;
2119 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002120
Will Deacon45ae7cf2013-06-24 18:31:25 +01002121 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002122 * Sanity check the domain. We don't support domains across
2123 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002124 */
Robin Murphy06e393e2016-09-12 17:13:55 +01002125 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002126 dev_err(dev,
2127 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002128 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002129 ret = -EINVAL;
2130 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002131 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002132
2133 /* Looks ok, so add the device to the domain */
Robin Murphy06e393e2016-09-12 17:13:55 +01002134 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002135
2136out_power_off:
Patrick Daly466237d2016-10-14 15:59:14 -07002137 /*
2138 * Keep an additional vote for non-atomic power until domain is
2139 * detached
2140 */
2141 if (!ret && atomic_domain) {
2142 WARN_ON(arm_smmu_power_on(smmu->pwr));
2143 arm_smmu_power_off_atomic(smmu->pwr);
2144 }
2145
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002146 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002147
Will Deacon45ae7cf2013-06-24 18:31:25 +01002148 return ret;
2149}
2150
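/*
 * Map a single region through the io-pgtable ops under the page-table
 * spinlock; for secure domains, any page-table memory allocated during the
 * map is assigned to the secure VM before returning.
 */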
Will Deacon45ae7cf2013-06-24 18:31:25 +01002151static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002152 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002153{
Will Deacon518f7132014-11-14 17:17:54 +00002154 int ret;
2155 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002156 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002157	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002158
Will Deacon518f7132014-11-14 17:17:54 +00002159 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002160 return -ENODEV;
2161
Patrick Dalye271f212016-10-04 13:24:49 -07002162 arm_smmu_secure_domain_lock(smmu_domain);
2163
Will Deacon518f7132014-11-14 17:17:54 +00002164 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2165 ret = ops->map(ops, iova, paddr, size, prot);
2166 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002167
2168 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002169 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002170
Will Deacon518f7132014-11-14 17:17:54 +00002171 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002172}
2173
2174static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2175 size_t size)
2176{
Will Deacon518f7132014-11-14 17:17:54 +00002177 size_t ret;
2178 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002179 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002180	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002181
Will Deacon518f7132014-11-14 17:17:54 +00002182 if (!ops)
2183 return 0;
2184
Patrick Daly8befb662016-08-17 20:03:28 -07002185 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002186 if (ret)
2187 return ret;
2188
Patrick Dalye271f212016-10-04 13:24:49 -07002189 arm_smmu_secure_domain_lock(smmu_domain);
2190
Will Deacon518f7132014-11-14 17:17:54 +00002191 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2192 ret = ops->unmap(ops, iova, size);
2193 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002194
Patrick Daly8befb662016-08-17 20:03:28 -07002195 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002196 /*
2197 * While splitting up block mappings, we might allocate page table
	2198	 * memory during unmap, so the VMIDs need to be assigned to the
2199 * memory here as well.
2200 */
2201 arm_smmu_assign_table(smmu_domain);
	2202	/* Also unassign any pages that were freed during unmap */
2203 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002204 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002205 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002206}
2207
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002208static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2209 struct scatterlist *sg, unsigned int nents, int prot)
2210{
2211 int ret;
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002212 size_t size;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002213 unsigned long flags;
2214 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2215 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2216
2217 if (!ops)
2218 return -ENODEV;
2219
Patrick Daly8befb662016-08-17 20:03:28 -07002220 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002221 if (ret)
2222 return ret;
2223
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002224 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002225 ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002226 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002227
2228 if (!ret)
2229 arm_smmu_unmap(domain, iova, size);
2230
Patrick Daly8befb662016-08-17 20:03:28 -07002231 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002232 arm_smmu_assign_table(smmu_domain);
2233
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002234 return ret;
2235}
2236
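/*
 * Perform a hardware translation of the given IOVA by writing ATS1PR and
 * polling ATSR, then reading the result from PAR. On a timeout or a
 * translation fault the failure is logged (together with the software
 * table-walk result in the timeout case) and 0 is returned.
 */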
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002237static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyad441dd2016-09-15 15:50:46 -07002238 dma_addr_t iova)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002239{
Joerg Roedel1d672632015-03-26 13:43:10 +01002240 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002241 struct arm_smmu_device *smmu = smmu_domain->smmu;
2242 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	2243	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2244 struct device *dev = smmu->dev;
2245 void __iomem *cb_base;
2246 u32 tmp;
2247 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002248 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002249
2250 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2251
Robin Murphy661d9622015-05-27 17:09:34 +01002252 /* ATS1 registers can only be written atomically */
2253 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002254 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002255 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2256 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002257 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002258
2259 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2260 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002261 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002262 dev_err(dev,
2263 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2264 &iova, &phys);
2265 phys = 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002266 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002267 }
2268
Robin Murphyf9a05f02016-04-13 18:13:01 +01002269 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002270 if (phys & CB_PAR_F) {
2271 dev_err(dev, "translation fault!\n");
2272 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002273 phys = 0;
2274 } else {
2275 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002276 }
Patrick Dalyad441dd2016-09-15 15:50:46 -07002277
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002278 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002279}
2280
Will Deacon45ae7cf2013-06-24 18:31:25 +01002281static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002282 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002283{
Will Deacon518f7132014-11-14 17:17:54 +00002284 phys_addr_t ret;
2285 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002286 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002287	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002288
Will Deacon518f7132014-11-14 17:17:54 +00002289 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002290 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002291
Will Deacon518f7132014-11-14 17:17:54 +00002292 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002293 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002294 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002295
Will Deacon518f7132014-11-14 17:17:54 +00002296 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002297}
2298
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002299/*
2300 * This function can sleep, and cannot be called from atomic context. Will
2301 * power on register block if required. This restriction does not apply to the
2302 * original iova_to_phys() op.
2303 */
2304static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2305 dma_addr_t iova)
2306{
2307 phys_addr_t ret = 0;
2308 unsigned long flags;
2309 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002310
Patrick Dalyad441dd2016-09-15 15:50:46 -07002311 if (smmu_domain->smmu->arch_ops &&
2312 smmu_domain->smmu->arch_ops->iova_to_phys_hard)
2313 return smmu_domain->smmu->arch_ops->iova_to_phys_hard(
2314 domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002315
2316 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2317 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2318 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyad441dd2016-09-15 15:50:46 -07002319 ret = __arm_smmu_iova_to_phys_hard(domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002320
2321 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2322
2323 return ret;
2324}
2325
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002326static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002327{
Will Deacond0948942014-06-24 17:30:10 +01002328 switch (cap) {
2329 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002330 /*
2331 * Return true here as the SMMU can always send out coherent
2332 * requests.
2333 */
2334 return true;
Will Deacond0948942014-06-24 17:30:10 +01002335 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002336 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002337 case IOMMU_CAP_NOEXEC:
2338 return true;
Will Deacond0948942014-06-24 17:30:10 +01002339 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002340 return false;
Will Deacond0948942014-06-24 17:30:10 +01002341 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002342}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002343
Robin Murphy7e96c742016-09-14 15:26:46 +01002344static int arm_smmu_match_node(struct device *dev, void *data)
2345{
2346 return dev->of_node == data;
2347}
2348
2349static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
2350{
2351 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
2352 np, arm_smmu_match_node);
2353 put_device(dev);
2354 return dev ? dev_get_drvdata(dev) : NULL;
2355}
2356
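/*
 * add_device hook: resolve the owning SMMU either via the legacy binding
 * path or from the device's fwspec, sanity-check every stream ID and SMR
 * mask against the hardware limits, then allocate the per-master cfg and
 * its stream mapping entries, with the SMMU powered on for the duration of
 * the setup.
 */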
Will Deacon03edb222015-01-19 14:27:33 +00002357static int arm_smmu_add_device(struct device *dev)
2358{
Robin Murphy06e393e2016-09-12 17:13:55 +01002359 struct arm_smmu_device *smmu;
Robin Murphyd5b41782016-09-14 15:21:39 +01002360 struct arm_smmu_master_cfg *cfg;
Robin Murphy7e96c742016-09-14 15:26:46 +01002361 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyd5b41782016-09-14 15:21:39 +01002362 int i, ret;
2363
Robin Murphy7e96c742016-09-14 15:26:46 +01002364 if (using_legacy_binding) {
2365 ret = arm_smmu_register_legacy_master(dev, &smmu);
2366 fwspec = dev->iommu_fwspec;
2367 if (ret)
2368 goto out_free;
Robin Murphy22e6f6c2016-11-02 17:31:32 +00002369 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002370 smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
2371 if (!smmu)
2372 return -ENODEV;
2373 } else {
2374 return -ENODEV;
2375 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002376
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002377 ret = arm_smmu_power_on(smmu->pwr);
2378 if (ret)
2379 goto out_free;
2380
Robin Murphyd5b41782016-09-14 15:21:39 +01002381 ret = -EINVAL;
Robin Murphy06e393e2016-09-12 17:13:55 +01002382 for (i = 0; i < fwspec->num_ids; i++) {
2383 u16 sid = fwspec->ids[i];
Robin Murphy7e96c742016-09-14 15:26:46 +01002384 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyd5b41782016-09-14 15:21:39 +01002385
Robin Murphy06e393e2016-09-12 17:13:55 +01002386 if (sid & ~smmu->streamid_mask) {
Robin Murphyd5b41782016-09-14 15:21:39 +01002387 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002388 sid, smmu->streamid_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002389 goto out_pwr_off;
Robin Murphyd5b41782016-09-14 15:21:39 +01002390 }
Robin Murphy7e96c742016-09-14 15:26:46 +01002391 if (mask & ~smmu->smr_mask_mask) {
2392 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
2393				mask, smmu->smr_mask_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002394 goto out_pwr_off;
Robin Murphy7e96c742016-09-14 15:26:46 +01002395 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002396 }
Will Deacon03edb222015-01-19 14:27:33 +00002397
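	/*
	 * At this point i == fwspec->num_ids, so the offsetof() below sizes the
	 * config to hold one smendx[] slot per stream ID; the while loop further
	 * down marks each slot invalid until SMEs are actually allocated.
	 */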
Robin Murphy06e393e2016-09-12 17:13:55 +01002398 ret = -ENOMEM;
2399 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
2400 GFP_KERNEL);
2401 if (!cfg)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002402 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002403
2404 cfg->smmu = smmu;
2405 fwspec->iommu_priv = cfg;
2406 while (i--)
2407 cfg->smendx[i] = INVALID_SMENDX;
2408
Robin Murphy6668f692016-09-12 17:13:54 +01002409 ret = arm_smmu_master_alloc_smes(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002410 if (ret)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002411 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002412
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002413 arm_smmu_power_off(smmu->pwr);
Robin Murphy06e393e2016-09-12 17:13:55 +01002414 return 0;
Robin Murphyd5b41782016-09-14 15:21:39 +01002415
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002416out_pwr_off:
2417 arm_smmu_power_off(smmu->pwr);
Robin Murphyd5b41782016-09-14 15:21:39 +01002418out_free:
Robin Murphy06e393e2016-09-12 17:13:55 +01002419 if (fwspec)
2420 kfree(fwspec->iommu_priv);
2421 iommu_fwspec_free(dev);
Robin Murphyd5b41782016-09-14 15:21:39 +01002422 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00002423}
2424
Will Deacon45ae7cf2013-06-24 18:31:25 +01002425static void arm_smmu_remove_device(struct device *dev)
2426{
Robin Murphy06e393e2016-09-12 17:13:55 +01002427 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002428 struct arm_smmu_device *smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002429
Robin Murphy06e393e2016-09-12 17:13:55 +01002430 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyd5b41782016-09-14 15:21:39 +01002431 return;
Robin Murphya754fd12016-09-12 17:13:50 +01002432
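	/* Freeing the SMEs rewrites stream-mapping registers, so the SMMU must be powered */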
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002433 smmu = fwspec_smmu(fwspec);
2434 if (arm_smmu_power_on(smmu->pwr)) {
2435 WARN_ON(1);
2436 return;
2437 }
2438
Robin Murphy06e393e2016-09-12 17:13:55 +01002439 arm_smmu_master_free_smes(fwspec);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002440 iommu_group_remove_device(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002441 kfree(fwspec->iommu_priv);
2442 iommu_fwspec_free(dev);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002443 arm_smmu_power_off(smmu->pwr);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002444}
2445
Joerg Roedelaf659932015-10-21 23:51:41 +02002446static struct iommu_group *arm_smmu_device_group(struct device *dev)
2447{
Robin Murphy06e393e2016-09-12 17:13:55 +01002448 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2449 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy6668f692016-09-12 17:13:54 +01002450 struct iommu_group *group = NULL;
2451 int i, idx;
2452
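	/* All of this master's SMEs that already have a group must agree on it */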
Robin Murphy06e393e2016-09-12 17:13:55 +01002453 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002454 if (group && smmu->s2crs[idx].group &&
2455 group != smmu->s2crs[idx].group)
2456 return ERR_PTR(-EINVAL);
2457
2458 group = smmu->s2crs[idx].group;
2459 }
2460
2461 if (group)
2462 return group;
Joerg Roedelaf659932015-10-21 23:51:41 +02002463
2464 if (dev_is_pci(dev))
2465 group = pci_device_group(dev);
2466 else
2467 group = generic_device_group(dev);
2468
Joerg Roedelaf659932015-10-21 23:51:41 +02002469 return group;
2470}
2471
Will Deaconc752ce42014-06-25 22:46:31 +01002472static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
2473 enum iommu_attr attr, void *data)
2474{
Joerg Roedel1d672632015-03-26 13:43:10 +01002475 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002476 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01002477
2478 switch (attr) {
2479 case DOMAIN_ATTR_NESTING:
2480 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
2481 return 0;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08002482 case DOMAIN_ATTR_PT_BASE_ADDR:
2483 *((phys_addr_t *)data) =
2484 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2485 return 0;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002486 case DOMAIN_ATTR_CONTEXT_BANK:
2487 /* context bank index isn't valid until we are attached */
2488 if (smmu_domain->smmu == NULL)
2489 return -ENODEV;
2490
2491 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
2492 ret = 0;
2493 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002494 case DOMAIN_ATTR_TTBR0: {
2495 u64 val;
2496 struct arm_smmu_device *smmu = smmu_domain->smmu;
2497 /* not valid until we are attached */
2498 if (smmu == NULL)
2499 return -ENODEV;
2500
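		/* Report TTBR0 as programmed, including the ASID field for stage 1 domains */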
2501 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2502 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
2503 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
2504 << (TTBRn_ASID_SHIFT);
2505 *((u64 *)data) = val;
2506 ret = 0;
2507 break;
2508 }
2509 case DOMAIN_ATTR_CONTEXTIDR:
2510 /* not valid until attached */
2511 if (smmu_domain->smmu == NULL)
2512 return -ENODEV;
2513 *((u32 *)data) = smmu_domain->cfg.procid;
2514 ret = 0;
2515 break;
2516 case DOMAIN_ATTR_PROCID:
2517 *((u32 *)data) = smmu_domain->cfg.procid;
2518 ret = 0;
2519 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002520 case DOMAIN_ATTR_DYNAMIC:
2521 *((int *)data) = !!(smmu_domain->attributes
2522 & (1 << DOMAIN_ATTR_DYNAMIC));
2523 ret = 0;
2524 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002525 case DOMAIN_ATTR_NON_FATAL_FAULTS:
2526 *((int *)data) = !!(smmu_domain->attributes
2527 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
2528 ret = 0;
2529 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07002530 case DOMAIN_ATTR_S1_BYPASS:
2531 *((int *)data) = !!(smmu_domain->attributes
2532 & (1 << DOMAIN_ATTR_S1_BYPASS));
2533 ret = 0;
2534 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002535 case DOMAIN_ATTR_SECURE_VMID:
2536 *((int *)data) = smmu_domain->secure_vmid;
2537 ret = 0;
2538 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08002539 case DOMAIN_ATTR_PGTBL_INFO: {
2540 struct iommu_pgtbl_info *info = data;
2541
2542 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
2543 ret = -ENODEV;
2544 break;
2545 }
2546 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
2547 ret = 0;
2548 break;
2549 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002550 case DOMAIN_ATTR_FAST:
2551 *((int *)data) = !!(smmu_domain->attributes
2552 & (1 << DOMAIN_ATTR_FAST));
2553 ret = 0;
2554 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002555 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2556 *((int *)data) = !!(smmu_domain->attributes &
2557 (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
2558 ret = 0;
2559 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002560 default:
2561 return -ENODEV;
2562 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002563 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002564}
2565
2566static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
2567 enum iommu_attr attr, void *data)
2568{
Will Deacon518f7132014-11-14 17:17:54 +00002569 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01002570 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01002571
Will Deacon518f7132014-11-14 17:17:54 +00002572 mutex_lock(&smmu_domain->init_mutex);
2573
Will Deaconc752ce42014-06-25 22:46:31 +01002574 switch (attr) {
2575 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00002576 if (smmu_domain->smmu) {
2577 ret = -EPERM;
2578 goto out_unlock;
2579 }
2580
Will Deaconc752ce42014-06-25 22:46:31 +01002581 if (*(int *)data)
2582 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2583 else
2584 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2585
Will Deacon518f7132014-11-14 17:17:54 +00002586 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002587 case DOMAIN_ATTR_PROCID:
2588 if (smmu_domain->smmu != NULL) {
2589 dev_err(smmu_domain->smmu->dev,
2590 "cannot change procid attribute while attached\n");
2591 ret = -EBUSY;
2592 break;
2593 }
2594 smmu_domain->cfg.procid = *((u32 *)data);
2595 ret = 0;
2596 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002597 case DOMAIN_ATTR_DYNAMIC: {
2598 int dynamic = *((int *)data);
2599
2600 if (smmu_domain->smmu != NULL) {
2601 dev_err(smmu_domain->smmu->dev,
2602 "cannot change dynamic attribute while attached\n");
2603 ret = -EBUSY;
2604 break;
2605 }
2606
2607 if (dynamic)
2608 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
2609 else
2610 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
2611 ret = 0;
2612 break;
2613 }
2614 case DOMAIN_ATTR_CONTEXT_BANK:
2615 /* context bank can't be set while attached */
2616 if (smmu_domain->smmu != NULL) {
2617 ret = -EBUSY;
2618 break;
2619 }
2620 /* ... and it can only be set for dynamic contexts. */
2621 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
2622 ret = -EINVAL;
2623 break;
2624 }
2625
2626 /* this will be validated during attach */
2627 smmu_domain->cfg.cbndx = *((unsigned int *)data);
2628 ret = 0;
2629 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002630 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
2631 u32 non_fatal_faults = *((int *)data);
2632
2633 if (non_fatal_faults)
2634 smmu_domain->attributes |=
2635 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
2636 else
2637 smmu_domain->attributes &=
2638 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
2639 ret = 0;
2640 break;
2641 }
Patrick Dalye62d3362016-03-15 18:58:28 -07002642 case DOMAIN_ATTR_S1_BYPASS: {
2643 int bypass = *((int *)data);
2644
2645 /* bypass can't be changed while attached */
2646 if (smmu_domain->smmu != NULL) {
2647 ret = -EBUSY;
2648 break;
2649 }
2650 if (bypass)
2651 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
2652 else
2653 smmu_domain->attributes &=
2654 ~(1 << DOMAIN_ATTR_S1_BYPASS);
2655
2656 ret = 0;
2657 break;
2658 }
Patrick Daly8befb662016-08-17 20:03:28 -07002659 case DOMAIN_ATTR_ATOMIC:
2660 {
2661 int atomic_ctx = *((int *)data);
2662
2663 /* can't be changed while attached */
2664 if (smmu_domain->smmu != NULL) {
2665 ret = -EBUSY;
2666 break;
2667 }
2668 if (atomic_ctx)
2669 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
2670 else
2671 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
2672 break;
2673 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002674 case DOMAIN_ATTR_SECURE_VMID:
2675 if (smmu_domain->secure_vmid != VMID_INVAL) {
2676 ret = -ENODEV;
2677 WARN(1, "secure vmid already set!");
2678 break;
2679 }
2680 smmu_domain->secure_vmid = *((int *)data);
2681 break;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002682 case DOMAIN_ATTR_FAST:
2683 if (*((int *)data))
2684 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
2685 ret = 0;
2686 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002687 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2688 /* can't be changed while attached */
2689 if (smmu_domain->smmu != NULL) {
2690 ret = -EBUSY;
2691 break;
2692 }
2693 if (*((int *)data))
2694 smmu_domain->attributes |=
2695 1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
2696 ret = 0;
2697 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002698 default:
Will Deacon518f7132014-11-14 17:17:54 +00002699 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01002700 }
Will Deacon518f7132014-11-14 17:17:54 +00002701
2702out_unlock:
2703 mutex_unlock(&smmu_domain->init_mutex);
2704 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002705}
2706
Robin Murphy7e96c742016-09-14 15:26:46 +01002707static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
2708{
2709 u32 fwid = 0;
2710
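	/* Pack the stream ID into the low bits and any SMR mask above SMR_MASK_SHIFT */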
2711 if (args->args_count > 0)
2712 fwid |= (u16)args->args[0];
2713
2714 if (args->args_count > 1)
2715 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
2716
2717 return iommu_fwspec_add_ids(dev, &fwid, 1);
2718}
2719
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002720static void arm_smmu_trigger_fault(struct iommu_domain *domain,
2721 unsigned long flags)
2722{
2723 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2724 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2725 struct arm_smmu_device *smmu;
2726 void __iomem *cb_base;
2727
2728 if (!smmu_domain->smmu) {
2729 pr_err("Can't trigger faults on non-attached domains\n");
2730 return;
2731 }
2732
2733 smmu = smmu_domain->smmu;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002734 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002735 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002736
2737 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2738 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
2739 flags, cfg->cbndx);
2740 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07002741 /* give the interrupt time to fire... */
2742 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002743
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002744 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002745}
2746
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002747static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
2748 unsigned long offset)
2749{
2750 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2751 struct arm_smmu_device *smmu;
2752 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2753 void __iomem *cb_base;
2754 unsigned long val;
2755
2756 if (offset >= SZ_4K) {
2757 pr_err("Invalid offset: 0x%lx\n", offset);
2758 return 0;
2759 }
2760
2761 smmu = smmu_domain->smmu;
2762 if (!smmu) {
2763 WARN(1, "Can't read registers of a detached domain\n");
2764 val = 0;
2765 return val;
2766 }
2767
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002768 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002769 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002770
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002771 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2772 val = readl_relaxed(cb_base + offset);
2773
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002774 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002775 return val;
2776}
2777
2778static void arm_smmu_reg_write(struct iommu_domain *domain,
2779 unsigned long offset, unsigned long val)
2780{
2781 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2782 struct arm_smmu_device *smmu;
2783 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2784 void __iomem *cb_base;
2785
2786 if (offset >= SZ_4K) {
2787 pr_err("Invalid offset: 0x%lx\n", offset);
2788 return;
2789 }
2790
2791 smmu = smmu_domain->smmu;
2792 if (!smmu) {
2793		WARN(1, "Can't write registers of a detached domain\n");
2794 return;
2795 }
2796
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002797 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002798 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002799
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002800 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2801 writel_relaxed(val, cb_base + offset);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002802
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002803 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002804}
2805
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08002806static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
2807{
2808 arm_smmu_tlb_inv_context(to_smmu_domain(domain));
2809}
2810
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002811static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
2812{
2813 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2814
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002815 return arm_smmu_power_on(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002816}
2817
2818static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
2819{
2820 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2821
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002822 arm_smmu_power_off(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002823}
2824
Will Deacon518f7132014-11-14 17:17:54 +00002825static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01002826 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01002827 .domain_alloc = arm_smmu_domain_alloc,
2828 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01002829 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07002830 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01002831 .map = arm_smmu_map,
2832 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002833 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01002834 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002835 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01002836 .add_device = arm_smmu_add_device,
2837 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02002838 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01002839 .domain_get_attr = arm_smmu_domain_get_attr,
2840 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy7e96c742016-09-14 15:26:46 +01002841 .of_xlate = arm_smmu_of_xlate,
Will Deacon518f7132014-11-14 17:17:54 +00002842 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002843 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002844 .reg_read = arm_smmu_reg_read,
2845 .reg_write = arm_smmu_reg_write,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08002846 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002847 .enable_config_clocks = arm_smmu_enable_config_clocks,
2848 .disable_config_clocks = arm_smmu_disable_config_clocks,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002849};
2850
Patrick Dalyad441dd2016-09-15 15:50:46 -07002851#define IMPL_DEF1_MICRO_MMU_CTRL 0
2852#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
2853#define MICRO_MMU_CTRL_IDLE (1 << 3)
2854
2855/* Definitions for implementation-defined registers */
2856#define ACTLR_QCOM_OSH_SHIFT 28
2857#define ACTLR_QCOM_OSH 1
2858
2859#define ACTLR_QCOM_ISH_SHIFT 29
2860#define ACTLR_QCOM_ISH 1
2861
2862#define ACTLR_QCOM_NSH_SHIFT 30
2863#define ACTLR_QCOM_NSH 1
2864
2865static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002866{
2867 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002868 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002869
2870 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
2871 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
2872 0, 30000)) {
2873 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
2874 return -EBUSY;
2875 }
2876
2877 return 0;
2878}
2879
Patrick Dalyad441dd2016-09-15 15:50:46 -07002880static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002881{
2882 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
2883 u32 reg;
2884
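	/* Set the local halt request; MICRO_MMU_CTRL.IDLE signals completion */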
2885 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2886 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
2887 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2888
Patrick Dalyad441dd2016-09-15 15:50:46 -07002889 return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002890}
2891
Patrick Dalyad441dd2016-09-15 15:50:46 -07002892static int qsmmuv2_halt(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002893{
Patrick Dalyad441dd2016-09-15 15:50:46 -07002894 return __qsmmuv2_halt(smmu, true);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002895}
2896
Patrick Dalyad441dd2016-09-15 15:50:46 -07002897static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002898{
Patrick Dalyad441dd2016-09-15 15:50:46 -07002899 return __qsmmuv2_halt(smmu, false);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002900}
2901
Patrick Dalyad441dd2016-09-15 15:50:46 -07002902static void qsmmuv2_resume(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002903{
2904 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
2905 u32 reg;
2906
2907 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2908 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
2909 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2910}
2911
Patrick Dalyad441dd2016-09-15 15:50:46 -07002912static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002913{
2914 int i;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002915 u32 val;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002916 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002917 void __iomem *cb_base;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002918
Patrick Dalyad441dd2016-09-15 15:50:46 -07002919 /*
2920	 * Per the ARM SMMUv2 spec, SCTLR.M must be disabled (as it is here,
2921	 * following reset) while ACTLR and the implementation-defined registers
	 * are programmed, to prevent table walks with an inconsistent state.
2922 */
2923 for (i = 0; i < smmu->num_context_banks; ++i) {
2924 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
2925 val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
2926 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
2927 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
2928 writel_relaxed(val, cb_base + ARM_SMMU_CB_ACTLR);
2929 }
2930
2931 /* Program implementation defined registers */
2932 qsmmuv2_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002933 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
2934 writel_relaxed(regs[i].value,
2935 ARM_SMMU_GR0(smmu) + regs[i].offset);
Patrick Dalyad441dd2016-09-15 15:50:46 -07002936 qsmmuv2_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002937}
2938
Patrick Dalyad441dd2016-09-15 15:50:46 -07002939static phys_addr_t __qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
2940 dma_addr_t iova, bool halt)
2941{
2942 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2943 struct arm_smmu_device *smmu = smmu_domain->smmu;
2944 int ret;
2945 phys_addr_t phys = 0;
2946 unsigned long flags;
2947
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002948 ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07002949 if (ret)
2950 return 0;
2951
2952 if (halt) {
2953 ret = qsmmuv2_halt(smmu);
2954 if (ret)
2955 goto out_power_off;
2956 }
2957
2958 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2959 spin_lock(&smmu->atos_lock);
2960 phys = __arm_smmu_iova_to_phys_hard(domain, iova);
2961 spin_unlock(&smmu->atos_lock);
2962 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2963
2964 if (halt)
2965 qsmmuv2_resume(smmu);
2966
2967out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002968 arm_smmu_power_off(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07002969 return phys;
2970}
2971
2972static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
2973 dma_addr_t iova)
2974{
2975 return __qsmmuv2_iova_to_phys_hard(domain, iova, true);
2976}
2977
2978static void qsmmuv2_iova_to_phys_fault(
2979 struct iommu_domain *domain,
2980 dma_addr_t iova, phys_addr_t *phys,
2981 phys_addr_t *phys_post_tlbiall)
2982{
2983 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2984 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2985 struct arm_smmu_device *smmu;
2986 void __iomem *cb_base;
2987 u64 sctlr, sctlr_orig;
2988 u32 fsr;
2989
2990 smmu = smmu_domain->smmu;
2991 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2992
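	/*
	 * Halt the SMMU and terminate the stalled transaction so that the ATOS
	 * lookups below observe a quiescent context bank.
	 */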
2993 qsmmuv2_halt_nowait(smmu);
2994
2995 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
2996
2997 qsmmuv2_wait_for_halt(smmu);
2998
2999 /* clear FSR to allow ATOS to log any faults */
3000 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
3001 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
3002
3003 /* disable stall mode momentarily */
3004 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3005 sctlr = sctlr_orig & ~SCTLR_CFCFG;
3006 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
3007
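	/*
	 * Translate once, then again after invalidating the TLB, so the caller
	 * can tell a stale TLB entry apart from a genuinely bad page table.
	 */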
3008 *phys = __qsmmuv2_iova_to_phys_hard(domain, iova, false);
3009 arm_smmu_tlb_inv_context(smmu_domain);
3010 *phys_post_tlbiall = __qsmmuv2_iova_to_phys_hard(domain, iova, false);
3011
3012 /* restore SCTLR */
3013 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
3014
3015 qsmmuv2_resume(smmu);
3016}
3017
3018struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
3019 .device_reset = qsmmuv2_device_reset,
3020 .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
3021 .iova_to_phys_fault = qsmmuv2_iova_to_phys_fault,
3022};
3023
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003024static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003025{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003026 int i;
3027 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003028 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003029 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003030
Peng Fan3ca37122016-05-03 21:50:30 +08003031 /*
3032 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
3033 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
3034 * bit is only present in MMU-500r2 onwards.
3035 */
3036 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
3037 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
3038 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
3039 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
3040 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
3041 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
3042 }
3043
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003044 /* Make sure all context banks are disabled and clear CB_FSR */
3045 for (i = 0; i < smmu->num_context_banks; ++i) {
3046 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3047 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
3048 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003049 /*
3050 * Disable MMU-500's not-particularly-beneficial next-page
3051 * prefetcher for the sake of errata #841119 and #826419.
3052 */
3053 if (smmu->model == ARM_MMU500) {
3054 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
3055 reg &= ~ARM_MMU500_ACTLR_CPRE;
3056 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
3057 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003058 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003059}
3060
3061static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
3062{
3063 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy468f4942016-09-12 17:13:49 +01003064 int i;
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003065 u32 reg;
3066
3067 /* clear global FSR */
3068 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3069 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3070
Robin Murphy468f4942016-09-12 17:13:49 +01003071 /*
3072 * Reset stream mapping groups: Initial values mark all SMRn as
3073 * invalid and all S2CRn as bypass unless overridden.
3074 */
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003075 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
Robin Murphya754fd12016-09-12 17:13:50 +01003076 for (i = 0; i < smmu->num_mapping_groups; ++i)
3077 arm_smmu_write_sme(smmu, i);
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003078
3079 arm_smmu_context_bank_reset(smmu);
3080 }
Will Deacon1463fe42013-07-31 19:21:27 +01003081
Will Deacon45ae7cf2013-06-24 18:31:25 +01003082 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003083 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
3084 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
3085
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003086 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003087
Will Deacon45ae7cf2013-06-24 18:31:25 +01003088 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003089 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003090
3091 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003092 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003093
Robin Murphy25a1c962016-02-10 14:25:33 +00003094 /* Enable client access, handling unmatched streams as appropriate */
3095 reg &= ~sCR0_CLIENTPD;
3096 if (disable_bypass)
3097 reg |= sCR0_USFCFG;
3098 else
3099 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003100
3101 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003102 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003103
3104 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003105 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003106
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003107 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3108 reg |= sCR0_VMID16EN;
3109
Will Deacon45ae7cf2013-06-24 18:31:25 +01003110 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003111 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003112 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Dalyd7476202016-09-08 18:23:28 -07003113
3114 /* Manage any implementation defined features */
3115 arm_smmu_arch_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003116}
3117
3118static int arm_smmu_id_size_to_bits(int size)
3119{
3120 switch (size) {
3121 case 0:
3122 return 32;
3123 case 1:
3124 return 36;
3125 case 2:
3126 return 40;
3127 case 3:
3128 return 42;
3129 case 4:
3130 return 44;
3131 case 5:
3132 default:
3133 return 48;
3134 }
3135}
3136
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003137static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
3138{
3139 struct device *dev = smmu->dev;
3140 int i, ntuples, ret;
3141 u32 *tuples;
3142 struct arm_smmu_impl_def_reg *regs, *regit;
3143
3144 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
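	/* "attach-impl-defs" is a flat array of <offset, value> u32 pairs */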
3145 return 0;
3146
3147 ntuples /= sizeof(u32);
3148 if (ntuples % 2) {
3149 dev_err(dev,
3150 "Invalid number of attach-impl-defs registers: %d\n",
3151 ntuples);
3152 return -EINVAL;
3153 }
3154
3155 regs = devm_kmalloc(
3156 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
3157 GFP_KERNEL);
3158 if (!regs)
3159 return -ENOMEM;
3160
3161 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
3162 if (!tuples)
3163 return -ENOMEM;
3164
3165 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
3166 tuples, ntuples);
3167 if (ret)
3168 return ret;
3169
3170 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
3171 regit->offset = tuples[i];
3172 regit->value = tuples[i + 1];
3173 }
3174
3175 devm_kfree(dev, tuples);
3176
3177 smmu->impl_def_attach_registers = regs;
3178 smmu->num_impl_def_attach_registers = ntuples / 2;
3179
3180 return 0;
3181}
3182
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003183
3184static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003185{
3186 const char *cname;
3187 struct property *prop;
3188 int i;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003189 struct device *dev = pwr->dev;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003190
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003191 pwr->num_clocks =
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003192 of_property_count_strings(dev->of_node, "clock-names");
3193
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003194 if (pwr->num_clocks < 1) {
3195 pwr->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003196 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07003197 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003198
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003199 pwr->clocks = devm_kzalloc(
3200 dev, sizeof(*pwr->clocks) * pwr->num_clocks,
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003201 GFP_KERNEL);
3202
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003203 if (!pwr->clocks)
3204 return -ENOMEM;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003205
3206 i = 0;
3207 of_property_for_each_string(dev->of_node, "clock-names",
3208 prop, cname) {
3209 struct clk *c = devm_clk_get(dev, cname);
3210
3211 if (IS_ERR(c)) {
3212			dev_err(dev, "Couldn't get clock: %s\n",
3213 cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07003214 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003215 }
3216
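		/* Give clocks with no configured rate a default (1 kHz rounded to a supported rate) */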
3217 if (clk_get_rate(c) == 0) {
3218 long rate = clk_round_rate(c, 1000);
3219
3220 clk_set_rate(c, rate);
3221 }
3222
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003223 pwr->clocks[i] = c;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003224
3225 ++i;
3226 }
3227 return 0;
3228}
3229
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003230static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003231{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003232 const char *cname;
3233 struct property *prop;
3234 int i, ret = 0;
3235 struct device *dev = pwr->dev;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003236
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003237 pwr->num_gdscs =
3238 of_property_count_strings(dev->of_node, "qcom,regulator-names");
3239
3240 if (pwr->num_gdscs < 1) {
3241 pwr->num_gdscs = 0;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003242 return 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003243 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003244
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003245 pwr->gdscs = devm_kzalloc(
3246 dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
3247
3248 if (!pwr->gdscs)
3249 return -ENOMEM;
3250
3251 i = 0;
3252 of_property_for_each_string(dev->of_node, "qcom,regulator-names",
3253		pwr->gdscs[i++].supply = cname;
3254 pwr->gdscs[i].supply = cname;
3255
3256 ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
3257 return ret;
3258}
3259
3260static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
3261{
3262 struct device *dev = pwr->dev;
3263
3264 /* We don't want the bus APIs to print an error message */
3265 if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
3266 dev_dbg(dev, "No bus scaling info\n");
3267 return 0;
3268 }
3269
3270 pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
3271 if (!pwr->bus_dt_data) {
3272 dev_err(dev, "Unable to read bus-scaling from devicetree\n");
3273 return -EINVAL;
3274 }
3275
3276 pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
3277 if (!pwr->bus_client) {
3278 dev_err(dev, "Bus client registration failed\n");
3279 msm_bus_cl_clear_pdata(pwr->bus_dt_data);
3280 return -EINVAL;
3281 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003282
3283 return 0;
3284}
3285
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003286/*
3287 * Cleanup done by devm. Any non-devm resources must clean up themselves.
3288 */
3289static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
3290 struct platform_device *pdev)
Patrick Daly2764f952016-09-06 19:22:44 -07003291{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003292 struct arm_smmu_power_resources *pwr;
3293 int ret;
Patrick Daly2764f952016-09-06 19:22:44 -07003294
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003295 pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
3296 if (!pwr)
3297 return ERR_PTR(-ENOMEM);
Patrick Daly2764f952016-09-06 19:22:44 -07003298
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003299 pwr->dev = &pdev->dev;
3300 pwr->pdev = pdev;
3301 mutex_init(&pwr->power_lock);
3302 spin_lock_init(&pwr->clock_refs_lock);
Patrick Daly2764f952016-09-06 19:22:44 -07003303
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003304 ret = arm_smmu_init_clocks(pwr);
3305 if (ret)
3306 return ERR_PTR(ret);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003307
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003308 ret = arm_smmu_init_regulators(pwr);
3309 if (ret)
3310 return ERR_PTR(ret);
3311
3312 ret = arm_smmu_init_bus_scaling(pwr);
3313 if (ret)
3314 return ERR_PTR(ret);
3315
3316 return pwr;
Patrick Daly2764f952016-09-06 19:22:44 -07003317}
3318
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003319/*
3320 * Bus APIs are not devm-safe.
3321 */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003322static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003323{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003324 msm_bus_scale_unregister_client(pwr->bus_client);
3325 msm_bus_cl_clear_pdata(pwr->bus_dt_data);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003326}
3327
Will Deacon45ae7cf2013-06-24 18:31:25 +01003328static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
3329{
3330 unsigned long size;
3331 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
3332 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003333 bool cttw_dt, cttw_reg;
Robin Murphya754fd12016-09-12 17:13:50 +01003334 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003335
Mitchel Humpherysba822582015-10-20 11:37:41 -07003336 dev_dbg(smmu->dev, "probing hardware configuration...\n");
3337 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01003338 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003339
3340 /* ID0 */
3341 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01003342
3343 /* Restrict available stages based on module parameter */
3344 if (force_stage == 1)
3345 id &= ~(ID0_S2TS | ID0_NTS);
3346 else if (force_stage == 2)
3347 id &= ~(ID0_S1TS | ID0_NTS);
3348
Will Deacon45ae7cf2013-06-24 18:31:25 +01003349 if (id & ID0_S1TS) {
3350 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003351 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003352 }
3353
3354 if (id & ID0_S2TS) {
3355 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003356 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003357 }
3358
3359 if (id & ID0_NTS) {
3360 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003361 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003362 }
3363
3364 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01003365 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003366 dev_err(smmu->dev, "\tno translation support!\n");
3367 return -ENODEV;
3368 }
3369
Robin Murphyb7862e32016-04-13 18:13:03 +01003370 if ((id & ID0_S1TS) &&
3371 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003372 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003373 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003374 }
3375
Robin Murphybae2c2d2015-07-29 19:46:05 +01003376 /*
3377 * In order for DMA API calls to work properly, we must defer to what
3378 * the DT says about coherency, regardless of what the hardware claims.
3379 * Fortunately, this also opens up a workaround for systems where the
3380 * ID register value has ended up configured incorrectly.
3381 */
3382 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
3383 cttw_reg = !!(id & ID0_CTTW);
3384 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003385 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003386 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003387 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01003388 cttw_dt ? "" : "non-");
3389 if (cttw_dt != cttw_reg)
3390 dev_notice(smmu->dev,
3391 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003392
Robin Murphy53867802016-09-12 17:13:48 +01003393 /* Max. number of entries we have for stream matching/indexing */
3394 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
3395 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003396 if (id & ID0_SMS) {
Robin Murphy53867802016-09-12 17:13:48 +01003397 u32 smr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003398
3399 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy53867802016-09-12 17:13:48 +01003400 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
3401 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003402 dev_err(smmu->dev,
3403 "stream-matching supported, but no SMRs present!\n");
3404 return -ENODEV;
3405 }
3406
Robin Murphy53867802016-09-12 17:13:48 +01003407 /*
3408 * SMR.ID bits may not be preserved if the corresponding MASK
3409 * bits are set, so check each one separately. We can reject
3410 * masters later if they try to claim IDs outside these masks.
3411 */
3412 smr = smmu->streamid_mask << SMR_ID_SHIFT;
3413 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
3414 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
3415 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003416
Robin Murphy53867802016-09-12 17:13:48 +01003417 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
3418 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
3419 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
3420 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
Dhaval Patel031d7462015-05-09 14:47:29 -07003421
Robin Murphy468f4942016-09-12 17:13:49 +01003422 /* Zero-initialised to mark as invalid */
3423 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
3424 GFP_KERNEL);
3425 if (!smmu->smrs)
3426 return -ENOMEM;
3427
Robin Murphy53867802016-09-12 17:13:48 +01003428 dev_notice(smmu->dev,
3429 "\tstream matching with %lu register groups, mask 0x%x",
3430 size, smmu->smr_mask_mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003431 }
Robin Murphya754fd12016-09-12 17:13:50 +01003432 /* s2cr->type == 0 means translation, so initialise explicitly */
3433 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
3434 GFP_KERNEL);
3435 if (!smmu->s2crs)
3436 return -ENOMEM;
3437 for (i = 0; i < size; i++)
3438 smmu->s2crs[i] = s2cr_init_val;
3439
Robin Murphy53867802016-09-12 17:13:48 +01003440 smmu->num_mapping_groups = size;
Robin Murphy6668f692016-09-12 17:13:54 +01003441 mutex_init(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003442
Robin Murphy7602b872016-04-28 17:12:09 +01003443 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
3444 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
3445 if (!(id & ID0_PTFS_NO_AARCH32S))
3446 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
3447 }
3448
Will Deacon45ae7cf2013-06-24 18:31:25 +01003449 /* ID1 */
3450 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01003451 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003452
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003453 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00003454 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01003455 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003456 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07003457 dev_warn(smmu->dev,
3458 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
3459 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003460
Will Deacon518f7132014-11-14 17:17:54 +00003461 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003462 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
3463 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
3464 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
3465 return -ENODEV;
3466 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07003467 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01003468 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01003469 /*
3470 * Cavium CN88xx erratum #27704.
3471 * Ensure ASID and VMID allocation is unique across all SMMUs in
3472 * the system.
3473 */
3474 if (smmu->model == CAVIUM_SMMUV2) {
3475 smmu->cavium_id_base =
3476 atomic_add_return(smmu->num_context_banks,
3477 &cavium_smmu_context_count);
3478 smmu->cavium_id_base -= smmu->num_context_banks;
3479 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003480
3481 /* ID2 */
3482 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
3483 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003484 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003485
Will Deacon518f7132014-11-14 17:17:54 +00003486 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003487 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003488 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003489
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003490 if (id & ID2_VMID16)
3491 smmu->features |= ARM_SMMU_FEAT_VMID16;
3492
Robin Murphyf1d84542015-03-04 16:41:05 +00003493 /*
3494 * What the page table walker can address actually depends on which
3495 * descriptor format is in use, but since a) we don't know that yet,
3496 * and b) it can vary per context bank, this will have to do...
3497 */
3498 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
3499 dev_warn(smmu->dev,
3500 "failed to set DMA mask for table walker\n");
3501
Robin Murphyb7862e32016-04-13 18:13:03 +01003502 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00003503 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01003504 if (smmu->version == ARM_SMMU_V1_64K)
3505 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003506 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003507 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00003508 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00003509 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01003510 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00003511 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01003512 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00003513 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01003514 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003515 }
3516
Robin Murphy7602b872016-04-28 17:12:09 +01003517 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01003518 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01003519 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01003520 if (smmu->features &
3521 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01003522 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01003523 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01003524 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01003525 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01003526 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01003527
Robin Murphyd5466352016-05-09 17:20:09 +01003528 if (arm_smmu_ops.pgsize_bitmap == -1UL)
3529 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
3530 else
3531 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003532 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01003533 smmu->pgsize_bitmap);
3534
Will Deacon518f7132014-11-14 17:17:54 +00003535
Will Deacon28d60072014-09-01 16:24:48 +01003536 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003537 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
3538 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003539
3540 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003541 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
3542 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003543
Will Deacon45ae7cf2013-06-24 18:31:25 +01003544 return 0;
3545}
3546
Patrick Dalyd7476202016-09-08 18:23:28 -07003547static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
3548{
3549 if (!smmu->arch_ops)
3550 return 0;
3551 if (!smmu->arch_ops->init)
3552 return 0;
3553 return smmu->arch_ops->init(smmu);
3554}
3555
3556static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
3557{
3558 if (!smmu->arch_ops)
3559 return;
3560 if (!smmu->arch_ops->device_reset)
3561 return;
3562 return smmu->arch_ops->device_reset(smmu);
3563}
3564
Robin Murphy67b65a32016-04-13 18:12:57 +01003565struct arm_smmu_match_data {
3566 enum arm_smmu_arch_version version;
3567 enum arm_smmu_implementation model;
Patrick Dalyd7476202016-09-08 18:23:28 -07003568 struct arm_smmu_arch_ops *arch_ops;
Robin Murphy67b65a32016-04-13 18:12:57 +01003569};
3570
Patrick Dalyd7476202016-09-08 18:23:28 -07003571#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops) \
3572static struct arm_smmu_match_data name = { \
3573.version = ver, \
3574.model = imp, \
3575.arch_ops = ops, \
3576} \
Robin Murphy67b65a32016-04-13 18:12:57 +01003577
Patrick Daly1f8a2882016-09-12 17:32:05 -07003578struct arm_smmu_arch_ops qsmmuv500_arch_ops;
3579
Patrick Dalyd7476202016-09-08 18:23:28 -07003580ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
3581ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
3582ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
3583ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
3584ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003585ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
Patrick Daly1f8a2882016-09-12 17:32:05 -07003586ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
3587 &qsmmuv500_arch_ops);
Robin Murphy67b65a32016-04-13 18:12:57 +01003588
Joerg Roedel09b52692014-10-02 12:24:45 +02003589static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01003590 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
3591 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
3592 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01003593 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003594 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01003595 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07003596 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Patrick Daly1f8a2882016-09-12 17:32:05 -07003597 { .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
Robin Murphy09360402014-08-28 17:51:59 +01003598 { },
3599};
3600MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
3601
Patrick Daly1f8a2882016-09-12 17:32:05 -07003602static int qsmmuv500_tbu_register(struct device *dev, void *data);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003603static int arm_smmu_device_dt_probe(struct platform_device *pdev)
3604{
Robin Murphy67b65a32016-04-13 18:12:57 +01003605 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003606 struct resource *res;
3607 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003608 struct device *dev = &pdev->dev;
Robin Murphyd5b41782016-09-14 15:21:39 +01003609 int num_irqs, i, err;
Robin Murphy7e96c742016-09-14 15:26:46 +01003610 bool legacy_binding;
3611
3612 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
3613 if (legacy_binding && !using_generic_binding) {
3614 if (!using_legacy_binding)
3615 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
3616 using_legacy_binding = true;
3617 } else if (!legacy_binding && !using_legacy_binding) {
3618 using_generic_binding = true;
3619 } else {
3620 dev_err(dev, "not probing due to mismatched DT properties\n");
3621 return -ENODEV;
3622 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003623
3624 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
3625 if (!smmu) {
3626 dev_err(dev, "failed to allocate arm_smmu_device\n");
3627 return -ENOMEM;
3628 }
3629 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08003630 spin_lock_init(&smmu->atos_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07003631 idr_init(&smmu->asid_idr);
3632 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003633
Robin Murphyfe52d4f2016-09-12 17:13:52 +01003634 data = of_device_get_match_data(dev);
Robin Murphy67b65a32016-04-13 18:12:57 +01003635 smmu->version = data->version;
3636 smmu->model = data->model;
Patrick Dalyd7476202016-09-08 18:23:28 -07003637 smmu->arch_ops = data->arch_ops;
Robin Murphy09360402014-08-28 17:51:59 +01003638
Will Deacon45ae7cf2013-06-24 18:31:25 +01003639 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01003640 smmu->base = devm_ioremap_resource(dev, res);
3641 if (IS_ERR(smmu->base))
3642 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003643 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003644
3645 if (of_property_read_u32(dev->of_node, "#global-interrupts",
3646 &smmu->num_global_irqs)) {
3647 dev_err(dev, "missing #global-interrupts property\n");
3648 return -ENODEV;
3649 }
3650
3651 num_irqs = 0;
3652 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
3653 num_irqs++;
3654 if (num_irqs > smmu->num_global_irqs)
3655 smmu->num_context_irqs++;
3656 }
3657
Andreas Herrmann44a08de2013-10-01 13:39:07 +01003658 if (!smmu->num_context_irqs) {
3659 dev_err(dev, "found %d interrupts but expected at least %d\n",
3660 num_irqs, smmu->num_global_irqs + 1);
3661 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003662 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003663
3664 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
3665 GFP_KERNEL);
3666 if (!smmu->irqs) {
3667 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
3668 return -ENOMEM;
3669 }
3670
3671 for (i = 0; i < num_irqs; ++i) {
3672 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07003673
Will Deacon45ae7cf2013-06-24 18:31:25 +01003674 if (irq < 0) {
3675 dev_err(dev, "failed to get irq index %d\n", i);
3676 return -ENODEV;
3677 }
3678 smmu->irqs[i] = irq;
3679 }
3680
Dhaval Patel031d7462015-05-09 14:47:29 -07003681 parse_driver_options(smmu);
3682
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003683 smmu->pwr = arm_smmu_init_power_resources(pdev);
3684 if (IS_ERR(smmu->pwr))
3685 return PTR_ERR(smmu->pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003686
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003687 err = arm_smmu_power_on(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07003688 if (err)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003689 goto out_exit_power_resources;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003690
3691 err = arm_smmu_device_cfg_probe(smmu);
3692 if (err)
3693 goto out_power_off;
3694
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003695 err = arm_smmu_parse_impl_def_registers(smmu);
3696 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01003697 goto out_power_off;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003698
	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
			smmu->num_context_irqs, smmu->num_context_banks,
			smmu->num_context_banks);
		smmu->num_context_irqs = smmu->num_context_banks;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
					NULL, arm_smmu_global_fault,
					IRQF_ONESHOT | IRQF_SHARED,
					"arm-smmu global fault", smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_power_off;
		}
	}

	err = arm_smmu_arch_init(smmu);
	if (err)
		goto out_power_off;

	of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
	platform_set_drvdata(pdev, smmu);
	arm_smmu_device_reset(smmu);
	arm_smmu_power_off(smmu->pwr);

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif
#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif
	return 0;

out_power_off:
	arm_smmu_power_off(smmu->pwr);

out_exit_power_resources:
	arm_smmu_exit_power_resources(smmu->pwr);

	return err;
}

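/*
 * The remove path needs the SMMU powered on before its registers can be
 * touched: client access is disabled via sCR0_CLIENTPD and the power
 * resources are then released.
 */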
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return -ENODEV;

	if (arm_smmu_power_on(smmu->pwr))
		return -EINVAL;

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(&pdev->dev, "removing device with active domains!\n");

	idr_destroy(&smmu->asid_idr);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	arm_smmu_power_off(smmu->pwr);

	arm_smmu_exit_power_resources(smmu->pwr);

	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

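/*
 * arm_smmu_init() can be reached twice, once through the early
 * IOMMU_OF_DECLARE() hooks below and once through subsys_initcall(), so
 * only register the platform driver on the first call.
 */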
static int __init arm_smmu_init(void)
{
	static bool registered;
	int ret = 0;

	if (!registered) {
		ret = platform_driver_register(&arm_smmu_driver);
		registered = !ret;
	}
	return ret;
}

static void __exit arm_smmu_exit(void)
{
	platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

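/*
 * Early init hook for OF-declared SMMUs: register the driver and create
 * the platform device up front so the SMMU can be probed ahead of the
 * masters that depend on it.
 */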
static int __init arm_smmu_of_init(struct device_node *np)
{
	int ret = arm_smmu_init();

	if (ret)
		return ret;

	if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
		return -ENODEV;

	return 0;
}
IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
IOMMU_OF_DECLARE(qcom_smmuv2, "qcom,smmu-v2", arm_smmu_of_init);
IOMMU_OF_DECLARE(qcom_mmu500, "qcom,qsmmu-v500", arm_smmu_of_init);

#define DEBUG_SID_HALT_REG		0x0
#define DEBUG_SID_HALT_VAL		(0x1 << 16)

#define DEBUG_SR_HALT_ACK_REG		0x20
#define DEBUG_SR_HALT_ACK_VAL		(0x1 << 1)

#define TBU_DBG_TIMEOUT_US		30000

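/*
 * Per-TBU (translation buffer unit) state for qcom,qsmmu-v500: each TBU is
 * a child platform device with its own register space, power resources and
 * a reference-counted halt state.
 */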
struct qsmmuv500_tbu_device {
	struct list_head		list;
	struct device			*dev;
	struct arm_smmu_device		*smmu;
	void __iomem			*base;
	void __iomem			*status_reg;

	struct arm_smmu_power_resources	*pwr;

	/* Protects halt count */
	spinlock_t			halt_lock;
	u32				halt_count;
};

static int qsmmuv500_tbu_power_on_all(struct arm_smmu_device *smmu)
{
	struct qsmmuv500_tbu_device *tbu;
	struct list_head *list = smmu->archdata;
	int ret = 0;

	list_for_each_entry(tbu, list, list) {
		ret = arm_smmu_power_on(tbu->pwr);
		if (ret)
			break;
	}
	if (!ret)
		return 0;

	list_for_each_entry_continue_reverse(tbu, list, list) {
		arm_smmu_power_off(tbu->pwr);
	}
	return ret;
}

static void qsmmuv500_tbu_power_off_all(struct arm_smmu_device *smmu)
{
	struct qsmmuv500_tbu_device *tbu;
	struct list_head *list = smmu->archdata;

	list_for_each_entry_reverse(tbu, list, list) {
		arm_smmu_power_off(tbu->pwr);
	}
}

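/*
 * Halt and resume nest via halt_count under halt_lock: only the first halt
 * request sets the halt bit and polls for the acknowledge, and only the
 * last resume clears it again.
 */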
static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu)
{
	unsigned long flags;
	u32 val;
	void __iomem *base;

	spin_lock_irqsave(&tbu->halt_lock, flags);
	if (tbu->halt_count) {
		tbu->halt_count++;
		spin_unlock_irqrestore(&tbu->halt_lock, flags);
		return 0;
	}

	base = tbu->base;
	val = readl_relaxed(base + DEBUG_SID_HALT_REG);
	val |= DEBUG_SID_HALT_VAL;
	writel_relaxed(val, base + DEBUG_SID_HALT_REG);

	if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG,
				      val, (val & DEBUG_SR_HALT_ACK_VAL),
				      0, TBU_DBG_TIMEOUT_US)) {
		dev_err(tbu->dev, "Couldn't halt TBU!\n");
		spin_unlock_irqrestore(&tbu->halt_lock, flags);
		return -ETIMEDOUT;
	}

	tbu->halt_count = 1;
	spin_unlock_irqrestore(&tbu->halt_lock, flags);
	return 0;
}

static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
{
	unsigned long flags;
	u32 val;
	void __iomem *base;

	spin_lock_irqsave(&tbu->halt_lock, flags);
	if (!tbu->halt_count) {
		WARN(1, "%s: bad tbu->halt_count\n", dev_name(tbu->dev));
		spin_unlock_irqrestore(&tbu->halt_lock, flags);
		return;

	} else if (tbu->halt_count > 1) {
		tbu->halt_count--;
		spin_unlock_irqrestore(&tbu->halt_lock, flags);
		return;
	}

	base = tbu->base;
	val = readl_relaxed(base + DEBUG_SID_HALT_REG);
	val &= ~DEBUG_SID_HALT_VAL;
	writel_relaxed(val, base + DEBUG_SID_HALT_REG);

	tbu->halt_count = 0;
	spin_unlock_irqrestore(&tbu->halt_lock, flags);
}

static int qsmmuv500_halt_all(struct arm_smmu_device *smmu)
{
	struct qsmmuv500_tbu_device *tbu;
	struct list_head *list = smmu->archdata;
	int ret = 0;

	list_for_each_entry(tbu, list, list) {
		ret = qsmmuv500_tbu_halt(tbu);
		if (ret)
			break;
	}

	if (!ret)
		return 0;

	list_for_each_entry_continue_reverse(tbu, list, list) {
		qsmmuv500_tbu_resume(tbu);
	}
	return ret;
}

static void qsmmuv500_resume_all(struct arm_smmu_device *smmu)
{
	struct qsmmuv500_tbu_device *tbu;
	struct list_head *list = smmu->archdata;

	list_for_each_entry(tbu, list, list) {
		qsmmuv500_tbu_resume(tbu);
	}
}

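/*
 * Program the implementation-defined registers with every TBU powered on
 * and halted, then resume the TBUs and drop the power votes again.
 */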
static void qsmmuv500_device_reset(struct arm_smmu_device *smmu)
{
	int i, ret;
	struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;

	ret = qsmmuv500_tbu_power_on_all(smmu);
	if (ret)
		return;

	/* Program implementation defined registers */
	qsmmuv500_halt_all(smmu);
	for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
		writel_relaxed(regs[i].value,
			       ARM_SMMU_GR0(smmu) + regs[i].offset);
	qsmmuv500_resume_all(smmu);
	qsmmuv500_tbu_power_off_all(smmu);
}

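/*
 * Called for each child device of the SMMU: a TBU must already have bound
 * to the qsmmuv500-tbu driver before it can be added to the SMMU's list.
 */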
static int qsmmuv500_tbu_register(struct device *dev, void *data)
{
	struct arm_smmu_device *smmu = data;
	struct qsmmuv500_tbu_device *tbu;
	struct list_head *list = smmu->archdata;

	if (!dev->driver) {
		dev_err(dev, "TBU failed probe, QSMMUV500 cannot continue!\n");
		return -EINVAL;
	}

	tbu = dev_get_drvdata(dev);

	INIT_LIST_HEAD(&tbu->list);
	tbu->smmu = smmu;
	list_add(&tbu->list, list);
	return 0;
}

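/*
 * Create the TBU child devices described in the device tree and collect
 * them on the list hung off smmu->archdata.
 */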
static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;
	struct list_head *list;
	int ret;

	list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	INIT_LIST_HEAD(list);
	smmu->archdata = list;

	ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
	if (ret)
		return ret;

	/* Attempt to register child devices */
	ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
	if (ret)
		return -EINVAL;

	return 0;
}

struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
	.init = qsmmuv500_arch_init,
	.device_reset = qsmmuv500_device_reset,
};

static const struct of_device_id qsmmuv500_tbu_of_match[] = {
	{.compatible = "qcom,qsmmuv500-tbu"},
	{}
};

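/*
 * Illustrative (not authoritative) device-tree snippet for a TBU child
 * node, based only on what this probe routine looks up; the unit address
 * and reg values below are placeholders:
 *
 *	tbu@0 {
 *		compatible = "qcom,qsmmuv500-tbu";
 *		reg = <0x0 0x1000>, <0x1000 0x4>;
 *		reg-names = "base", "status-reg";
 *	};
 */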
static int qsmmuv500_tbu_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct qsmmuv500_tbu_device *tbu;

	tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
	if (!tbu)
		return -ENOMEM;

	INIT_LIST_HEAD(&tbu->list);
	tbu->dev = dev;
	spin_lock_init(&tbu->halt_lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	tbu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(tbu->base))
		return PTR_ERR(tbu->base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "status-reg");
	tbu->status_reg = devm_ioremap_resource(dev, res);
	if (IS_ERR(tbu->status_reg))
		return PTR_ERR(tbu->status_reg);

	tbu->pwr = arm_smmu_init_power_resources(pdev);
	if (IS_ERR(tbu->pwr))
		return PTR_ERR(tbu->pwr);

	dev_set_drvdata(dev, tbu);
	return 0;
}

static struct platform_driver qsmmuv500_tbu_driver = {
	.driver	= {
		.name		= "qsmmuv500-tbu",
		.of_match_table	= of_match_ptr(qsmmuv500_tbu_of_match),
	},
	.probe	= qsmmuv500_tbu_probe,
};

static int __init qsmmuv500_tbu_init(void)
{
	return platform_driver_register(&qsmmuv500_tbu_driver);
}
subsys_initcall(qsmmuv500_tbu_init);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");