/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/of_platform.h>
#include <linux/msm-bus.h>
#include <dt-bindings/msm/msm-bus-ids.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		500000	/* 500ms */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FSRRESTORE		0x5c
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define TLBSTATUS_SACTIVE		(1 << 0)
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
	QCOM_SMMUV500,
};

struct arm_smmu_device;
struct arm_smmu_arch_ops {
	int (*init)(struct arm_smmu_device *smmu);
	void (*device_reset)(struct arm_smmu_device *smmu);
	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
					 dma_addr_t iova);
	void (*iova_to_phys_fault)(struct iommu_domain *domain,
				   dma_addr_t iova, phys_addr_t *phys1,
				   phys_addr_t *phys_post_tlbiall);
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

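/*
 * Default state for an unclaimed stream-map entry: fault the stream when
 * the disable_bypass module parameter is set, otherwise let it bypass.
 */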
#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
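/*
 * Helpers for looking up and iterating over the stream-map entries
 * (SMR/S2CR indices) recorded in a device's iommu_fwspec private data.
 */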
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

/*
 * Describes resources required for on/off power operation.
 * Separate reference count is provided for atomic/nonatomic
 * operations.
 */
struct arm_smmu_power_resources {
	struct platform_device		*pdev;
	struct device			*dev;

	struct clk			**clocks;
	int				num_clocks;

	struct regulator_bulk_data	*gdscs;
	int				num_gdscs;

	uint32_t			bus_client;
	struct msm_bus_scale_pdata	*bus_dt_data;

	/* Protects power_count */
	struct mutex			power_lock;
	int				power_count;

	/* Protects clock_refs_count */
	spinlock_t			clock_refs_lock;
	int				clock_refs_count;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
#define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;

	u32				cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
	unsigned int			num_impl_def_attach_registers;

	struct arm_smmu_power_resources *pwr;

	spinlock_t			atos_lock;

	/* protects idr */
	struct mutex			idr_mutex;
	struct idr			asid_idr;

	struct arm_smmu_arch_ops	*arch_ops;
	void				*archdata;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	u32				procid;
	u16				asid;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff
#define INVALID_CBNDX			0xff
#define INVALID_ASID			0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_pte_info {
	void *virt_addr;
	size_t size;
	struct list_head entry;
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	struct io_pgtable_cfg		pgtbl_cfg;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	u32				attributes;
	u32				secure_vmid;
	struct list_head		pte_info_list;
	struct list_head		unassign_list;
	struct mutex			assign_lock;
	struct list_head		secure_pool_list;
	struct iommu_domain		domain;
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ 0, NULL},
};

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);

static int arm_smmu_arch_init(struct arm_smmu_device *smmu);
static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu);

static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_dbg(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
{
	if (smmu_domain->attributes &
			(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT))
		return true;
	else if (smmu_domain->smmu && smmu_domain->smmu->dev)
		return smmu_domain->smmu->dev->archdata.dma_coherent;
	else
		return false;
}

static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
{
	return (smmu_domain->secure_vmid != VMID_INVAL);
}

static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_lock(&smmu_domain->assign_lock);
}

static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_unlock(&smmu_domain->assign_lock);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					    struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err = 0;

	memset(&it, 0, sizeof(it));
	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

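/*
 * Clock handling is split into prepare/unprepare (which may sleep) and
 * enable/disable (atomic-safe) halves so that the atomic power path below
 * only has to toggle clocks that are already prepared.
 */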
static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_prepare(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
			while (i--)
				clk_unprepare(pwr->clocks[i]);
			break;
		}
	}
	return ret;
}

static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_unprepare(pwr->clocks[i - 1]);
}

static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_enable(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
			while (i--)
				clk_disable(pwr->clocks[i]);
			break;
		}
	}

	return ret;
}

static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_disable(pwr->clocks[i - 1]);
}

static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return 0;
	return msm_bus_scale_client_update_request(pwr->bus_client, 1);
}

static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return;
	WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
}

/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count > 0) {
		pwr->clock_refs_count++;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return 0;
	}

	ret = arm_smmu_enable_clocks(pwr);
	if (!ret)
		pwr->clock_refs_count = 1;

	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
	return ret;
}

/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
{
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count == 0) {
		WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;

	} else if (pwr->clock_refs_count > 1) {
		pwr->clock_refs_count--;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;
	}

	arm_smmu_disable_clocks(pwr);

	pwr->clock_refs_count = 0;
	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
}

static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
{
	int ret;

	mutex_lock(&pwr->power_lock);
	if (pwr->power_count > 0) {
		pwr->power_count += 1;
		mutex_unlock(&pwr->power_lock);
		return 0;
	}

	ret = regulator_bulk_enable(pwr->num_gdscs, pwr->gdscs);
	if (ret)
		goto out_unlock;

	ret = arm_smmu_request_bus(pwr);
	if (ret)
		goto out_disable_regulators;

	ret = arm_smmu_prepare_clocks(pwr);
	if (ret)
		goto out_disable_bus;

	pwr->power_count = 1;
	mutex_unlock(&pwr->power_lock);
	return 0;

out_disable_bus:
	arm_smmu_unrequest_bus(pwr);
out_disable_regulators:
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
out_unlock:
	mutex_unlock(&pwr->power_lock);
	return ret;
}

static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
{
	mutex_lock(&pwr->power_lock);
	if (pwr->power_count == 0) {
		WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
		mutex_unlock(&pwr->power_lock);
		return;

	} else if (pwr->power_count > 1) {
		pwr->power_count--;
		mutex_unlock(&pwr->power_lock);
		return;
	}

	arm_smmu_unprepare_clocks(pwr);
	arm_smmu_unrequest_bus(pwr);
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);

	mutex_unlock(&pwr->power_lock);
}

static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
{
	int ret;

	ret = arm_smmu_power_on_slow(pwr);
	if (ret)
		return ret;

	ret = arm_smmu_power_on_atomic(pwr);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	arm_smmu_power_off_slow(pwr);
	return ret;
}

static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
{
	arm_smmu_power_off_atomic(pwr);
	arm_smmu_power_off_slow(pwr);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static int arm_smmu_domain_power_on(struct iommu_domain *domain,
				    struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain)
		return arm_smmu_power_on_atomic(smmu->pwr);

	return arm_smmu_power_on(smmu->pwr);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static void arm_smmu_domain_power_off(struct iommu_domain *domain,
				      struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain) {
		arm_smmu_power_off_atomic(smmu->pwr);
		return;
	}

	arm_smmu_power_off(smmu->pwr);
}

/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
				 int cbndx)
{
	void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
	u32 val;

	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
				      !(val & TLBSTATUS_SACTIVE),
				      0, TLB_LOOP_TIMEOUT))
		dev_err(smmu->dev, "TLBSYNC timeout!\n");
}

static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
}

/* Must be called with clocks/regulators enabled */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
		__arm_smmu_tlb_sync(smmu);
	}
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

struct arm_smmu_secure_pool_chunk {
	void *addr;
	size_t size;
	struct list_head list;
};

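/*
 * For secure domains, freed page-table pages are zeroed and parked in a
 * per-domain pool so that later allocations of the same size can reuse
 * them without another call to arm_smmu_prepare_pgtable(); the pool is
 * drained when the domain is destroyed.
 */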
static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
					size_t size)
{
	struct arm_smmu_secure_pool_chunk *it;

	list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
		if (it->size == size) {
			void *addr = it->addr;

			list_del(&it->list);
			kfree(it);
			return addr;
		}
	}

	return NULL;
}

static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
				     void *addr, size_t size)
{
	struct arm_smmu_secure_pool_chunk *chunk;

	chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
	if (!chunk)
		return -ENOMEM;

	chunk->addr = addr;
	chunk->size = size;
	memset(addr, 0, size);
	list_add(&chunk->list, &smmu_domain->secure_pool_list);

	return 0;
}

static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_secure_pool_chunk *it, *i;

	list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
		arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
		/* pages will be freed later (after being unassigned) */
		kfree(it);
	}
}

static void *arm_smmu_alloc_pages_exact(void *cookie,
					size_t size, gfp_t gfp_mask)
{
	int ret;
	void *page;
	struct arm_smmu_domain *smmu_domain = cookie;

	if (!arm_smmu_is_domain_secure(smmu_domain))
		return alloc_pages_exact(size, gfp_mask);

	page = arm_smmu_secure_pool_remove(smmu_domain, size);
	if (page)
		return page;

	page = alloc_pages_exact(size, gfp_mask);
	if (page) {
		ret = arm_smmu_prepare_pgtable(page, cookie);
		if (ret) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return page;
}

static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	if (!arm_smmu_is_domain_secure(smmu_domain)) {
		free_pages_exact(virt, size);
		return;
	}

	if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
		arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
	.alloc_pages_exact = arm_smmu_alloc_pages_exact,
	.free_pages_exact = arm_smmu_free_pages_exact,
};

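/*
 * On an unhandled context fault, retry the hardware (ATOS) translation
 * before and after a TLB invalidation and report any difference, to help
 * distinguish stale TLB entries from genuinely bad page tables.
 */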
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001140static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
1141 dma_addr_t iova, u32 fsr)
1142{
1143 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001144 struct arm_smmu_device *smmu;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001145 phys_addr_t phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07001146 phys_addr_t phys_post_tlbiall;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001147
1148 smmu = smmu_domain->smmu;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001149
Patrick Dalyad441dd2016-09-15 15:50:46 -07001150 if (smmu->arch_ops && smmu->arch_ops->iova_to_phys_fault) {
1151 smmu->arch_ops->iova_to_phys_fault(domain, iova, &phys,
1152 &phys_post_tlbiall);
1153 } else {
1154 phys = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001155 arm_smmu_tlb_inv_context(smmu_domain);
Patrick Dalyad441dd2016-09-15 15:50:46 -07001156 phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001157 }
1158
Patrick Dalyad441dd2016-09-15 15:50:46 -07001159 if (phys != phys_post_tlbiall) {
1160 dev_err(smmu->dev,
1161 "ATOS results differed across TLBIALL...\n"
1162 "Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
1163 }
1164 if (!phys_post_tlbiall) {
1165 dev_err(smmu->dev,
1166 "ATOS still failed. If the page tables look good (check the software table walk) then hardware might be misbehaving.\n");
1167 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001168
Patrick Dalyad441dd2016-09-15 15:50:46 -07001169 return phys_post_tlbiall;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001170}
1171
Will Deacon45ae7cf2013-06-24 18:31:25 +01001172static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
1173{
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001174 int flags, ret, tmp;
Patrick Daly5ba28112016-08-30 19:18:52 -07001175 u32 fsr, fsynr, resume;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001176 unsigned long iova;
1177 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +01001178 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001179 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1180 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001181 void __iomem *cb_base;
Shalaj Jain04059c52015-03-03 13:34:59 -08001182 void __iomem *gr1_base;
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001183 bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001184 phys_addr_t phys_soft;
Shalaj Jain04059c52015-03-03 13:34:59 -08001185 u32 frsynra;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001186 bool non_fatal_fault = !!(smmu_domain->attributes &
1187 DOMAIN_ATTR_NON_FATAL_FAULTS);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001188
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001189 static DEFINE_RATELIMIT_STATE(_rs,
1190 DEFAULT_RATELIMIT_INTERVAL,
1191 DEFAULT_RATELIMIT_BURST);
1192
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001193 ret = arm_smmu_power_on(smmu->pwr);
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001194 if (ret)
1195 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001196
Shalaj Jain04059c52015-03-03 13:34:59 -08001197 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001198 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001199 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
1200
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001201 if (!(fsr & FSR_FAULT)) {
1202 ret = IRQ_NONE;
1203 goto out_power_off;
1204 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001205
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001206 if (fatal_asf && (fsr & FSR_ASF)) {
1207 dev_err(smmu->dev,
1208 "Took an address size fault. Refusing to recover.\n");
1209 BUG();
1210 }
1211
Will Deacon45ae7cf2013-06-24 18:31:25 +01001212 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
Patrick Daly5ba28112016-08-30 19:18:52 -07001213 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001214 if (fsr & FSR_TF)
1215 flags |= IOMMU_FAULT_TRANSLATION;
1216 if (fsr & FSR_PF)
1217 flags |= IOMMU_FAULT_PERMISSION;
Mitchel Humpherysdd75e332015-08-19 11:02:33 -07001218 if (fsr & FSR_EF)
1219 flags |= IOMMU_FAULT_EXTERNAL;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001220 if (fsr & FSR_SS)
1221 flags |= IOMMU_FAULT_TRANSACTION_STALLED;
Patrick Daly5ba28112016-08-30 19:18:52 -07001222
Robin Murphyf9a05f02016-04-13 18:13:01 +01001223 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001224 phys_soft = arm_smmu_iova_to_phys(domain, iova);
Shalaj Jain04059c52015-03-03 13:34:59 -08001225 frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
1226 frsynra &= CBFRSYNRA_SID_MASK;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001227 tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
1228 if (!tmp || (tmp == -EBUSY)) {
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001229 dev_dbg(smmu->dev,
1230 "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1231 iova, fsr, fsynr, cfg->cbndx);
1232 dev_dbg(smmu->dev,
1233 "soft iova-to-phys=%pa\n", &phys_soft);
Patrick Daly5ba28112016-08-30 19:18:52 -07001234 ret = IRQ_HANDLED;
Shrenuj Bansald5083c02015-09-18 14:59:09 -07001235 resume = RESUME_TERMINATE;
Patrick Daly5ba28112016-08-30 19:18:52 -07001236 } else {
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001237 phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
1238 fsr);
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001239 if (__ratelimit(&_rs)) {
1240 dev_err(smmu->dev,
1241 "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1242 iova, fsr, fsynr, cfg->cbndx);
1243 dev_err(smmu->dev, "FAR = %016lx\n",
1244 (unsigned long)iova);
1245 dev_err(smmu->dev,
1246 "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
1247 fsr,
1248 (fsr & 0x02) ? "TF " : "",
1249 (fsr & 0x04) ? "AFF " : "",
1250 (fsr & 0x08) ? "PF " : "",
1251 (fsr & 0x10) ? "EF " : "",
1252 (fsr & 0x20) ? "TLBMCF " : "",
1253 (fsr & 0x40) ? "TLBLKF " : "",
1254 (fsr & 0x80) ? "MHF " : "",
1255 (fsr & 0x40000000) ? "SS " : "",
1256 (fsr & 0x80000000) ? "MULTI " : "");
1257 dev_err(smmu->dev,
1258 "soft iova-to-phys=%pa\n", &phys_soft);
Mitchel Humpherysd03b65d2015-11-05 11:50:29 -08001259 if (!phys_soft)
1260 dev_err(smmu->dev,
1261 "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
1262 dev_name(smmu->dev));
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001263 dev_err(smmu->dev,
1264 "hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
1265 dev_err(smmu->dev, "SID=0x%x\n", frsynra);
1266 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001267 ret = IRQ_NONE;
1268 resume = RESUME_TERMINATE;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001269 if (!non_fatal_fault) {
1270 dev_err(smmu->dev,
1271 "Unhandled arm-smmu context fault!\n");
1272 BUG();
1273 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001274 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001275
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001276 /*
1277 * If the client returns -EBUSY, do not clear FSR and do not RESUME
1278 * if stalled. This is required to keep the IOMMU client stalled on
1279 * the outstanding fault. This gives the client a chance to take any
1280 * debug action and then terminate the stalled transaction.
1281 * So, the sequence in case of stall on fault should be:
1282 * 1) Do not clear FSR or write to RESUME here
1283 * 2) Client takes any debug action
1284 * 3) Client terminates the stalled transaction and resumes the IOMMU
1285 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1286 * not before so that the fault remains outstanding. This ensures
1287 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1288 * need to be terminated.
1289 */
1290 if (tmp != -EBUSY) {
1291 /* Clear the faulting FSR */
1292 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001293
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001294 /*
1295 * Barrier required to ensure that the FSR is cleared
1296 * before resuming SMMU operation
1297 */
1298 wmb();
1299
1300 /* Retry or terminate any stalled transactions */
1301 if (fsr & FSR_SS)
1302 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1303 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001304
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001305out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001306 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001307
Patrick Daly5ba28112016-08-30 19:18:52 -07001308 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001309}
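
/*
 * Illustrative sketch (not part of this driver): a client fault handler,
 * registered via iommu_set_fault_handler(), that follows the stall-on-fault
 * sequence documented in arm_smmu_context_fault() above. Returning -EBUSY
 * makes the handler above leave FSR set and the transaction stalled; the
 * client then terminates the transaction, resumes the SMMU and clears FSR
 * itself once it has captured its debug state. All my_dev_* names are
 * hypothetical.
 *
 *	static int my_dev_fault_handler(struct iommu_domain *domain,
 *					struct device *dev, unsigned long iova,
 *					int flags, void *token)
 *	{
 *		struct my_dev *md = token;
 *
 *		my_dev_capture_state(md, iova, flags);	// debug action
 *		schedule_work(&md->fault_recovery);	// terminates, resumes, clears FSR
 *		return -EBUSY;				// keep the fault stalled
 *	}
 */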
1310
1311static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1312{
1313 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1314 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001315 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001316
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001317 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001318 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001319
Will Deacon45ae7cf2013-06-24 18:31:25 +01001320 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1321 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1322 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1323 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1324
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001325 if (!gfsr) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001326 arm_smmu_power_off(smmu->pwr);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001327 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001328 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001329
Will Deacon45ae7cf2013-06-24 18:31:25 +01001330 dev_err_ratelimited(smmu->dev,
1331 "Unexpected global fault, this could be serious\n");
1332 dev_err_ratelimited(smmu->dev,
1333 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1334 gfsr, gfsynr0, gfsynr1, gfsynr2);
1335
1336 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001337 arm_smmu_power_off(smmu->pwr);
Will Deaconadaba322013-07-31 19:21:26 +01001338 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001339}
1340
Will Deacon518f7132014-11-14 17:17:54 +00001341static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1342 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001343{
Robin Murphyb94df6f2016-08-11 17:44:06 +01001344 u32 reg, reg2;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001345 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001346 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +01001347 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1348 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001349 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001350
Will Deacon45ae7cf2013-06-24 18:31:25 +01001351 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001352 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1353 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001354
Will Deacon4a1c93c2015-03-04 12:21:03 +00001355 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001356 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1357 reg = CBA2R_RW64_64BIT;
1358 else
1359 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001360 /* 16-bit VMIDs live in CBA2R */
1361 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001362 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001363
Will Deacon4a1c93c2015-03-04 12:21:03 +00001364 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
1365 }
1366
Will Deacon45ae7cf2013-06-24 18:31:25 +01001367 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001368 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001369 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001370 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001371
Will Deacon57ca90f2014-02-06 14:59:05 +00001372 /*
1373 * Use the weakest shareability/memory types, so they are
1374 * overridden by the ttbcr/pte.
1375 */
1376 if (stage1) {
1377 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1378 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001379 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1380 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001381 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001382 }
Will Deacon44680ee2014-06-25 11:29:12 +01001383 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001384
Will Deacon518f7132014-11-14 17:17:54 +00001385 /* TTBRs */
1386 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001387 u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001388
Robin Murphyb94df6f2016-08-11 17:44:06 +01001389 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1390 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
1391 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
1392 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
1393 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
1394 writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
1395 } else {
1396 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1397 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1398 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
1399 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
1400 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1401 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
1402 }
Will Deacon518f7132014-11-14 17:17:54 +00001403 } else {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001404 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001405 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +00001406 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001407
Will Deacon518f7132014-11-14 17:17:54 +00001408 /* TTBCR */
1409 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001410 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1411 reg = pgtbl_cfg->arm_v7s_cfg.tcr;
1412 reg2 = 0;
1413 } else {
1414 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1415 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
1416 reg2 |= TTBCR2_SEP_UPSTREAM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001417 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001418 if (smmu->version > ARM_SMMU_V1)
1419 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001420 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001421 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001422 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001423 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001424
Will Deacon518f7132014-11-14 17:17:54 +00001425 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001426 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001427 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1428 reg = pgtbl_cfg->arm_v7s_cfg.prrr;
1429 reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
1430 } else {
1431 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1432 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1433 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001434 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001435 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001436 }
1437
Will Deacon45ae7cf2013-06-24 18:31:25 +01001438 /* SCTLR */
Robin Murphyb94df6f2016-08-11 17:44:06 +01001439 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001440
1441 if ((!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
1442 !(smmu_domain->attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) ||
1443 !stage1)
Patrick Dalye62d3362016-03-15 18:58:28 -07001444 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001445 if (stage1)
1446 reg |= SCTLR_S1_ASIDPNE;
1447#ifdef __BIG_ENDIAN
1448 reg |= SCTLR_E;
1449#endif
Will Deacon25724842013-08-21 13:49:53 +01001450 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001451}
1452
Patrick Dalyc190d932016-08-30 17:23:28 -07001453static int arm_smmu_init_asid(struct iommu_domain *domain,
1454 struct arm_smmu_device *smmu)
1455{
1456 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1457 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1458 bool dynamic = is_dynamic_domain(domain);
1459 int ret;
1460
1461 if (!dynamic) {
1462 cfg->asid = cfg->cbndx + 1;
1463 } else {
1464 mutex_lock(&smmu->idr_mutex);
1465 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1466 smmu->num_context_banks + 2,
1467 MAX_ASID + 1, GFP_KERNEL);
1468
1469 mutex_unlock(&smmu->idr_mutex);
1470 if (ret < 0) {
1471 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1472 ret);
1473 return ret;
1474 }
1475 cfg->asid = ret;
1476 }
1477 return 0;
1478}
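
/*
 * Worked example (illustrative): on an SMMU with 8 context banks, a
 * non-dynamic domain gets ASID = cbndx + 1, i.e. one of ASIDs 1-8, while
 * dynamic domains are allocated from the IDR starting at
 * num_context_banks + 2 = 10 and running up to MAX_ASID. The two ranges can
 * never collide, so a dynamic domain that reuses an existing context bank
 * (its cbndx is supplied through a domain attribute, as noted in
 * arm_smmu_init_domain_context() below) still gets a distinct ASID for TLB
 * maintenance.
 */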
1479
1480static void arm_smmu_free_asid(struct iommu_domain *domain)
1481{
1482 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1483 struct arm_smmu_device *smmu = smmu_domain->smmu;
1484 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1485 bool dynamic = is_dynamic_domain(domain);
1486
1487 if (cfg->asid == INVALID_ASID || !dynamic)
1488 return;
1489
1490 mutex_lock(&smmu->idr_mutex);
1491 idr_remove(&smmu->asid_idr, cfg->asid);
1492 mutex_unlock(&smmu->idr_mutex);
1493}
1494
Will Deacon45ae7cf2013-06-24 18:31:25 +01001495static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Will Deacon44680ee2014-06-25 11:29:12 +01001496 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001497{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001498 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001499 unsigned long ias, oas;
1500 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001501 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001502 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001503 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001504 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyce6786f2016-11-09 14:19:23 -08001505 unsigned long quirks = 0;
Patrick Dalyc190d932016-08-30 17:23:28 -07001506 bool dynamic;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001507
Will Deacon518f7132014-11-14 17:17:54 +00001508 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001509 if (smmu_domain->smmu)
1510 goto out_unlock;
1511
Patrick Dalyc190d932016-08-30 17:23:28 -07001512 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1513 smmu_domain->cfg.asid = INVALID_ASID;
1514
Patrick Dalyc190d932016-08-30 17:23:28 -07001515 dynamic = is_dynamic_domain(domain);
1516 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1517 dev_err(smmu->dev, "dynamic domains not supported\n");
1518 ret = -EPERM;
1519 goto out_unlock;
1520 }
1521
Will Deaconc752ce42014-06-25 22:46:31 +01001522 /*
1523 * Mapping the requested stage onto what we support is surprisingly
1524 * complicated, mainly because the spec allows S1+S2 SMMUs without
1525 * support for nested translation. That means we end up with the
1526 * following table:
1527 *
1528 * Requested Supported Actual
1529 * S1 N S1
1530 * S1 S1+S2 S1
1531 * S1 S2 S2
1532 * S1 S1 S1
1533 * N N N
1534 * N S1+S2 S2
1535 * N S2 S2
1536 * N S1 S1
1537 *
1538 * Note that you can't actually request stage-2 mappings.
1539 */
1540 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1541 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1542 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1543 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1544
Robin Murphy7602b872016-04-28 17:12:09 +01001545 /*
1546 * Choosing a suitable context format is even more fiddly. Until we
1547 * grow some way for the caller to express a preference, and/or move
1548 * the decision into the io-pgtable code where it arguably belongs,
1549 * just aim for the closest thing to the rest of the system, and hope
1550 * that the hardware isn't esoteric enough that we can't assume AArch64
1551 * support to be a superset of AArch32 support...
1552 */
1553 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1554 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001555 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
1556 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
1557 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
1558 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
1559 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +01001560 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1561 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1562 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1563 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1564 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1565
1566 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1567 ret = -EINVAL;
1568 goto out_unlock;
1569 }
1570
Will Deaconc752ce42014-06-25 22:46:31 +01001571 switch (smmu_domain->stage) {
1572 case ARM_SMMU_DOMAIN_S1:
1573 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1574 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001575 ias = smmu->va_size;
1576 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001577 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001578 fmt = ARM_64_LPAE_S1;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001579 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +00001580 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001581 ias = min(ias, 32UL);
1582 oas = min(oas, 40UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001583 } else {
1584 fmt = ARM_V7S;
1585 ias = min(ias, 32UL);
1586 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +01001587 }
Will Deaconc752ce42014-06-25 22:46:31 +01001588 break;
1589 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001590 /*
1591 * We will likely want to change this if/when KVM gets
1592 * involved.
1593 */
Will Deaconc752ce42014-06-25 22:46:31 +01001594 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001595 cfg->cbar = CBAR_TYPE_S2_TRANS;
1596 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001597 ias = smmu->ipa_size;
1598 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001599 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001600 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001601 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001602 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001603 ias = min(ias, 40UL);
1604 oas = min(oas, 40UL);
1605 }
Will Deaconc752ce42014-06-25 22:46:31 +01001606 break;
1607 default:
1608 ret = -EINVAL;
1609 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001610 }
1611
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001612 if (is_fast)
1613 fmt = ARM_V8L_FAST;
1614
Patrick Dalyce6786f2016-11-09 14:19:23 -08001615 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
1616 quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
Liam Mark53cf2342016-12-20 11:36:07 -08001617 if (is_iommu_pt_coherent(smmu_domain))
1618 quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001619
Patrick Dalyc190d932016-08-30 17:23:28 -07001620 /* Dynamic domains must set cbndx through domain attribute */
1621 if (!dynamic) {
1622 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
Will Deacon45ae7cf2013-06-24 18:31:25 +01001623 smmu->num_context_banks);
Patrick Dalyc190d932016-08-30 17:23:28 -07001624 if (ret < 0)
1625 goto out_unlock;
1626 cfg->cbndx = ret;
1627 }
Robin Murphyb7862e32016-04-13 18:13:03 +01001628 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001629 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1630 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001631 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001632 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001633 }
1634
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001635 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
Patrick Dalyce6786f2016-11-09 14:19:23 -08001636 .quirks = quirks,
Robin Murphyd5466352016-05-09 17:20:09 +01001637 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +00001638 .ias = ias,
1639 .oas = oas,
1640 .tlb = &arm_smmu_gather_ops,
Robin Murphy2df7a252015-07-29 19:46:06 +01001641 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +00001642 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001643
Will Deacon518f7132014-11-14 17:17:54 +00001644 smmu_domain->smmu = smmu;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001645 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1646 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001647 if (!pgtbl_ops) {
1648 ret = -ENOMEM;
1649 goto out_clear_smmu;
1650 }
1651
Patrick Dalyc11d1082016-09-01 15:52:44 -07001652 /*
1653 * assign any page table memory that might have been allocated
1654 * during alloc_io_pgtable_ops
1655 */
Patrick Dalye271f212016-10-04 13:24:49 -07001656 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001657 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001658 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001659
Robin Murphyd5466352016-05-09 17:20:09 +01001660 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001661 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Robin Murphyd7a8d042016-09-12 17:13:58 +01001662 domain->geometry.aperture_end = (1UL << ias) - 1;
1663 domain->geometry.force_aperture = true;
Will Deacon518f7132014-11-14 17:17:54 +00001664
Patrick Dalyc190d932016-08-30 17:23:28 -07001665 /* Assign an asid */
1666 ret = arm_smmu_init_asid(domain, smmu);
1667 if (ret)
1668 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001669
Patrick Dalyc190d932016-08-30 17:23:28 -07001670 if (!dynamic) {
1671 /* Initialise the context bank with our page table cfg */
1672 arm_smmu_init_context_bank(smmu_domain,
1673 &smmu_domain->pgtbl_cfg);
1674
1675 /*
1676 * Request context fault interrupt. Do this last to avoid the
1677 * handler seeing a half-initialised domain state.
1678 */
1679 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1680 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001681 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1682 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001683 if (ret < 0) {
1684 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1685 cfg->irptndx, irq);
1686 cfg->irptndx = INVALID_IRPTNDX;
1687 goto out_clear_smmu;
1688 }
1689 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001690 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001691 }
Will Deacon518f7132014-11-14 17:17:54 +00001692 mutex_unlock(&smmu_domain->init_mutex);
1693
1694 /* Publish page table ops for map/unmap */
1695 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001696 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001697
Will Deacon518f7132014-11-14 17:17:54 +00001698out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001699 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001700 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001701out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001702 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001703 return ret;
1704}
1705
Patrick Daly77db4f92016-10-14 15:34:10 -07001706static void arm_smmu_domain_reinit(struct arm_smmu_domain *smmu_domain)
1707{
1708 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1709 smmu_domain->cfg.cbndx = INVALID_CBNDX;
1710 smmu_domain->secure_vmid = VMID_INVAL;
1711}
1712
Will Deacon45ae7cf2013-06-24 18:31:25 +01001713static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1714{
Joerg Roedel1d672632015-03-26 13:43:10 +01001715 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001716 struct arm_smmu_device *smmu = smmu_domain->smmu;
1717 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01001718 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001719 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07001720 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001721 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001722
Robin Murphy7e96c742016-09-14 15:26:46 +01001723 if (!smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001724 return;
1725
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001726 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001727 if (ret) {
 1728		WARN_ONCE(ret, "Whoops, powering on smmu %p failed. Leaking context bank\n",
1729 smmu);
1730 return;
1731 }
1732
Patrick Dalyc190d932016-08-30 17:23:28 -07001733 dynamic = is_dynamic_domain(domain);
1734 if (dynamic) {
1735 arm_smmu_free_asid(domain);
1736 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001737 arm_smmu_power_off(smmu->pwr);
Patrick Dalye271f212016-10-04 13:24:49 -07001738 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001739 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001740 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001741 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly77db4f92016-10-14 15:34:10 -07001742 arm_smmu_domain_reinit(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001743 return;
1744 }
1745
Will Deacon518f7132014-11-14 17:17:54 +00001746 /*
1747 * Disable the context bank and free the page tables before freeing
1748 * it.
1749 */
Will Deacon44680ee2014-06-25 11:29:12 +01001750 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001751 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01001752
Will Deacon44680ee2014-06-25 11:29:12 +01001753 if (cfg->irptndx != INVALID_IRPTNDX) {
1754 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08001755 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001756 }
1757
Markus Elfring44830b02015-11-06 18:32:41 +01001758 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07001759 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001760 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001761 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001762 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001763 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001764
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001765 arm_smmu_power_off(smmu->pwr);
Patrick Daly77db4f92016-10-14 15:34:10 -07001766 arm_smmu_domain_reinit(smmu_domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001767}
1768
Joerg Roedel1d672632015-03-26 13:43:10 +01001769static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001770{
1771 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001772
Patrick Daly09801312016-08-29 17:02:52 -07001773 /* Do not support DOMAIN_DMA for now */
1774 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01001775 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001776 /*
1777 * Allocate the domain and initialise some of its data structures.
1778 * We can't really do anything meaningful until we've added a
1779 * master.
1780 */
1781 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1782 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001783 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001784
Robin Murphy7e96c742016-09-14 15:26:46 +01001785 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
1786 iommu_get_dma_cookie(&smmu_domain->domain))) {
Robin Murphy9adb9592016-01-26 18:06:36 +00001787 kfree(smmu_domain);
1788 return NULL;
1789 }
1790
Will Deacon518f7132014-11-14 17:17:54 +00001791 mutex_init(&smmu_domain->init_mutex);
1792 spin_lock_init(&smmu_domain->pgtbl_lock);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001793 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
1794 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07001795 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001796 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Patrick Daly77db4f92016-10-14 15:34:10 -07001797 arm_smmu_domain_reinit(smmu_domain);
Joerg Roedel1d672632015-03-26 13:43:10 +01001798
1799 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001800}
1801
Joerg Roedel1d672632015-03-26 13:43:10 +01001802static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001803{
Joerg Roedel1d672632015-03-26 13:43:10 +01001804 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01001805
1806 /*
1807 * Free the domain resources. We assume that all devices have
1808 * already been detached.
1809 */
Robin Murphy9adb9592016-01-26 18:06:36 +00001810 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001811 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001812 kfree(smmu_domain);
1813}
1814
Robin Murphy468f4942016-09-12 17:13:49 +01001815static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
1816{
1817 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphyd5b41782016-09-14 15:21:39 +01001818 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
Robin Murphy468f4942016-09-12 17:13:49 +01001819
1820 if (smr->valid)
1821 reg |= SMR_VALID;
1822 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
1823}
1824
Robin Murphya754fd12016-09-12 17:13:50 +01001825static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
1826{
1827 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
1828 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
1829 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
1830 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
1831
1832 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
1833}
1834
1835static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
1836{
1837 arm_smmu_write_s2cr(smmu, idx);
1838 if (smmu->smrs)
1839 arm_smmu_write_smr(smmu, idx);
1840}
1841
Robin Murphy6668f692016-09-12 17:13:54 +01001842static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy468f4942016-09-12 17:13:49 +01001843{
1844 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy6668f692016-09-12 17:13:54 +01001845 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001846
Robin Murphy6668f692016-09-12 17:13:54 +01001847 /* Stream indexing is blissfully easy */
1848 if (!smrs)
1849 return id;
Robin Murphy468f4942016-09-12 17:13:49 +01001850
Robin Murphy6668f692016-09-12 17:13:54 +01001851 /* Validating SMRs is... less so */
1852 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1853 if (!smrs[i].valid) {
1854 /*
1855 * Note the first free entry we come across, which
1856 * we'll claim in the end if nothing else matches.
1857 */
1858 if (free_idx < 0)
1859 free_idx = i;
Robin Murphy468f4942016-09-12 17:13:49 +01001860 continue;
1861 }
Robin Murphy6668f692016-09-12 17:13:54 +01001862 /*
1863 * If the new entry is _entirely_ matched by an existing entry,
1864 * then reuse that, with the guarantee that there also cannot
1865 * be any subsequent conflicting entries. In normal use we'd
1866 * expect simply identical entries for this case, but there's
1867 * no harm in accommodating the generalisation.
1868 */
1869 if ((mask & smrs[i].mask) == mask &&
1870 !((id ^ smrs[i].id) & ~smrs[i].mask))
1871 return i;
1872 /*
1873 * If the new entry has any other overlap with an existing one,
1874 * though, then there always exists at least one stream ID
1875 * which would cause a conflict, and we can't allow that risk.
1876 */
1877 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
1878 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001879 }
1880
Robin Murphy6668f692016-09-12 17:13:54 +01001881 return free_idx;
1882}
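
/*
 * Worked example for the stream matching above (illustrative values):
 * suppose SMR[n] already holds id = 0x400, mask = 0xff, i.e. it matches
 * stream IDs 0x400-0x4ff. A new entry with id = 0x420, mask = 0x0f is
 * entirely contained, since (0x0f & 0xff) == 0x0f and
 * ((0x420 ^ 0x400) & ~0xff) == 0, so index n is simply reused. A new entry
 * with id = 0x4f0, mask = 0x1ff is not contained but does overlap, since
 * ((0x4f0 ^ 0x400) & ~(0xff | 0x1ff)) == 0, so arm_smmu_find_sme() returns
 * -EINVAL rather than risk a conflicting match.
 */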
1883
1884static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1885{
1886 if (--smmu->s2crs[idx].count)
1887 return false;
1888
1889 smmu->s2crs[idx] = s2cr_init_val;
1890 if (smmu->smrs)
1891 smmu->smrs[idx].valid = false;
1892
1893 return true;
1894}
1895
1896static int arm_smmu_master_alloc_smes(struct device *dev)
1897{
Robin Murphy06e393e2016-09-12 17:13:55 +01001898 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1899 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy6668f692016-09-12 17:13:54 +01001900 struct arm_smmu_device *smmu = cfg->smmu;
1901 struct arm_smmu_smr *smrs = smmu->smrs;
1902 struct iommu_group *group;
1903 int i, idx, ret;
1904
1905 mutex_lock(&smmu->stream_map_mutex);
1906 /* Figure out a viable stream map entry allocation */
Robin Murphy06e393e2016-09-12 17:13:55 +01001907 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy7e96c742016-09-14 15:26:46 +01001908 u16 sid = fwspec->ids[i];
1909 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1910
Robin Murphy6668f692016-09-12 17:13:54 +01001911 if (idx != INVALID_SMENDX) {
1912 ret = -EEXIST;
1913 goto out_err;
1914 }
1915
Robin Murphy7e96c742016-09-14 15:26:46 +01001916 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy6668f692016-09-12 17:13:54 +01001917 if (ret < 0)
1918 goto out_err;
1919
1920 idx = ret;
1921 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy7e96c742016-09-14 15:26:46 +01001922 smrs[idx].id = sid;
1923 smrs[idx].mask = mask;
Robin Murphy6668f692016-09-12 17:13:54 +01001924 smrs[idx].valid = true;
1925 }
1926 smmu->s2crs[idx].count++;
1927 cfg->smendx[i] = (s16)idx;
1928 }
1929
1930 group = iommu_group_get_for_dev(dev);
1931 if (!group)
1932 group = ERR_PTR(-ENOMEM);
1933 if (IS_ERR(group)) {
1934 ret = PTR_ERR(group);
1935 goto out_err;
1936 }
1937 iommu_group_put(group);
Robin Murphy468f4942016-09-12 17:13:49 +01001938
Will Deacon45ae7cf2013-06-24 18:31:25 +01001939 /* It worked! Now, poke the actual hardware */
Robin Murphy06e393e2016-09-12 17:13:55 +01001940 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01001941 arm_smmu_write_sme(smmu, idx);
1942 smmu->s2crs[idx].group = group;
1943 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001944
Robin Murphy6668f692016-09-12 17:13:54 +01001945 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001946 return 0;
1947
Robin Murphy6668f692016-09-12 17:13:54 +01001948out_err:
Robin Murphy468f4942016-09-12 17:13:49 +01001949 while (i--) {
Robin Murphy6668f692016-09-12 17:13:54 +01001950 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy468f4942016-09-12 17:13:49 +01001951 cfg->smendx[i] = INVALID_SMENDX;
1952 }
Robin Murphy6668f692016-09-12 17:13:54 +01001953 mutex_unlock(&smmu->stream_map_mutex);
1954 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001955}
1956
Robin Murphy06e393e2016-09-12 17:13:55 +01001957static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001958{
Robin Murphy06e393e2016-09-12 17:13:55 +01001959 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1960 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy1fb519a2016-09-12 17:13:53 +01001961 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01001962
Robin Murphy6668f692016-09-12 17:13:54 +01001963 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01001964 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01001965 if (arm_smmu_free_sme(smmu, idx))
1966 arm_smmu_write_sme(smmu, idx);
Robin Murphy468f4942016-09-12 17:13:49 +01001967 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001968 }
Robin Murphy6668f692016-09-12 17:13:54 +01001969 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001970}
1971
Will Deacon45ae7cf2013-06-24 18:31:25 +01001972static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphy06e393e2016-09-12 17:13:55 +01001973 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001974{
Will Deacon44680ee2014-06-25 11:29:12 +01001975 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01001976 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
1977 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
1978 u8 cbndx = smmu_domain->cfg.cbndx;
Robin Murphy6668f692016-09-12 17:13:54 +01001979 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001980
Robin Murphy06e393e2016-09-12 17:13:55 +01001981 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphya754fd12016-09-12 17:13:50 +01001982 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy6668f692016-09-12 17:13:54 +01001983 continue;
Robin Murphya754fd12016-09-12 17:13:50 +01001984
1985 s2cr[idx].type = type;
1986 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
1987 s2cr[idx].cbndx = cbndx;
1988 arm_smmu_write_s2cr(smmu, idx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001989 }
1990
1991 return 0;
1992}
1993
Patrick Daly09801312016-08-29 17:02:52 -07001994static void arm_smmu_detach_dev(struct iommu_domain *domain,
1995 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001996{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001997 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07001998 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Daly09801312016-08-29 17:02:52 -07001999 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07002000 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07002001
2002 if (dynamic)
2003 return;
2004
Patrick Daly09801312016-08-29 17:02:52 -07002005 if (!smmu) {
2006 dev_err(dev, "Domain not attached; cannot detach!\n");
2007 return;
2008 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002009
Patrick Daly8befb662016-08-17 20:03:28 -07002010 /* Remove additional vote for atomic power */
2011 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002012 WARN_ON(arm_smmu_power_on_atomic(smmu->pwr));
2013 arm_smmu_power_off(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07002014 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002015}
2016
Patrick Dalye271f212016-10-04 13:24:49 -07002017static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07002018{
Patrick Dalye271f212016-10-04 13:24:49 -07002019 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002020 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2021 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
2022 int source_vmid = VMID_HLOS;
2023 struct arm_smmu_pte_info *pte_info, *temp;
2024
Patrick Dalye271f212016-10-04 13:24:49 -07002025 if (!arm_smmu_is_domain_secure(smmu_domain))
2026 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002027
Patrick Dalye271f212016-10-04 13:24:49 -07002028 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002029 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2030 PAGE_SIZE, &source_vmid, 1,
2031 dest_vmids, dest_perms, 2);
2032 if (WARN_ON(ret))
2033 break;
2034 }
2035
2036 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2037 entry) {
2038 list_del(&pte_info->entry);
2039 kfree(pte_info);
2040 }
Patrick Dalye271f212016-10-04 13:24:49 -07002041 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002042}
2043
2044static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2045{
2046 int ret;
2047 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002048 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002049 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2050 struct arm_smmu_pte_info *pte_info, *temp;
2051
Patrick Dalye271f212016-10-04 13:24:49 -07002052 if (!arm_smmu_is_domain_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002053 return;
2054
2055 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2056 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2057 PAGE_SIZE, source_vmlist, 2,
2058 &dest_vmids, &dest_perms, 1);
2059 if (WARN_ON(ret))
2060 break;
2061 free_pages_exact(pte_info->virt_addr, pte_info->size);
2062 }
2063
2064 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2065 entry) {
2066 list_del(&pte_info->entry);
2067 kfree(pte_info);
2068 }
2069}
2070
2071static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2072{
2073 struct arm_smmu_domain *smmu_domain = cookie;
2074 struct arm_smmu_pte_info *pte_info;
2075
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002076 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002077
2078 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2079 if (!pte_info)
2080 return;
2081
2082 pte_info->virt_addr = addr;
2083 pte_info->size = size;
2084 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2085}
2086
2087static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2088{
2089 struct arm_smmu_domain *smmu_domain = cookie;
2090 struct arm_smmu_pte_info *pte_info;
2091
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002092 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002093
2094 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2095 if (!pte_info)
2096 return -ENOMEM;
2097 pte_info->virt_addr = addr;
2098 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2099 return 0;
2100}
2101
Will Deacon45ae7cf2013-06-24 18:31:25 +01002102static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2103{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002104 int ret;
Robin Murphy06e393e2016-09-12 17:13:55 +01002105 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Will Deacon518f7132014-11-14 17:17:54 +00002106 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +01002107 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly8befb662016-08-17 20:03:28 -07002108 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002109
Robin Murphy06e393e2016-09-12 17:13:55 +01002110 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002111 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2112 return -ENXIO;
2113 }
Robin Murphy06e393e2016-09-12 17:13:55 +01002114
Robin Murphy4f79b142016-10-17 12:06:21 +01002115 /*
2116 * FIXME: The arch/arm DMA API code tries to attach devices to its own
2117 * domains between of_xlate() and add_device() - we have no way to cope
2118 * with that, so until ARM gets converted to rely on groups and default
2119 * domains, just say no (but more politely than by dereferencing NULL).
2120 * This should be at least a WARN_ON once that's sorted.
2121 */
2122 if (!fwspec->iommu_priv)
2123 return -ENODEV;
2124
Robin Murphy06e393e2016-09-12 17:13:55 +01002125 smmu = fwspec_smmu(fwspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002126
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002127 /* Enable Clocks and Power */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002128 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002129 if (ret)
2130 return ret;
2131
Will Deacon518f7132014-11-14 17:17:54 +00002132 /* Ensure that the domain is finalised */
Robin Murphy06e393e2016-09-12 17:13:55 +01002133 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002134 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002135 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002136
Patrick Dalyc190d932016-08-30 17:23:28 -07002137 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002138 if (is_dynamic_domain(domain)) {
2139 ret = 0;
2140 goto out_power_off;
2141 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002142
Will Deacon45ae7cf2013-06-24 18:31:25 +01002143 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002144 * Sanity check the domain. We don't support domains across
2145 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002146 */
Robin Murphy06e393e2016-09-12 17:13:55 +01002147 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002148 dev_err(dev,
2149 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002150 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002151 ret = -EINVAL;
2152 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002153 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002154
2155 /* Looks ok, so add the device to the domain */
Robin Murphy06e393e2016-09-12 17:13:55 +01002156 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002157
2158out_power_off:
Patrick Daly466237d2016-10-14 15:59:14 -07002159 /*
2160 * Keep an additional vote for non-atomic power until domain is
2161 * detached
2162 */
2163 if (!ret && atomic_domain) {
2164 WARN_ON(arm_smmu_power_on(smmu->pwr));
2165 arm_smmu_power_off_atomic(smmu->pwr);
2166 }
2167
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002168 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002169
Will Deacon45ae7cf2013-06-24 18:31:25 +01002170 return ret;
2171}
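
/*
 * Illustrative consumer sketch (not part of this driver): a client that
 * needs to map and unmap from atomic context sets DOMAIN_ATTR_ATOMIC before
 * attaching, so the attach path above keeps a non-atomic power vote until
 * detach. Error handling is omitted and the exact attribute plumbing may
 * differ between kernels.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *	int atomic_ctx = 1;
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_ATOMIC, &atomic_ctx);
 *	iommu_attach_device(domain, dev);
 */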
2172
Will Deacon45ae7cf2013-06-24 18:31:25 +01002173static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002174 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002175{
Will Deacon518f7132014-11-14 17:17:54 +00002176 int ret;
2177 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002178 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002179	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002180
Will Deacon518f7132014-11-14 17:17:54 +00002181 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002182 return -ENODEV;
2183
Patrick Dalye271f212016-10-04 13:24:49 -07002184 arm_smmu_secure_domain_lock(smmu_domain);
2185
Will Deacon518f7132014-11-14 17:17:54 +00002186 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2187 ret = ops->map(ops, iova, paddr, size, prot);
2188 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002189
2190 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002191 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002192
Will Deacon518f7132014-11-14 17:17:54 +00002193 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002194}
2195
2196static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2197 size_t size)
2198{
Will Deacon518f7132014-11-14 17:17:54 +00002199 size_t ret;
2200 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002201 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002202	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002203
Will Deacon518f7132014-11-14 17:17:54 +00002204 if (!ops)
2205 return 0;
2206
Patrick Daly8befb662016-08-17 20:03:28 -07002207	/* Report no bytes unmapped if the SMMU could not be powered on */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002208	if (arm_smmu_domain_power_on(domain, smmu_domain->smmu))
 2209		return 0;
2210
Patrick Dalye271f212016-10-04 13:24:49 -07002211 arm_smmu_secure_domain_lock(smmu_domain);
2212
Will Deacon518f7132014-11-14 17:17:54 +00002213 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2214 ret = ops->unmap(ops, iova, size);
2215 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002216
Patrick Daly8befb662016-08-17 20:03:28 -07002217 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002218 /*
2219 * While splitting up block mappings, we might allocate page table
 2220	 * memory during unmap, so the VMIDs need to be assigned to the
2221 * memory here as well.
2222 */
2223 arm_smmu_assign_table(smmu_domain);
2224 /* Also unassign any pages that were free'd during unmap */
 2225	/* Also unassign any pages that were freed during unmap */
Patrick Dalye271f212016-10-04 13:24:49 -07002226 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002227 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002228}
2229
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002230static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2231 struct scatterlist *sg, unsigned int nents, int prot)
2232{
2233 int ret;
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002234 size_t size;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002235 unsigned long flags;
2236 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2237 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2238
2239 if (!ops)
2240 return -ENODEV;
2241
Patrick Daly8befb662016-08-17 20:03:28 -07002242 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002243 if (ret)
2244 return ret;
2245
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002246 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002247 ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002248 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002249
2250 if (!ret)
2251 arm_smmu_unmap(domain, iova, size);
2252
Patrick Daly8befb662016-08-17 20:03:28 -07002253 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002254 arm_smmu_assign_table(smmu_domain);
2255
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002256 return ret;
2257}
2258
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002259static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyad441dd2016-09-15 15:50:46 -07002260 dma_addr_t iova)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002261{
Joerg Roedel1d672632015-03-26 13:43:10 +01002262 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002263 struct arm_smmu_device *smmu = smmu_domain->smmu;
2264 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 2265	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2266 struct device *dev = smmu->dev;
2267 void __iomem *cb_base;
2268 u32 tmp;
2269 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002270 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002271
2272 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2273
Robin Murphy661d9622015-05-27 17:09:34 +01002274 /* ATS1 registers can only be written atomically */
2275 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002276 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002277 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2278 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002279 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002280
2281 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2282 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002283 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002284 dev_err(dev,
2285 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2286 &iova, &phys);
2287 phys = 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002288 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002289 }
2290
Robin Murphyf9a05f02016-04-13 18:13:01 +01002291 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002292 if (phys & CB_PAR_F) {
2293 dev_err(dev, "translation fault!\n");
2294 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002295 phys = 0;
2296 } else {
2297 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002298 }
Patrick Dalyad441dd2016-09-15 15:50:46 -07002299
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002300 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002301}
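
/*
 * Worked example for the ATOS result composition above (illustrative
 * values): for iova = 0x12345678 with a PAR page base of 0x89abcd000, the
 * low 12 bits are taken from the iova (0x678) and the rest from PAR, giving
 * phys = (0x89abcd000 & (PHYS_MASK & ~0xfffULL)) | 0x678 = 0x89abcd678.
 */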
2302
Will Deacon45ae7cf2013-06-24 18:31:25 +01002303static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002304 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002305{
Will Deacon518f7132014-11-14 17:17:54 +00002306 phys_addr_t ret;
2307 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002308 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002309	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002310
Will Deacon518f7132014-11-14 17:17:54 +00002311 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002312 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002313
Will Deacon518f7132014-11-14 17:17:54 +00002314 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002315 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002316 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002317
Will Deacon518f7132014-11-14 17:17:54 +00002318 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002319}
2320
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002321/*
2322 * This function can sleep, and cannot be called from atomic context. Will
2323 * power on register block if required. This restriction does not apply to the
2324 * original iova_to_phys() op.
2325 */
2326static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2327 dma_addr_t iova)
2328{
2329 phys_addr_t ret = 0;
2330 unsigned long flags;
2331 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002332
Patrick Dalyad441dd2016-09-15 15:50:46 -07002333 if (smmu_domain->smmu->arch_ops &&
2334 smmu_domain->smmu->arch_ops->iova_to_phys_hard)
2335 return smmu_domain->smmu->arch_ops->iova_to_phys_hard(
2336 domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002337
2338 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2339 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2340 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyad441dd2016-09-15 15:50:46 -07002341 ret = __arm_smmu_iova_to_phys_hard(domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002342
2343 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2344
2345 return ret;
2346}
2347
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002348static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002349{
Will Deacond0948942014-06-24 17:30:10 +01002350 switch (cap) {
2351 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002352 /*
2353 * Return true here as the SMMU can always send out coherent
2354 * requests.
2355 */
2356 return true;
Will Deacond0948942014-06-24 17:30:10 +01002357 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002358 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002359 case IOMMU_CAP_NOEXEC:
2360 return true;
Will Deacond0948942014-06-24 17:30:10 +01002361 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002362 return false;
Will Deacond0948942014-06-24 17:30:10 +01002363 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002364}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002365
Patrick Daly8e3371a2017-02-13 22:14:53 -08002366static struct arm_smmu_device *arm_smmu_get_by_list(struct device_node *np)
2367{
2368 struct arm_smmu_device *smmu;
2369 unsigned long flags;
2370
2371 spin_lock_irqsave(&arm_smmu_devices_lock, flags);
2372 list_for_each_entry(smmu, &arm_smmu_devices, list) {
2373 if (smmu->dev->of_node == np) {
2374 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2375 return smmu;
2376 }
2377 }
2378 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2379 return NULL;
2380}
2381
Robin Murphy7e96c742016-09-14 15:26:46 +01002382static int arm_smmu_match_node(struct device *dev, void *data)
2383{
2384 return dev->of_node == data;
2385}
2386
2387static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
2388{
2389 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
2390 np, arm_smmu_match_node);
2391 put_device(dev);
Patrick Daly8e3371a2017-02-13 22:14:53 -08002392 return dev ? dev_get_drvdata(dev) : arm_smmu_get_by_list(np);
Robin Murphy7e96c742016-09-14 15:26:46 +01002393}
2394
Will Deacon03edb222015-01-19 14:27:33 +00002395static int arm_smmu_add_device(struct device *dev)
2396{
Robin Murphy06e393e2016-09-12 17:13:55 +01002397 struct arm_smmu_device *smmu;
Robin Murphyd5b41782016-09-14 15:21:39 +01002398 struct arm_smmu_master_cfg *cfg;
Robin Murphy7e96c742016-09-14 15:26:46 +01002399 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyd5b41782016-09-14 15:21:39 +01002400 int i, ret;
2401
Robin Murphy7e96c742016-09-14 15:26:46 +01002402 if (using_legacy_binding) {
2403 ret = arm_smmu_register_legacy_master(dev, &smmu);
2404 fwspec = dev->iommu_fwspec;
2405 if (ret)
2406 goto out_free;
Robin Murphy22e6f6c2016-11-02 17:31:32 +00002407 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002408 smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
2409 if (!smmu)
2410 return -ENODEV;
2411 } else {
2412 return -ENODEV;
2413 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002414
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002415 ret = arm_smmu_power_on(smmu->pwr);
2416 if (ret)
2417 goto out_free;
2418
Robin Murphyd5b41782016-09-14 15:21:39 +01002419 ret = -EINVAL;
Robin Murphy06e393e2016-09-12 17:13:55 +01002420 for (i = 0; i < fwspec->num_ids; i++) {
2421 u16 sid = fwspec->ids[i];
Robin Murphy7e96c742016-09-14 15:26:46 +01002422 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyd5b41782016-09-14 15:21:39 +01002423
Robin Murphy06e393e2016-09-12 17:13:55 +01002424 if (sid & ~smmu->streamid_mask) {
Robin Murphyd5b41782016-09-14 15:21:39 +01002425 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002426 sid, smmu->streamid_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002427 goto out_pwr_off;
Robin Murphyd5b41782016-09-14 15:21:39 +01002428 }
Robin Murphy7e96c742016-09-14 15:26:46 +01002429 if (mask & ~smmu->smr_mask_mask) {
2430 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
2431				mask, smmu->smr_mask_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002432 goto out_pwr_off;
Robin Murphy7e96c742016-09-14 15:26:46 +01002433 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002434 }
Will Deacon03edb222015-01-19 14:27:33 +00002435
Robin Murphy06e393e2016-09-12 17:13:55 +01002436 ret = -ENOMEM;
2437 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
2438 GFP_KERNEL);
2439 if (!cfg)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002440 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002441
2442 cfg->smmu = smmu;
2443 fwspec->iommu_priv = cfg;
2444 while (i--)
2445 cfg->smendx[i] = INVALID_SMENDX;
2446
Robin Murphy6668f692016-09-12 17:13:54 +01002447 ret = arm_smmu_master_alloc_smes(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002448 if (ret)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002449 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002450
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002451 arm_smmu_power_off(smmu->pwr);
Robin Murphy06e393e2016-09-12 17:13:55 +01002452 return 0;
Robin Murphyd5b41782016-09-14 15:21:39 +01002453
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002454out_pwr_off:
2455 arm_smmu_power_off(smmu->pwr);
Robin Murphyd5b41782016-09-14 15:21:39 +01002456out_free:
Robin Murphy06e393e2016-09-12 17:13:55 +01002457 if (fwspec)
2458 kfree(fwspec->iommu_priv);
2459 iommu_fwspec_free(dev);
Robin Murphyd5b41782016-09-14 15:21:39 +01002460 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00002461}
2462
Will Deacon45ae7cf2013-06-24 18:31:25 +01002463static void arm_smmu_remove_device(struct device *dev)
2464{
Robin Murphy06e393e2016-09-12 17:13:55 +01002465 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002466 struct arm_smmu_device *smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002467
Robin Murphy06e393e2016-09-12 17:13:55 +01002468 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyd5b41782016-09-14 15:21:39 +01002469 return;
Robin Murphya754fd12016-09-12 17:13:50 +01002470
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002471 smmu = fwspec_smmu(fwspec);
2472 if (arm_smmu_power_on(smmu->pwr)) {
2473 WARN_ON(1);
2474 return;
2475 }
2476
Robin Murphy06e393e2016-09-12 17:13:55 +01002477 arm_smmu_master_free_smes(fwspec);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002478 iommu_group_remove_device(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002479 kfree(fwspec->iommu_priv);
2480 iommu_fwspec_free(dev);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002481 arm_smmu_power_off(smmu->pwr);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002482}
2483
Joerg Roedelaf659932015-10-21 23:51:41 +02002484static struct iommu_group *arm_smmu_device_group(struct device *dev)
2485{
Robin Murphy06e393e2016-09-12 17:13:55 +01002486 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2487 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy6668f692016-09-12 17:13:54 +01002488 struct iommu_group *group = NULL;
2489 int i, idx;
2490
Robin Murphy06e393e2016-09-12 17:13:55 +01002491 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002492 if (group && smmu->s2crs[idx].group &&
2493 group != smmu->s2crs[idx].group)
2494 return ERR_PTR(-EINVAL);
2495
2496 group = smmu->s2crs[idx].group;
2497 }
2498
2499 if (group)
2500 return group;
Joerg Roedelaf659932015-10-21 23:51:41 +02002501
2502 if (dev_is_pci(dev))
2503 group = pci_device_group(dev);
2504 else
2505 group = generic_device_group(dev);
2506
Joerg Roedelaf659932015-10-21 23:51:41 +02002507 return group;
2508}
2509
Will Deaconc752ce42014-06-25 22:46:31 +01002510static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
2511 enum iommu_attr attr, void *data)
2512{
Joerg Roedel1d672632015-03-26 13:43:10 +01002513 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002514 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01002515
2516 switch (attr) {
2517 case DOMAIN_ATTR_NESTING:
2518 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
2519 return 0;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08002520 case DOMAIN_ATTR_PT_BASE_ADDR:
2521 *((phys_addr_t *)data) =
2522 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2523 return 0;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002524 case DOMAIN_ATTR_CONTEXT_BANK:
2525 /* context bank index isn't valid until we are attached */
2526 if (smmu_domain->smmu == NULL)
2527 return -ENODEV;
2528
2529 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
2530 ret = 0;
2531 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002532 case DOMAIN_ATTR_TTBR0: {
2533 u64 val;
2534 struct arm_smmu_device *smmu = smmu_domain->smmu;
2535 /* not valid until we are attached */
2536 if (smmu == NULL)
2537 return -ENODEV;
2538
2539 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2540 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
2541 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
2542 << (TTBRn_ASID_SHIFT);
2543 *((u64 *)data) = val;
2544 ret = 0;
2545 break;
2546 }
2547 case DOMAIN_ATTR_CONTEXTIDR:
2548 /* not valid until attached */
2549 if (smmu_domain->smmu == NULL)
2550 return -ENODEV;
2551 *((u32 *)data) = smmu_domain->cfg.procid;
2552 ret = 0;
2553 break;
2554 case DOMAIN_ATTR_PROCID:
2555 *((u32 *)data) = smmu_domain->cfg.procid;
2556 ret = 0;
2557 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002558 case DOMAIN_ATTR_DYNAMIC:
2559 *((int *)data) = !!(smmu_domain->attributes
2560 & (1 << DOMAIN_ATTR_DYNAMIC));
2561 ret = 0;
2562 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002563 case DOMAIN_ATTR_NON_FATAL_FAULTS:
2564 *((int *)data) = !!(smmu_domain->attributes
2565 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
2566 ret = 0;
2567 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07002568 case DOMAIN_ATTR_S1_BYPASS:
2569 *((int *)data) = !!(smmu_domain->attributes
2570 & (1 << DOMAIN_ATTR_S1_BYPASS));
2571 ret = 0;
2572 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002573 case DOMAIN_ATTR_SECURE_VMID:
2574 *((int *)data) = smmu_domain->secure_vmid;
2575 ret = 0;
2576 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08002577 case DOMAIN_ATTR_PGTBL_INFO: {
2578 struct iommu_pgtbl_info *info = data;
2579
2580 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
2581 ret = -ENODEV;
2582 break;
2583 }
2584 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
2585 ret = 0;
2586 break;
2587 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002588 case DOMAIN_ATTR_FAST:
2589 *((int *)data) = !!(smmu_domain->attributes
2590 & (1 << DOMAIN_ATTR_FAST));
2591 ret = 0;
2592 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002593 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2594 *((int *)data) = !!(smmu_domain->attributes &
2595 (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
2596 ret = 0;
2597 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08002598 case DOMAIN_ATTR_EARLY_MAP:
2599 *((int *)data) = !!(smmu_domain->attributes
2600 & (1 << DOMAIN_ATTR_EARLY_MAP));
2601 ret = 0;
2602 break;
Mitchel Humpherys05314f32016-06-07 16:04:40 -07002603 case DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
2604 if (!smmu_domain->smmu)
2605 return -ENODEV;
Liam Mark53cf2342016-12-20 11:36:07 -08002606 *((int *)data) = is_iommu_pt_coherent(smmu_domain);
2607 ret = 0;
2608 break;
2609 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
2610 *((int *)data) = !!(smmu_domain->attributes
2611 & (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
Mitchel Humpherys05314f32016-06-07 16:04:40 -07002612 ret = 0;
2613 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002614 default:
2615 return -ENODEV;
2616 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002617 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002618}
2619
2620static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
2621 enum iommu_attr attr, void *data)
2622{
Will Deacon518f7132014-11-14 17:17:54 +00002623 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01002624 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01002625
Will Deacon518f7132014-11-14 17:17:54 +00002626 mutex_lock(&smmu_domain->init_mutex);
2627
Will Deaconc752ce42014-06-25 22:46:31 +01002628 switch (attr) {
2629 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00002630 if (smmu_domain->smmu) {
2631 ret = -EPERM;
2632 goto out_unlock;
2633 }
2634
Will Deaconc752ce42014-06-25 22:46:31 +01002635 if (*(int *)data)
2636 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2637 else
2638 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2639
Will Deacon518f7132014-11-14 17:17:54 +00002640 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002641 case DOMAIN_ATTR_PROCID:
2642 if (smmu_domain->smmu != NULL) {
2643 dev_err(smmu_domain->smmu->dev,
2644 "cannot change procid attribute while attached\n");
2645 ret = -EBUSY;
2646 break;
2647 }
2648 smmu_domain->cfg.procid = *((u32 *)data);
2649 ret = 0;
2650 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002651 case DOMAIN_ATTR_DYNAMIC: {
2652 int dynamic = *((int *)data);
2653
2654 if (smmu_domain->smmu != NULL) {
2655 dev_err(smmu_domain->smmu->dev,
2656 "cannot change dynamic attribute while attached\n");
2657 ret = -EBUSY;
2658 break;
2659 }
2660
2661 if (dynamic)
2662 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
2663 else
2664 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
2665 ret = 0;
2666 break;
2667 }
2668 case DOMAIN_ATTR_CONTEXT_BANK:
2669 /* context bank can't be set while attached */
2670 if (smmu_domain->smmu != NULL) {
2671 ret = -EBUSY;
2672 break;
2673 }
2674 /* ... and it can only be set for dynamic contexts. */
2675 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
2676 ret = -EINVAL;
2677 break;
2678 }
2679
2680 /* this will be validated during attach */
2681 smmu_domain->cfg.cbndx = *((unsigned int *)data);
2682 ret = 0;
2683 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002684 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
2685 u32 non_fatal_faults = *((int *)data);
2686
2687 if (non_fatal_faults)
2688 smmu_domain->attributes |=
2689 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
2690 else
2691 smmu_domain->attributes &=
2692 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
2693 ret = 0;
2694 break;
2695 }
Patrick Dalye62d3362016-03-15 18:58:28 -07002696 case DOMAIN_ATTR_S1_BYPASS: {
2697 int bypass = *((int *)data);
2698
2699 /* bypass can't be changed while attached */
2700 if (smmu_domain->smmu != NULL) {
2701 ret = -EBUSY;
2702 break;
2703 }
2704 if (bypass)
2705 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
2706 else
2707 smmu_domain->attributes &=
2708 ~(1 << DOMAIN_ATTR_S1_BYPASS);
2709
2710 ret = 0;
2711 break;
2712 }
Patrick Daly8befb662016-08-17 20:03:28 -07002713 case DOMAIN_ATTR_ATOMIC:
2714 {
2715 int atomic_ctx = *((int *)data);
2716
2717 /* can't be changed while attached */
2718 if (smmu_domain->smmu != NULL) {
2719 ret = -EBUSY;
2720 break;
2721 }
2722 if (atomic_ctx)
2723 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
2724 else
2725 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
2726 break;
2727 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002728 case DOMAIN_ATTR_SECURE_VMID:
2729 if (smmu_domain->secure_vmid != VMID_INVAL) {
2730 ret = -ENODEV;
2731 WARN(1, "secure vmid already set!");
2732 break;
2733 }
2734 smmu_domain->secure_vmid = *((int *)data);
2735 break;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002736 case DOMAIN_ATTR_FAST:
2737 if (*((int *)data))
2738 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
2739 ret = 0;
2740 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002741 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2742 /* can't be changed while attached */
2743 if (smmu_domain->smmu != NULL) {
2744 ret = -EBUSY;
2745 break;
2746 }
2747 if (*((int *)data))
2748 smmu_domain->attributes |=
2749 1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
2750 ret = 0;
2751 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08002752 case DOMAIN_ATTR_EARLY_MAP: {
2753 int early_map = *((int *)data);
2754
2755 ret = 0;
2756 if (early_map) {
2757 smmu_domain->attributes |=
2758 1 << DOMAIN_ATTR_EARLY_MAP;
2759 } else {
2760 if (smmu_domain->smmu)
2761 ret = arm_smmu_enable_s1_translations(
2762 smmu_domain);
2763
2764 if (!ret)
2765 smmu_domain->attributes &=
2766 ~(1 << DOMAIN_ATTR_EARLY_MAP);
2767 }
2768 break;
2769 }
Liam Mark53cf2342016-12-20 11:36:07 -08002770 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT: {
2771 int force_coherent = *((int *)data);
2772
2773 if (smmu_domain->smmu != NULL) {
2774 dev_err(smmu_domain->smmu->dev,
2775 "cannot change force coherent attribute while attached\n");
2776 ret = -EBUSY;
2777 break;
2778 }
2779
2780 if (force_coherent)
2781 smmu_domain->attributes |=
2782 1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
2783 else
2784 smmu_domain->attributes &=
2785 ~(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT);
2786
2787 ret = 0;
2788 break;
2789 }
2790
Will Deaconc752ce42014-06-25 22:46:31 +01002791 default:
Will Deacon518f7132014-11-14 17:17:54 +00002792 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01002793 }
Will Deacon518f7132014-11-14 17:17:54 +00002794
2795out_unlock:
2796 mutex_unlock(&smmu_domain->init_mutex);
2797 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002798}
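/*
 * Usage sketch (hypothetical client code, not part of this driver): the
 * QCOM-specific attributes handled above are normally set after
 * iommu_domain_alloc() and before iommu_attach_device(), since most of them
 * reject changes while a domain is attached, e.g.
 *
 *	int one = 1;
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_ATOMIC, &one);
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_S1_BYPASS, &one);
 *	iommu_attach_device(domain, dev);
 */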
2799
Robin Murphy7e96c742016-09-14 15:26:46 +01002800static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
2801{
2802 u32 fwid = 0;
2803
2804 if (args->args_count > 0)
2805 fwid |= (u16)args->args[0];
2806
2807 if (args->args_count > 1)
2808 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
2809
2810 return iommu_fwspec_add_ids(dev, &fwid, 1);
2811}
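/*
 * Illustrative note (all values below are hypothetical): with the generic
 * "iommus" binding handled above, the first cell is the StreamID and the
 * optional second cell is the SMR mask, packed into a single fwid as
 * (mask << SMR_MASK_SHIFT) | sid before being added to the fwspec, e.g.
 *
 *	widget@f9800000 {
 *		...
 *		iommus = <&apps_smmu 0x2000 0x7f>;   (SID 0x2000, SMR mask 0x7f)
 *	};
 */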
2812
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08002813static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain)
2814{
2815 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2816 struct arm_smmu_device *smmu = smmu_domain->smmu;
2817 void __iomem *cb_base;
2818 u32 reg;
2819 int ret;
2820
2821 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2822 ret = arm_smmu_power_on(smmu->pwr);
2823 if (ret)
2824 return ret;
2825
2826 reg = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
2827 reg |= SCTLR_M;
2828
2829 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
2830 arm_smmu_power_off(smmu->pwr);
2831 return ret;
2832}
2833
Liam Mark3ba41cf2016-12-09 14:39:04 -08002834static bool arm_smmu_is_iova_coherent(struct iommu_domain *domain,
2835 dma_addr_t iova)
2836{
2837 bool ret;
2838 unsigned long flags;
2839 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2840 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2841
2842 if (!ops)
2843 return false;
2844
2845 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2846 ret = ops->is_iova_coherent(ops, iova);
2847 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2848 return ret;
2849}
2850
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002851static void arm_smmu_trigger_fault(struct iommu_domain *domain,
2852 unsigned long flags)
2853{
2854 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2855 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2856 struct arm_smmu_device *smmu;
2857 void __iomem *cb_base;
2858
2859 if (!smmu_domain->smmu) {
2860 pr_err("Can't trigger faults on non-attached domains\n");
2861 return;
2862 }
2863
2864 smmu = smmu_domain->smmu;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002865 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002866 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002867
2868 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2869 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
2870 flags, cfg->cbndx);
2871 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07002872 /* give the interrupt time to fire... */
2873 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002874
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002875 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002876}
2877
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002878static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
2879 unsigned long offset)
2880{
2881 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2882 struct arm_smmu_device *smmu;
2883 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2884 void __iomem *cb_base;
2885 unsigned long val;
2886
2887 if (offset >= SZ_4K) {
2888 pr_err("Invalid offset: 0x%lx\n", offset);
2889 return 0;
2890 }
2891
2892 smmu = smmu_domain->smmu;
2893 if (!smmu) {
2894 WARN(1, "Can't read registers of a detached domain\n");
2895 val = 0;
2896 return val;
2897 }
2898
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002899 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002900 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002901
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002902 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2903 val = readl_relaxed(cb_base + offset);
2904
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002905 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002906 return val;
2907}
2908
2909static void arm_smmu_reg_write(struct iommu_domain *domain,
2910 unsigned long offset, unsigned long val)
2911{
2912 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2913 struct arm_smmu_device *smmu;
2914 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2915 void __iomem *cb_base;
2916
2917 if (offset >= SZ_4K) {
2918 pr_err("Invalid offset: 0x%lx\n", offset);
2919 return;
2920 }
2921
2922 smmu = smmu_domain->smmu;
2923 if (!smmu) {
2924		WARN(1, "Can't write registers of a detached domain\n");
2925 return;
2926 }
2927
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002928 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002929 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002930
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002931 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2932 writel_relaxed(val, cb_base + offset);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002933
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002934 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002935}
2936
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08002937static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
2938{
2939 arm_smmu_tlb_inv_context(to_smmu_domain(domain));
2940}
2941
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002942static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
2943{
2944 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2945
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002946 return arm_smmu_power_on(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002947}
2948
2949static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
2950{
2951 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2952
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002953 arm_smmu_power_off(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002954}
2955
Will Deacon518f7132014-11-14 17:17:54 +00002956static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01002957 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01002958 .domain_alloc = arm_smmu_domain_alloc,
2959 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01002960 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07002961 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01002962 .map = arm_smmu_map,
2963 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002964 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01002965 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002966 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01002967 .add_device = arm_smmu_add_device,
2968 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02002969 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01002970 .domain_get_attr = arm_smmu_domain_get_attr,
2971 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy7e96c742016-09-14 15:26:46 +01002972 .of_xlate = arm_smmu_of_xlate,
Will Deacon518f7132014-11-14 17:17:54 +00002973 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002974 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002975 .reg_read = arm_smmu_reg_read,
2976 .reg_write = arm_smmu_reg_write,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08002977 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002978 .enable_config_clocks = arm_smmu_enable_config_clocks,
2979 .disable_config_clocks = arm_smmu_disable_config_clocks,
Liam Mark3ba41cf2016-12-09 14:39:04 -08002980 .is_iova_coherent = arm_smmu_is_iova_coherent,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002981};
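/*
 * Usage sketch (hypothetical caller, not from this file): the vendor-specific
 * ops above are reached through the domain's ops table once a device is
 * attached, e.g.
 *
 *	phys_addr_t pa;
 *
 *	pa = domain->ops->iova_to_phys_hard(domain, iova);
 *
 * Note that iova_to_phys_hard() may power on the SMMU and sleep, so it must
 * not be called from atomic context (see arm_smmu_iova_to_phys_hard()).
 */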
2982
Patrick Dalyad441dd2016-09-15 15:50:46 -07002983#define IMPL_DEF1_MICRO_MMU_CTRL 0
2984#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
2985#define MICRO_MMU_CTRL_IDLE (1 << 3)
2986
2987/* Definitions for implementation-defined registers */
2988#define ACTLR_QCOM_OSH_SHIFT 28
2989#define ACTLR_QCOM_OSH 1
2990
2991#define ACTLR_QCOM_ISH_SHIFT 29
2992#define ACTLR_QCOM_ISH 1
2993
2994#define ACTLR_QCOM_NSH_SHIFT 30
2995#define ACTLR_QCOM_NSH 1
2996
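/*
 * Derived from the shifts above (illustrative note, not in the original
 * source): qsmmuv2_device_reset() programs OSH, ISH and NSH together, i.e.
 * it writes ACTLR = (1 << 28) | (1 << 29) | (1 << 30) = 0x70000000 to every
 * context bank.
 */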
2997static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002998{
2999 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003000 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003001
3002 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
3003 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
3004 0, 30000)) {
3005 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
3006 return -EBUSY;
3007 }
3008
3009 return 0;
3010}
3011
Patrick Dalyad441dd2016-09-15 15:50:46 -07003012static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003013{
3014 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3015 u32 reg;
3016
3017 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3018 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
3019 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3020
Patrick Dalyad441dd2016-09-15 15:50:46 -07003021 return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003022}
3023
Patrick Dalyad441dd2016-09-15 15:50:46 -07003024static int qsmmuv2_halt(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003025{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003026 return __qsmmuv2_halt(smmu, true);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003027}
3028
Patrick Dalyad441dd2016-09-15 15:50:46 -07003029static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003030{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003031 return __qsmmuv2_halt(smmu, false);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003032}
3033
Patrick Dalyad441dd2016-09-15 15:50:46 -07003034static void qsmmuv2_resume(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003035{
3036 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3037 u32 reg;
3038
3039 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3040 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
3041 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3042}
3043
Patrick Dalyad441dd2016-09-15 15:50:46 -07003044static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003045{
3046 int i;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003047 u32 val;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003048 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003049 void __iomem *cb_base;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003050
Patrick Dalyad441dd2016-09-15 15:50:46 -07003051 /*
3052 * SCTLR.M must be disabled here per ARM SMMUv2 spec
3053 * to prevent table walks with an inconsistent state.
3054 */
3055 for (i = 0; i < smmu->num_context_banks; ++i) {
3056 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3057 val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
3058 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
3059 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
3060 writel_relaxed(val, cb_base + ARM_SMMU_CB_ACTLR);
3061 }
3062
3063 /* Program implementation defined registers */
3064 qsmmuv2_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003065 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
3066 writel_relaxed(regs[i].value,
3067 ARM_SMMU_GR0(smmu) + regs[i].offset);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003068 qsmmuv2_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003069}
3070
Patrick Dalyad441dd2016-09-15 15:50:46 -07003071static phys_addr_t __qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
3072 dma_addr_t iova, bool halt)
3073{
3074 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3075 struct arm_smmu_device *smmu = smmu_domain->smmu;
3076 int ret;
3077 phys_addr_t phys = 0;
3078 unsigned long flags;
3079
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003080 ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003081 if (ret)
3082 return 0;
3083
3084 if (halt) {
3085 ret = qsmmuv2_halt(smmu);
3086 if (ret)
3087 goto out_power_off;
3088 }
3089
3090 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
3091 spin_lock(&smmu->atos_lock);
3092 phys = __arm_smmu_iova_to_phys_hard(domain, iova);
3093 spin_unlock(&smmu->atos_lock);
3094 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
3095
3096 if (halt)
3097 qsmmuv2_resume(smmu);
3098
3099out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003100 arm_smmu_power_off(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003101 return phys;
3102}
3103
3104static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
3105 dma_addr_t iova)
3106{
3107 return __qsmmuv2_iova_to_phys_hard(domain, iova, true);
3108}
3109
3110static void qsmmuv2_iova_to_phys_fault(
3111 struct iommu_domain *domain,
3112 dma_addr_t iova, phys_addr_t *phys,
3113 phys_addr_t *phys_post_tlbiall)
3114{
3115 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3116 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3117 struct arm_smmu_device *smmu;
3118 void __iomem *cb_base;
3119 u64 sctlr, sctlr_orig;
3120 u32 fsr;
3121
3122 smmu = smmu_domain->smmu;
3123 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3124
3125 qsmmuv2_halt_nowait(smmu);
3126
3127 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
3128
3129 qsmmuv2_wait_for_halt(smmu);
3130
3131 /* clear FSR to allow ATOS to log any faults */
3132 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
3133 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
3134
3135 /* disable stall mode momentarily */
3136 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3137 sctlr = sctlr_orig & ~SCTLR_CFCFG;
3138 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
3139
3140 *phys = __qsmmuv2_iova_to_phys_hard(domain, iova, false);
3141 arm_smmu_tlb_inv_context(smmu_domain);
3142 *phys_post_tlbiall = __qsmmuv2_iova_to_phys_hard(domain, iova, false);
3143
3144 /* restore SCTLR */
3145 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
3146
3147 qsmmuv2_resume(smmu);
3148}
3149
3150struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
3151 .device_reset = qsmmuv2_device_reset,
3152 .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
3153 .iova_to_phys_fault = qsmmuv2_iova_to_phys_fault,
3154};
3155
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003156static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003157{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003158 int i;
3159 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003160 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003161 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003162
Peng Fan3ca37122016-05-03 21:50:30 +08003163 /*
3164	 * Before clearing ARM_MMU500_ACTLR_CPRE, the CACHE_LOCK bit of ACR
3165	 * must be cleared first. Note that the CACHE_LOCK bit is only
3166	 * present in MMU-500 r2 onwards.
3167 */
3168 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
3169 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
3170 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
3171 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
3172 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
3173 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
3174 }
3175
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003176 /* Make sure all context banks are disabled and clear CB_FSR */
3177 for (i = 0; i < smmu->num_context_banks; ++i) {
3178 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3179 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
3180 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003181 /*
3182 * Disable MMU-500's not-particularly-beneficial next-page
3183 * prefetcher for the sake of errata #841119 and #826419.
3184 */
3185 if (smmu->model == ARM_MMU500) {
3186 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
3187 reg &= ~ARM_MMU500_ACTLR_CPRE;
3188 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
3189 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003190 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003191}
3192
3193static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
3194{
3195 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy468f4942016-09-12 17:13:49 +01003196 int i;
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003197 u32 reg;
3198
3199 /* clear global FSR */
3200 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3201 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3202
Robin Murphy468f4942016-09-12 17:13:49 +01003203 /*
3204 * Reset stream mapping groups: Initial values mark all SMRn as
3205 * invalid and all S2CRn as bypass unless overridden.
3206 */
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003207 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
Robin Murphya754fd12016-09-12 17:13:50 +01003208 for (i = 0; i < smmu->num_mapping_groups; ++i)
3209 arm_smmu_write_sme(smmu, i);
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003210
3211 arm_smmu_context_bank_reset(smmu);
3212 }
Will Deacon1463fe42013-07-31 19:21:27 +01003213
Will Deacon45ae7cf2013-06-24 18:31:25 +01003214 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003215 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
3216 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
3217
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003218 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003219
Will Deacon45ae7cf2013-06-24 18:31:25 +01003220 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003221 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003222
3223 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003224 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003225
Robin Murphy25a1c962016-02-10 14:25:33 +00003226 /* Enable client access, handling unmatched streams as appropriate */
3227 reg &= ~sCR0_CLIENTPD;
3228 if (disable_bypass)
3229 reg |= sCR0_USFCFG;
3230 else
3231 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003232
3233 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003234 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003235
3236 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003237 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003238
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003239 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3240 reg |= sCR0_VMID16EN;
3241
Will Deacon45ae7cf2013-06-24 18:31:25 +01003242 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003243 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003244 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Dalyd7476202016-09-08 18:23:28 -07003245
3246 /* Manage any implementation defined features */
3247 arm_smmu_arch_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003248}
3249
3250static int arm_smmu_id_size_to_bits(int size)
3251{
3252 switch (size) {
3253 case 0:
3254 return 32;
3255 case 1:
3256 return 36;
3257 case 2:
3258 return 40;
3259 case 3:
3260 return 42;
3261 case 4:
3262 return 44;
3263 case 5:
3264 default:
3265 return 48;
3266 }
3267}
3268
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003269static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
3270{
3271 struct device *dev = smmu->dev;
3272 int i, ntuples, ret;
3273 u32 *tuples;
3274 struct arm_smmu_impl_def_reg *regs, *regit;
3275
3276 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
3277 return 0;
3278
3279 ntuples /= sizeof(u32);
3280 if (ntuples % 2) {
3281 dev_err(dev,
3282 "Invalid number of attach-impl-defs registers: %d\n",
3283 ntuples);
3284 return -EINVAL;
3285 }
3286
3287 regs = devm_kmalloc(
3288 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
3289 GFP_KERNEL);
3290 if (!regs)
3291 return -ENOMEM;
3292
3293 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
3294 if (!tuples)
3295 return -ENOMEM;
3296
3297 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
3298 tuples, ntuples);
3299 if (ret)
3300 return ret;
3301
3302 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
3303 regit->offset = tuples[i];
3304 regit->value = tuples[i + 1];
3305 }
3306
3307 devm_kfree(dev, tuples);
3308
3309 smmu->impl_def_attach_registers = regs;
3310 smmu->num_impl_def_attach_registers = ntuples / 2;
3311
3312 return 0;
3313}
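/*
 * Sketch of the expected DT layout (register offsets and values below are
 * purely illustrative): the property is a flat list of <offset value> pairs
 * relative to the SMMU global register space, applied (for QSMMUv2) in
 * qsmmuv2_device_reset() while the SMMU is halted, e.g.
 *
 *	smmu@d00000 {
 *		...
 *		attach-impl-defs = <0x6000 0x270>,
 *				   <0x6060 0x1055>;
 *	};
 *
 * Each pair becomes one arm_smmu_impl_def_reg {offset, value} entry above.
 */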
3314
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003315
3316static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003317{
3318 const char *cname;
3319 struct property *prop;
3320 int i;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003321 struct device *dev = pwr->dev;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003322
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003323 pwr->num_clocks =
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003324 of_property_count_strings(dev->of_node, "clock-names");
3325
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003326 if (pwr->num_clocks < 1) {
3327 pwr->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003328 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07003329 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003330
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003331 pwr->clocks = devm_kzalloc(
3332 dev, sizeof(*pwr->clocks) * pwr->num_clocks,
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003333 GFP_KERNEL);
3334
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003335 if (!pwr->clocks)
3336 return -ENOMEM;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003337
3338 i = 0;
3339 of_property_for_each_string(dev->of_node, "clock-names",
3340 prop, cname) {
3341 struct clk *c = devm_clk_get(dev, cname);
3342
3343 if (IS_ERR(c)) {
3344			dev_err(dev, "Couldn't get clock: %s\n",
3345					cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07003346 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003347 }
3348
3349 if (clk_get_rate(c) == 0) {
3350 long rate = clk_round_rate(c, 1000);
3351
3352 clk_set_rate(c, rate);
3353 }
3354
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003355 pwr->clocks[i] = c;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003356
3357 ++i;
3358 }
3359 return 0;
3360}
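/*
 * Example bindings consumed here (clock names and phandles are illustrative;
 * the driver simply iterates whatever "clock-names" lists): each name must
 * have a matching entry in "clocks", e.g.
 *
 *	clocks = <&gcc GCC_SMMU_CFG_CLK>, <&gcc GCC_SMMU_AXI_CLK>;
 *	clock-names = "iface_clk", "axi_clk";
 */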
3361
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003362static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003363{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003364 const char *cname;
3365 struct property *prop;
3366 int i, ret = 0;
3367 struct device *dev = pwr->dev;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003368
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003369 pwr->num_gdscs =
3370 of_property_count_strings(dev->of_node, "qcom,regulator-names");
3371
3372 if (pwr->num_gdscs < 1) {
3373 pwr->num_gdscs = 0;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003374 return 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003375 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003376
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003377 pwr->gdscs = devm_kzalloc(
3378 dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
3379
3380 if (!pwr->gdscs)
3381 return -ENOMEM;
3382
3383 i = 0;
3384 of_property_for_each_string(dev->of_node, "qcom,regulator-names",
3385 prop, cname)
3386 pwr->gdscs[i].supply = cname;
3387
3388 ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
3389 return ret;
3390}
3391
3392static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
3393{
3394 struct device *dev = pwr->dev;
3395
3396 /* We don't want the bus APIs to print an error message */
3397 if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
3398 dev_dbg(dev, "No bus scaling info\n");
3399 return 0;
3400 }
3401
3402 pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
3403 if (!pwr->bus_dt_data) {
3404 dev_err(dev, "Unable to read bus-scaling from devicetree\n");
3405 return -EINVAL;
3406 }
3407
3408 pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
3409 if (!pwr->bus_client) {
3410 dev_err(dev, "Bus client registration failed\n");
3411 msm_bus_cl_clear_pdata(pwr->bus_dt_data);
3412 return -EINVAL;
3413 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003414
3415 return 0;
3416}
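/*
 * Illustrative msm-bus client node (all names and IDs below are placeholders;
 * real master/slave IDs come from dt-bindings/msm/msm-bus-ids.h and the
 * msm-bus binding documentation):
 *
 *	qcom,msm-bus,name = "smmu_pwr";
 *	qcom,msm-bus,num-cases = <2>;
 *	qcom,msm-bus,num-paths = <1>;
 *	qcom,msm-bus,vectors-KBps =
 *		<MSM_BUS_MASTER_EXAMPLE MSM_BUS_SLAVE_EXAMPLE 0 0>,
 *		<MSM_BUS_MASTER_EXAMPLE MSM_BUS_SLAVE_EXAMPLE 0 1000>;
 */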
3417
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003418/*
3419 * Cleanup is handled by devm; non-devm resources must be released explicitly.
3420 */
3421static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
3422 struct platform_device *pdev)
Patrick Daly2764f952016-09-06 19:22:44 -07003423{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003424 struct arm_smmu_power_resources *pwr;
3425 int ret;
Patrick Daly2764f952016-09-06 19:22:44 -07003426
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003427 pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
3428 if (!pwr)
3429 return ERR_PTR(-ENOMEM);
Patrick Daly2764f952016-09-06 19:22:44 -07003430
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003431 pwr->dev = &pdev->dev;
3432 pwr->pdev = pdev;
3433 mutex_init(&pwr->power_lock);
3434 spin_lock_init(&pwr->clock_refs_lock);
Patrick Daly2764f952016-09-06 19:22:44 -07003435
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003436 ret = arm_smmu_init_clocks(pwr);
3437 if (ret)
3438 return ERR_PTR(ret);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003439
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003440 ret = arm_smmu_init_regulators(pwr);
3441 if (ret)
3442 return ERR_PTR(ret);
3443
3444 ret = arm_smmu_init_bus_scaling(pwr);
3445 if (ret)
3446 return ERR_PTR(ret);
3447
3448 return pwr;
Patrick Daly2764f952016-09-06 19:22:44 -07003449}
3450
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003451/*
3452 * Bus APIs are not devm-safe.
3453 */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003454static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003455{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003456 msm_bus_scale_unregister_client(pwr->bus_client);
3457 msm_bus_cl_clear_pdata(pwr->bus_dt_data);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003458}
3459
Will Deacon45ae7cf2013-06-24 18:31:25 +01003460static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
3461{
3462 unsigned long size;
3463 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
3464 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003465 bool cttw_dt, cttw_reg;
Robin Murphya754fd12016-09-12 17:13:50 +01003466 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003467
Mitchel Humpherysba822582015-10-20 11:37:41 -07003468 dev_dbg(smmu->dev, "probing hardware configuration...\n");
3469 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01003470 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003471
3472 /* ID0 */
3473 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01003474
3475 /* Restrict available stages based on module parameter */
3476 if (force_stage == 1)
3477 id &= ~(ID0_S2TS | ID0_NTS);
3478 else if (force_stage == 2)
3479 id &= ~(ID0_S1TS | ID0_NTS);
3480
Will Deacon45ae7cf2013-06-24 18:31:25 +01003481 if (id & ID0_S1TS) {
3482 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003483 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003484 }
3485
3486 if (id & ID0_S2TS) {
3487 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003488 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003489 }
3490
3491 if (id & ID0_NTS) {
3492 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003493 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003494 }
3495
3496 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01003497 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003498 dev_err(smmu->dev, "\tno translation support!\n");
3499 return -ENODEV;
3500 }
3501
Robin Murphyb7862e32016-04-13 18:13:03 +01003502 if ((id & ID0_S1TS) &&
3503 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003504 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003505 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003506 }
3507
Robin Murphybae2c2d2015-07-29 19:46:05 +01003508 /*
3509 * In order for DMA API calls to work properly, we must defer to what
3510 * the DT says about coherency, regardless of what the hardware claims.
3511 * Fortunately, this also opens up a workaround for systems where the
3512 * ID register value has ended up configured incorrectly.
3513 */
3514 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
3515 cttw_reg = !!(id & ID0_CTTW);
3516 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003517 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003518 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003519 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01003520 cttw_dt ? "" : "non-");
3521 if (cttw_dt != cttw_reg)
3522 dev_notice(smmu->dev,
3523 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003524
Robin Murphy53867802016-09-12 17:13:48 +01003525 /* Max. number of entries we have for stream matching/indexing */
3526 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
3527 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003528 if (id & ID0_SMS) {
Robin Murphy53867802016-09-12 17:13:48 +01003529 u32 smr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003530
3531 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy53867802016-09-12 17:13:48 +01003532 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
3533 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003534 dev_err(smmu->dev,
3535 "stream-matching supported, but no SMRs present!\n");
3536 return -ENODEV;
3537 }
3538
Robin Murphy53867802016-09-12 17:13:48 +01003539 /*
3540 * SMR.ID bits may not be preserved if the corresponding MASK
3541 * bits are set, so check each one separately. We can reject
3542 * masters later if they try to claim IDs outside these masks.
3543 */
3544 smr = smmu->streamid_mask << SMR_ID_SHIFT;
3545 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
3546 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
3547 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003548
Robin Murphy53867802016-09-12 17:13:48 +01003549 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
3550 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
3551 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
3552 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
Dhaval Patel031d7462015-05-09 14:47:29 -07003553
Robin Murphy468f4942016-09-12 17:13:49 +01003554 /* Zero-initialised to mark as invalid */
3555 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
3556 GFP_KERNEL);
3557 if (!smmu->smrs)
3558 return -ENOMEM;
3559
Robin Murphy53867802016-09-12 17:13:48 +01003560 dev_notice(smmu->dev,
3561			   "\tstream matching with %lu register groups, mask 0x%x\n",
3562 size, smmu->smr_mask_mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003563 }
Robin Murphya754fd12016-09-12 17:13:50 +01003564 /* s2cr->type == 0 means translation, so initialise explicitly */
3565 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
3566 GFP_KERNEL);
3567 if (!smmu->s2crs)
3568 return -ENOMEM;
3569 for (i = 0; i < size; i++)
3570 smmu->s2crs[i] = s2cr_init_val;
3571
Robin Murphy53867802016-09-12 17:13:48 +01003572 smmu->num_mapping_groups = size;
Robin Murphy6668f692016-09-12 17:13:54 +01003573 mutex_init(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003574
Robin Murphy7602b872016-04-28 17:12:09 +01003575 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
3576 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
3577 if (!(id & ID0_PTFS_NO_AARCH32S))
3578 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
3579 }
3580
Will Deacon45ae7cf2013-06-24 18:31:25 +01003581 /* ID1 */
3582 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01003583 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003584
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003585 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00003586 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01003587 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003588 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07003589 dev_warn(smmu->dev,
3590 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
3591 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003592
Will Deacon518f7132014-11-14 17:17:54 +00003593 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003594 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
3595 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
3596 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
3597 return -ENODEV;
3598 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07003599 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01003600 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01003601 /*
3602 * Cavium CN88xx erratum #27704.
3603 * Ensure ASID and VMID allocation is unique across all SMMUs in
3604 * the system.
3605 */
3606 if (smmu->model == CAVIUM_SMMUV2) {
3607 smmu->cavium_id_base =
3608 atomic_add_return(smmu->num_context_banks,
3609 &cavium_smmu_context_count);
3610 smmu->cavium_id_base -= smmu->num_context_banks;
3611 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003612
3613 /* ID2 */
3614 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
3615 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003616 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003617
Will Deacon518f7132014-11-14 17:17:54 +00003618 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003619 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003620 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003621
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003622 if (id & ID2_VMID16)
3623 smmu->features |= ARM_SMMU_FEAT_VMID16;
3624
Robin Murphyf1d84542015-03-04 16:41:05 +00003625 /*
3626 * What the page table walker can address actually depends on which
3627 * descriptor format is in use, but since a) we don't know that yet,
3628 * and b) it can vary per context bank, this will have to do...
3629 */
3630 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
3631 dev_warn(smmu->dev,
3632 "failed to set DMA mask for table walker\n");
3633
Robin Murphyb7862e32016-04-13 18:13:03 +01003634 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00003635 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01003636 if (smmu->version == ARM_SMMU_V1_64K)
3637 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003638 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003639 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00003640 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00003641 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01003642 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00003643 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01003644 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00003645 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01003646 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003647 }
3648
Robin Murphy7602b872016-04-28 17:12:09 +01003649 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01003650 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01003651 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01003652 if (smmu->features &
3653 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01003654 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01003655 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01003656 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01003657 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01003658 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01003659
Robin Murphyd5466352016-05-09 17:20:09 +01003660 if (arm_smmu_ops.pgsize_bitmap == -1UL)
3661 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
3662 else
3663 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003664 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01003665 smmu->pgsize_bitmap);
3666
Will Deacon518f7132014-11-14 17:17:54 +00003667
Will Deacon28d60072014-09-01 16:24:48 +01003668 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003669 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
3670 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003671
3672 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003673 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
3674 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003675
Will Deacon45ae7cf2013-06-24 18:31:25 +01003676 return 0;
3677}
3678
Patrick Dalyd7476202016-09-08 18:23:28 -07003679static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
3680{
3681 if (!smmu->arch_ops)
3682 return 0;
3683 if (!smmu->arch_ops->init)
3684 return 0;
3685 return smmu->arch_ops->init(smmu);
3686}
3687
3688static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
3689{
3690 if (!smmu->arch_ops)
3691 return;
3692 if (!smmu->arch_ops->device_reset)
3693 return;
3694 return smmu->arch_ops->device_reset(smmu);
3695}
3696
Robin Murphy67b65a32016-04-13 18:12:57 +01003697struct arm_smmu_match_data {
3698 enum arm_smmu_arch_version version;
3699 enum arm_smmu_implementation model;
Patrick Dalyd7476202016-09-08 18:23:28 -07003700 struct arm_smmu_arch_ops *arch_ops;
Robin Murphy67b65a32016-04-13 18:12:57 +01003701};
3702
Patrick Dalyd7476202016-09-08 18:23:28 -07003703#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops) \
3704static struct arm_smmu_match_data name = { \
3705.version = ver, \
3706.model = imp, \
3707.arch_ops = ops, \
3708} \
Robin Murphy67b65a32016-04-13 18:12:57 +01003709
Patrick Daly1f8a2882016-09-12 17:32:05 -07003710struct arm_smmu_arch_ops qsmmuv500_arch_ops;
3711
Patrick Dalyd7476202016-09-08 18:23:28 -07003712ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
3713ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
3714ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
3715ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
3716ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003717ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
Patrick Daly1f8a2882016-09-12 17:32:05 -07003718ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
3719 &qsmmuv500_arch_ops);
Robin Murphy67b65a32016-04-13 18:12:57 +01003720
Joerg Roedel09b52692014-10-02 12:24:45 +02003721static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01003722 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
3723 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
3724 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01003725 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003726 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01003727 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07003728 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Patrick Daly1f8a2882016-09-12 17:32:05 -07003729 { .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
Robin Murphy09360402014-08-28 17:51:59 +01003730 { },
3731};
3732MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
3733
Patrick Dalyc47dcd42017-02-09 23:09:57 -08003734
3735static int arm_smmu_of_iommu_configure_fixup(struct device *dev, void *data)
3736{
3737 if (!dev->iommu_fwspec)
3738 of_iommu_configure(dev, dev->of_node);
3739 return 0;
3740}
3741
Patrick Daly000a2f22017-02-13 22:18:12 -08003742static int arm_smmu_add_device_fixup(struct device *dev, void *data)
3743{
3744 struct iommu_ops *ops = data;
3745
3746 ops->add_device(dev);
3747 return 0;
3748}
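
/*
 * Helpers used from the probe path below: they are walked over every
 * device on the platform bus so that devices which were created before
 * this SMMU instance probed still get an IOMMU configuration
 * (of_iommu_configure()) and, when the bus already has an IOMMU set,
 * an explicit add_device() call.
 */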
3749
Patrick Daly1f8a2882016-09-12 17:32:05 -07003750static int qsmmuv500_tbu_register(struct device *dev, void *data);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003751static int arm_smmu_device_dt_probe(struct platform_device *pdev)
3752{
Robin Murphy67b65a32016-04-13 18:12:57 +01003753 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003754 struct resource *res;
3755 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003756 struct device *dev = &pdev->dev;
Robin Murphyd5b41782016-09-14 15:21:39 +01003757 int num_irqs, i, err;
Robin Murphy7e96c742016-09-14 15:26:46 +01003758 bool legacy_binding;
3759
3760 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
3761 if (legacy_binding && !using_generic_binding) {
3762 if (!using_legacy_binding)
3763 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
3764 using_legacy_binding = true;
3765 } else if (!legacy_binding && !using_legacy_binding) {
3766 using_generic_binding = true;
3767 } else {
3768 dev_err(dev, "not probing due to mismatched DT properties\n");
3769 return -ENODEV;
3770 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003771
3772 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
3773 if (!smmu) {
3774 dev_err(dev, "failed to allocate arm_smmu_device\n");
3775 return -ENOMEM;
3776 }
3777 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08003778 spin_lock_init(&smmu->atos_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07003779 idr_init(&smmu->asid_idr);
3780 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003781
Robin Murphyfe52d4f2016-09-12 17:13:52 +01003782 data = of_device_get_match_data(dev);
Robin Murphy67b65a32016-04-13 18:12:57 +01003783 smmu->version = data->version;
3784 smmu->model = data->model;
Patrick Dalyd7476202016-09-08 18:23:28 -07003785 smmu->arch_ops = data->arch_ops;
Robin Murphy09360402014-08-28 17:51:59 +01003786
Will Deacon45ae7cf2013-06-24 18:31:25 +01003787 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01003788 smmu->base = devm_ioremap_resource(dev, res);
3789 if (IS_ERR(smmu->base))
3790 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003791 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003792
3793 if (of_property_read_u32(dev->of_node, "#global-interrupts",
3794 &smmu->num_global_irqs)) {
3795 dev_err(dev, "missing #global-interrupts property\n");
3796 return -ENODEV;
3797 }
3798
3799 num_irqs = 0;
3800 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
3801 num_irqs++;
3802 if (num_irqs > smmu->num_global_irqs)
3803 smmu->num_context_irqs++;
3804 }
3805
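	/*
	 * Every IRQ resource past the first #global-interrupts entries has
	 * been counted as a context-bank interrupt; at least one is needed.
	 */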
Andreas Herrmann44a08de2013-10-01 13:39:07 +01003806 if (!smmu->num_context_irqs) {
3807 dev_err(dev, "found %d interrupts but expected at least %d\n",
3808 num_irqs, smmu->num_global_irqs + 1);
3809 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003810 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003811
3812 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
3813 GFP_KERNEL);
3814 if (!smmu->irqs) {
3815 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
3816 return -ENOMEM;
3817 }
3818
3819 for (i = 0; i < num_irqs; ++i) {
3820 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07003821
Will Deacon45ae7cf2013-06-24 18:31:25 +01003822 if (irq < 0) {
3823 dev_err(dev, "failed to get irq index %d\n", i);
3824 return -ENODEV;
3825 }
3826 smmu->irqs[i] = irq;
3827 }
3828
Dhaval Patel031d7462015-05-09 14:47:29 -07003829 parse_driver_options(smmu);
3830
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003831 smmu->pwr = arm_smmu_init_power_resources(pdev);
3832 if (IS_ERR(smmu->pwr))
3833 return PTR_ERR(smmu->pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003834
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003835 err = arm_smmu_power_on(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07003836 if (err)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003837 goto out_exit_power_resources;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003838
3839 err = arm_smmu_device_cfg_probe(smmu);
3840 if (err)
3841 goto out_power_off;
3842
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003843 err = arm_smmu_parse_impl_def_registers(smmu);
3844 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01003845 goto out_power_off;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003846
Robin Murphyb7862e32016-04-13 18:13:03 +01003847 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01003848 smmu->num_context_banks != smmu->num_context_irqs) {
3849 dev_err(dev,
Mitchel Humpherys73230ce2014-12-11 17:18:02 -08003850 "found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
3851 smmu->num_context_irqs, smmu->num_context_banks,
3852 smmu->num_context_banks);
3853 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003854 }
3855
Will Deacon45ae7cf2013-06-24 18:31:25 +01003856 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08003857 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
3858 NULL, arm_smmu_global_fault,
3859 IRQF_ONESHOT | IRQF_SHARED,
3860 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003861 if (err) {
3862 dev_err(dev, "failed to request global IRQ %d (%u)\n",
3863 i, smmu->irqs[i]);
Robin Murphyd5b41782016-09-14 15:21:39 +01003864 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003865 }
3866 }
3867
Patrick Dalyd7476202016-09-08 18:23:28 -07003868 err = arm_smmu_arch_init(smmu);
3869 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01003870 goto out_power_off;
Patrick Dalyd7476202016-09-08 18:23:28 -07003871
Robin Murphy06e393e2016-09-12 17:13:55 +01003872 of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
Robin Murphyfe52d4f2016-09-12 17:13:52 +01003873 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01003874 arm_smmu_device_reset(smmu);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003875 arm_smmu_power_off(smmu->pwr);
Patrick Dalyd7476202016-09-08 18:23:28 -07003876
Patrick Daly8e3371a2017-02-13 22:14:53 -08003877 INIT_LIST_HEAD(&smmu->list);
3878 spin_lock(&arm_smmu_devices_lock);
3879 list_add(&smmu->list, &arm_smmu_devices);
3880 spin_unlock(&arm_smmu_devices_lock);
3881
Patrick Dalyc47dcd42017-02-09 23:09:57 -08003882 /* bus_set_iommu depends on this. */
3883 bus_for_each_dev(&platform_bus_type, NULL, NULL,
3884 arm_smmu_of_iommu_configure_fixup);
3885
Robin Murphy7e96c742016-09-14 15:26:46 +01003886 /* Oh, for a proper bus abstraction */
3887 if (!iommu_present(&platform_bus_type))
3888 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
Patrick Daly000a2f22017-02-13 22:18:12 -08003889 else
3890 bus_for_each_dev(&platform_bus_type, NULL, &arm_smmu_ops,
3891 arm_smmu_add_device_fixup);
Robin Murphy7e96c742016-09-14 15:26:46 +01003892#ifdef CONFIG_ARM_AMBA
3893 if (!iommu_present(&amba_bustype))
3894 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
3895#endif
3896#ifdef CONFIG_PCI
3897 if (!iommu_present(&pci_bus_type)) {
3898 pci_request_acs();
3899 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
3900 }
3901#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01003902 return 0;
3903
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003904out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003905 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003906
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003907out_exit_power_resources:
3908 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003909
Will Deacon45ae7cf2013-06-24 18:31:25 +01003910 return err;
3911}
3912
3913static int arm_smmu_device_remove(struct platform_device *pdev)
3914{
Robin Murphyfe52d4f2016-09-12 17:13:52 +01003915 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003916
3917 if (!smmu)
3918 return -ENODEV;
3919
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003920 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003921 return -EINVAL;
3922
Will Deaconecfadb62013-07-31 19:21:28 +01003923 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyfe52d4f2016-09-12 17:13:52 +01003924 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003925
Patrick Dalyc190d932016-08-30 17:23:28 -07003926 idr_destroy(&smmu->asid_idr);
3927
Will Deacon45ae7cf2013-06-24 18:31:25 +01003928 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07003929 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003930 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003931
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003932 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07003933
Will Deacon45ae7cf2013-06-24 18:31:25 +01003934 return 0;
3935}
3936
Will Deacon45ae7cf2013-06-24 18:31:25 +01003937static struct platform_driver arm_smmu_driver = {
3938 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003939 .name = "arm-smmu",
3940 .of_match_table = of_match_ptr(arm_smmu_of_match),
3941 },
3942 .probe = arm_smmu_device_dt_probe,
3943 .remove = arm_smmu_device_remove,
3944};
3945
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08003946static struct platform_driver qsmmuv500_tbu_driver;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003947static int __init arm_smmu_init(void)
3948{
Robin Murphy7e96c742016-09-14 15:26:46 +01003949 static bool registered;
3950 int ret = 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003951
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08003952 if (registered)
3953 return 0;
3954
3955 ret = platform_driver_register(&qsmmuv500_tbu_driver);
3956 if (ret)
3957 return ret;
3958
3959 ret = platform_driver_register(&arm_smmu_driver);
3960 registered = !ret;
Robin Murphy7e96c742016-09-14 15:26:46 +01003961 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003962}
3963
3964static void __exit arm_smmu_exit(void)
3965{
3966 return platform_driver_unregister(&arm_smmu_driver);
3967}
3968
Andreas Herrmannb1950b22013-10-01 13:39:05 +01003969subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003970module_exit(arm_smmu_exit);
3971
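/*
 * Early init hook registered via IOMMU_OF_DECLARE() below: it makes
 * sure the drivers are registered and then creates the platform device
 * for the SMMU node itself, so the SMMU can come up as early as
 * possible for its masters.
 */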
Robin Murphy7e96c742016-09-14 15:26:46 +01003972static int __init arm_smmu_of_init(struct device_node *np)
3973{
3974 int ret = arm_smmu_init();
3975
3976 if (ret)
3977 return ret;
3978
3979 if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
3980 return -ENODEV;
3981
3982 return 0;
3983}
3984IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
3985IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
3986IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
3987IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
3988IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
3989IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
Robin Murphy7e96c742016-09-14 15:26:46 +01003990
Patrick Daly1f8a2882016-09-12 17:32:05 -07003991#define DEBUG_SID_HALT_REG 0x0
3992#define DEBUG_SID_HALT_VAL (0x1 << 16)
3993
3994#define DEBUG_SR_HALT_ACK_REG 0x20
3995#define DEBUG_SR_HALT_ACK_VAL (0x1 << 1)
3996
3997#define TBU_DBG_TIMEOUT_US 30000
3998
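/*
 * Per-TBU (translation buffer unit) bookkeeping for QSMMUV500: each TBU
 * is a child platform device of the SMMU with its own register space,
 * power resources and a reference-counted halt state.
 */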
3999struct qsmmuv500_tbu_device {
4000 struct list_head list;
4001 struct device *dev;
4002 struct arm_smmu_device *smmu;
4003 void __iomem *base;
4004 void __iomem *status_reg;
4005
4006 struct arm_smmu_power_resources *pwr;
4007
4008 /* Protects halt count */
4009 spinlock_t halt_lock;
4010 u32 halt_count;
4011};
4012
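/*
 * Power every TBU on (or back off) as a group. On a partial failure the
 * TBUs already powered are rolled back, so the caller never sees a
 * half-powered state.
 */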
4013static int qsmmuv500_tbu_power_on_all(struct arm_smmu_device *smmu)
4014{
4015 struct qsmmuv500_tbu_device *tbu;
4016 struct list_head *list = smmu->archdata;
4017 int ret = 0;
4018
4019 list_for_each_entry(tbu, list, list) {
4020 ret = arm_smmu_power_on(tbu->pwr);
4021 if (ret)
4022 break;
4023 }
4024 if (!ret)
4025 return 0;
4026
4027 list_for_each_entry_continue_reverse(tbu, list, list) {
4028 arm_smmu_power_off(tbu->pwr);
4029 }
4030 return ret;
4031}
4032
4033static void qsmmuv500_tbu_power_off_all(struct arm_smmu_device *smmu)
4034{
4035 struct qsmmuv500_tbu_device *tbu;
4036 struct list_head *list = smmu->archdata;
4037
4038 list_for_each_entry_reverse(tbu, list, list) {
4039 arm_smmu_power_off(tbu->pwr);
4040 }
4041}
4042
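/*
 * Halt/resume are reference counted per TBU: only the first halt sets
 * DEBUG_SID_HALT_VAL and polls for DEBUG_SR_HALT_ACK_VAL (for up to
 * TBU_DBG_TIMEOUT_US), and only the last resume clears the halt bit.
 */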
4043static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu)
4044{
4045 unsigned long flags;
4046 u32 val;
4047 void __iomem *base;
4048
4049 spin_lock_irqsave(&tbu->halt_lock, flags);
4050 if (tbu->halt_count) {
4051 tbu->halt_count++;
4052 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4053 return 0;
4054 }
4055
4056 base = tbu->base;
4057 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
4058 val |= DEBUG_SID_HALT_VAL;
4059 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
4060
4061 if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG,
4062 val, (val & DEBUG_SR_HALT_ACK_VAL),
4063 0, TBU_DBG_TIMEOUT_US)) {
4064 dev_err(tbu->dev, "Couldn't halt TBU!\n");
4065 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4066 return -ETIMEDOUT;
4067 }
4068
4069 tbu->halt_count = 1;
4070 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4071 return 0;
4072}
4073
4074static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
4075{
4076 unsigned long flags;
4077 u32 val;
4078 void __iomem *base;
4079
4080 spin_lock_irqsave(&tbu->halt_lock, flags);
4081 if (!tbu->halt_count) {
4082 WARN(1, "%s: bad tbu->halt_count", dev_name(tbu->dev));
4083 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4084 return;
4085
4086 } else if (tbu->halt_count > 1) {
4087 tbu->halt_count--;
4088 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4089 return;
4090 }
4091
4092 base = tbu->base;
4093 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
4094 val &= ~DEBUG_SID_HALT_VAL;
4095 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
4096
4097 tbu->halt_count = 0;
4098 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4099}
4100
4101static int qsmmuv500_halt_all(struct arm_smmu_device *smmu)
4102{
4103 struct qsmmuv500_tbu_device *tbu;
4104 struct list_head *list = smmu->archdata;
4105 int ret = 0;
4106
4107 list_for_each_entry(tbu, list, list) {
4108 ret = qsmmuv500_tbu_halt(tbu);
4109 if (ret)
4110 break;
4111 }
4112
4113 if (!ret)
4114 return 0;
4115
4116 list_for_each_entry_continue_reverse(tbu, list, list) {
4117 qsmmuv500_tbu_resume(tbu);
4118 }
4119 return ret;
4120}
4121
4122static void qsmmuv500_resume_all(struct arm_smmu_device *smmu)
4123{
4124 struct qsmmuv500_tbu_device *tbu;
4125 struct list_head *list = smmu->archdata;
4126
4127 list_for_each_entry(tbu, list, list) {
4128 qsmmuv500_tbu_resume(tbu);
4129 }
4130}
4131
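/*
 * The QSMMUV500 reset path powers on and halts every TBU around the
 * implementation-defined register writes, then resumes and powers them
 * back off.
 */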
4132static void qsmmuv500_device_reset(struct arm_smmu_device *smmu)
4133{
4134 int i, ret;
4135 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
4136
4137 ret = qsmmuv500_tbu_power_on_all(smmu);
4138 if (ret)
4139 return;
4140
4141 /* Program implementation-defined registers */
4142 qsmmuv500_halt_all(smmu);
4143 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
4144 writel_relaxed(regs[i].value,
4145 ARM_SMMU_GR0(smmu) + regs[i].offset);
4146 qsmmuv500_resume_all(smmu);
4147 qsmmuv500_tbu_power_off_all(smmu);
4148}
4149
4150static int qsmmuv500_tbu_register(struct device *dev, void *data)
4151{
4152 struct arm_smmu_device *smmu = data;
4153 struct qsmmuv500_tbu_device *tbu;
4154 struct list_head *list = smmu->archdata;
4155
4156 if (!dev->driver) {
4157 dev_err(dev, "TBU failed probe, QSMMUV500 cannot continue!\n");
4158 return -EINVAL;
4159 }
4160
4161 tbu = dev_get_drvdata(dev);
4162
4163 INIT_LIST_HEAD(&tbu->list);
4164 tbu->smmu = smmu;
4165 list_add(&tbu->list, list);
4166 return 0;
4167}
4168
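/*
 * arch_ops->init for QSMMUV500: allocate the TBU list hung off
 * smmu->archdata, populate the TBU child nodes as platform devices and
 * register each one; a TBU that failed to probe aborts the SMMU probe.
 */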
4169static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
4170{
4171 struct device *dev = smmu->dev;
4172 struct list_head *list;
4173 int ret;
4174
4175 list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
4176 if (!list)
4177 return -ENOMEM;
4178
4179 INIT_LIST_HEAD(list);
4180 smmu->archdata = list;
4181
4182 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
4183 if (ret)
4184 return ret;
4185
4186 /* Attempt to register child devices */
4187 ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
4188 if (ret)
4189 return -EINVAL;
4190
4191 return 0;
4192}
4193
4194struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
4195 .init = qsmmuv500_arch_init,
4196 .device_reset = qsmmuv500_device_reset,
4197};
4198
4199static const struct of_device_id qsmmuv500_tbu_of_match[] = {
4200 {.compatible = "qcom,qsmmuv500-tbu"},
4201 {}
4202};
4203
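/*
 * The TBU child devices come from "qcom,qsmmuv500-tbu" subnodes of the
 * SMMU node; the probe below expects two named register regions,
 * "base" and "status-reg". A minimal sketch (addresses are placeholders,
 * not from any real platform):
 *
 *	tbu@1000 {
 *		compatible = "qcom,qsmmuv500-tbu";
 *		reg = <0x1000 0x1000>, <0x2000 0x8>;
 *		reg-names = "base", "status-reg";
 *	};
 */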
4204static int qsmmuv500_tbu_probe(struct platform_device *pdev)
4205{
4206 struct resource *res;
4207 struct device *dev = &pdev->dev;
4208 struct qsmmuv500_tbu_device *tbu;
4209
4210 tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
4211 if (!tbu)
4212 return -ENOMEM;
4213
4214 INIT_LIST_HEAD(&tbu->list);
4215 tbu->dev = dev;
4216 spin_lock_init(&tbu->halt_lock);
4217
4218 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
4219 tbu->base = devm_ioremap_resource(dev, res);
4220 if (IS_ERR(tbu->base))
4221 return PTR_ERR(tbu->base);
4222
4223 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "status-reg");
4224 tbu->status_reg = devm_ioremap_resource(dev, res);
4225 if (IS_ERR(tbu->status_reg))
4226 return PTR_ERR(tbu->status_reg);
4227
4228 tbu->pwr = arm_smmu_init_power_resources(pdev);
4229 if (IS_ERR(tbu->pwr))
4230 return PTR_ERR(tbu->pwr);
4231
4232 dev_set_drvdata(dev, tbu);
4233 return 0;
4234}
4235
4236static struct platform_driver qsmmuv500_tbu_driver = {
4237 .driver = {
4238 .name = "qsmmuv500-tbu",
4239 .of_match_table = of_match_ptr(qsmmuv500_tbu_of_match),
4240 },
4241 .probe = qsmmuv500_tbu_probe,
4242};
4243
Will Deacon45ae7cf2013-06-24 18:31:25 +01004244MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
4245MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
4246MODULE_LICENSE("GPL v2");