Will Deacon45ae7cf2013-06-24 18:31:25 +01001/*
2 * IOMMU API for ARM architected SMMU implementations.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
16 *
17 * Copyright (C) 2013 ARM Limited
18 *
19 * Author: Will Deacon <will.deacon@arm.com>
20 *
21 * This driver currently supports:
22 * - SMMUv1 and v2 implementations
23 * - Stream-matching and stream-indexing
24 * - v7/v8 long-descriptor format
25 * - Non-secure access to the SMMU
Will Deacon45ae7cf2013-06-24 18:31:25 +010026 * - Context fault reporting
27 */
28
29#define pr_fmt(fmt) "arm-smmu: " fmt
30
Robin Murphy468f4942016-09-12 17:13:49 +010031#include <linux/atomic.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010032#include <linux/delay.h>
Robin Murphy9adb9592016-01-26 18:06:36 +000033#include <linux/dma-iommu.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010034#include <linux/dma-mapping.h>
35#include <linux/err.h>
36#include <linux/interrupt.h>
37#include <linux/io.h>
Robin Murphyf9a05f02016-04-13 18:13:01 +010038#include <linux/io-64-nonatomic-hi-lo.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010039#include <linux/iommu.h>
Mitchel Humpherys859a7322014-10-29 21:13:40 +000040#include <linux/iopoll.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010041#include <linux/module.h>
42#include <linux/of.h>
Robin Murphybae2c2d2015-07-29 19:46:05 +010043#include <linux/of_address.h>
Robin Murphyfe52d4f2016-09-12 17:13:52 +010044#include <linux/of_device.h>
Robin Murphy06e393e2016-09-12 17:13:55 +010045#include <linux/of_iommu.h>
Will Deacona9a1b0b2014-05-01 18:05:08 +010046#include <linux/pci.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010047#include <linux/platform_device.h>
48#include <linux/slab.h>
49#include <linux/spinlock.h>
Patrick Dalyc11d1082016-09-01 15:52:44 -070050#include <soc/qcom/secure_buffer.h>
Patrick Daly1f8a2882016-09-12 17:32:05 -070051#include <linux/of_platform.h>
Patrick Daly2764f952016-09-06 19:22:44 -070052#include <linux/msm-bus.h>
53#include <dt-bindings/msm/msm-bus-ids.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010054
55#include <linux/amba/bus.h>
56
Will Deacon518f7132014-11-14 17:17:54 +000057#include "io-pgtable.h"
Will Deacon45ae7cf2013-06-24 18:31:25 +010058
Will Deacon45ae7cf2013-06-24 18:31:25 +010059/* Maximum number of context banks per SMMU */
60#define ARM_SMMU_MAX_CBS 128
61
Will Deacon45ae7cf2013-06-24 18:31:25 +010062/* SMMU global address space */
63#define ARM_SMMU_GR0(smmu) ((smmu)->base)
Will Deaconc757e852014-07-30 11:33:25 +010064#define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))
Will Deacon45ae7cf2013-06-24 18:31:25 +010065
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +000066/*
67 * SMMU global address space with conditional offset to access secure
68 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
69 * nsGFSYNR0: 0x450)
70 */
71#define ARM_SMMU_GR0_NS(smmu) \
72 ((smmu)->base + \
73 ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
74 ? 0x400 : 0))
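/*
 * Illustrative sketch (not part of the original driver): with
 * ARM_SMMU_OPT_SECURE_CFG_ACCESS set, global register accesses pick up the
 * 0x400 secure-alias offset transparently, e.g. reading the global fault
 * status register:
 *
 *	u32 gfsr = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
 */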
75
Robin Murphyf9a05f02016-04-13 18:13:01 +010076/*
77 * Some 64-bit registers only make sense to write atomically, but in such
78 * cases all the data relevant to AArch32 formats lies within the lower word,
79 * therefore this actually makes more sense than it might first appear.
80 */
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010081#ifdef CONFIG_64BIT
Robin Murphyf9a05f02016-04-13 18:13:01 +010082#define smmu_write_atomic_lq writeq_relaxed
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010083#else
Robin Murphyf9a05f02016-04-13 18:13:01 +010084#define smmu_write_atomic_lq writel_relaxed
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010085#endif
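/*
 * Usage sketch (assumption, mirroring the TLB invalidation path later in
 * this file): a stage-2 invalidate-by-IPA goes through this helper so that
 * 64-bit capable builds issue a single 64-bit store, while 32-bit builds
 * write only the low word, which is all the AArch32 format needs:
 *
 *	reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx)
 *	      + ARM_SMMU_CB_S2_TLBIIPAS2;
 *	smmu_write_atomic_lq(iova >> 12, reg);
 */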
86
Will Deacon45ae7cf2013-06-24 18:31:25 +010087/* Configuration registers */
88#define ARM_SMMU_GR0_sCR0 0x0
89#define sCR0_CLIENTPD (1 << 0)
90#define sCR0_GFRE (1 << 1)
91#define sCR0_GFIE (1 << 2)
92#define sCR0_GCFGFRE (1 << 4)
93#define sCR0_GCFGFIE (1 << 5)
94#define sCR0_USFCFG (1 << 10)
95#define sCR0_VMIDPNE (1 << 11)
96#define sCR0_PTM (1 << 12)
97#define sCR0_FB (1 << 13)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -080098#define sCR0_VMID16EN (1 << 31)
Will Deacon45ae7cf2013-06-24 18:31:25 +010099#define sCR0_BSU_SHIFT 14
100#define sCR0_BSU_MASK 0x3
101
Peng Fan3ca37122016-05-03 21:50:30 +0800102/* Auxiliary Configuration register */
103#define ARM_SMMU_GR0_sACR 0x10
104
Will Deacon45ae7cf2013-06-24 18:31:25 +0100105/* Identification registers */
106#define ARM_SMMU_GR0_ID0 0x20
107#define ARM_SMMU_GR0_ID1 0x24
108#define ARM_SMMU_GR0_ID2 0x28
109#define ARM_SMMU_GR0_ID3 0x2c
110#define ARM_SMMU_GR0_ID4 0x30
111#define ARM_SMMU_GR0_ID5 0x34
112#define ARM_SMMU_GR0_ID6 0x38
113#define ARM_SMMU_GR0_ID7 0x3c
114#define ARM_SMMU_GR0_sGFSR 0x48
115#define ARM_SMMU_GR0_sGFSYNR0 0x50
116#define ARM_SMMU_GR0_sGFSYNR1 0x54
117#define ARM_SMMU_GR0_sGFSYNR2 0x58
Will Deacon45ae7cf2013-06-24 18:31:25 +0100118
119#define ID0_S1TS (1 << 30)
120#define ID0_S2TS (1 << 29)
121#define ID0_NTS (1 << 28)
122#define ID0_SMS (1 << 27)
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000123#define ID0_ATOSNS (1 << 26)
Robin Murphy7602b872016-04-28 17:12:09 +0100124#define ID0_PTFS_NO_AARCH32 (1 << 25)
125#define ID0_PTFS_NO_AARCH32S (1 << 24)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100126#define ID0_CTTW (1 << 14)
127#define ID0_NUMIRPT_SHIFT 16
128#define ID0_NUMIRPT_MASK 0xff
Olav Haugan3c8766d2014-08-22 17:12:32 -0700129#define ID0_NUMSIDB_SHIFT 9
130#define ID0_NUMSIDB_MASK 0xf
Will Deacon45ae7cf2013-06-24 18:31:25 +0100131#define ID0_NUMSMRG_SHIFT 0
132#define ID0_NUMSMRG_MASK 0xff
133
134#define ID1_PAGESIZE (1 << 31)
135#define ID1_NUMPAGENDXB_SHIFT 28
136#define ID1_NUMPAGENDXB_MASK 7
137#define ID1_NUMS2CB_SHIFT 16
138#define ID1_NUMS2CB_MASK 0xff
139#define ID1_NUMCB_SHIFT 0
140#define ID1_NUMCB_MASK 0xff
141
142#define ID2_OAS_SHIFT 4
143#define ID2_OAS_MASK 0xf
144#define ID2_IAS_SHIFT 0
145#define ID2_IAS_MASK 0xf
146#define ID2_UBS_SHIFT 8
147#define ID2_UBS_MASK 0xf
148#define ID2_PTFS_4K (1 << 12)
149#define ID2_PTFS_16K (1 << 13)
150#define ID2_PTFS_64K (1 << 14)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800151#define ID2_VMID16 (1 << 15)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100152
Peng Fan3ca37122016-05-03 21:50:30 +0800153#define ID7_MAJOR_SHIFT 4
154#define ID7_MAJOR_MASK 0xf
Will Deacon45ae7cf2013-06-24 18:31:25 +0100155
Will Deacon45ae7cf2013-06-24 18:31:25 +0100156/* Global TLB invalidation */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100157#define ARM_SMMU_GR0_TLBIVMID 0x64
158#define ARM_SMMU_GR0_TLBIALLNSNH 0x68
159#define ARM_SMMU_GR0_TLBIALLH 0x6c
160#define ARM_SMMU_GR0_sTLBGSYNC 0x70
161#define ARM_SMMU_GR0_sTLBGSTATUS 0x74
162#define sTLBGSTATUS_GSACTIVE (1 << 0)
Mitchel Humpherys849aa502015-11-09 11:50:58 -0800163#define TLB_LOOP_TIMEOUT 500000 /* 500ms */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100164
165/* Stream mapping registers */
166#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
167#define SMR_VALID (1 << 31)
168#define SMR_MASK_SHIFT 16
Will Deacon45ae7cf2013-06-24 18:31:25 +0100169#define SMR_ID_SHIFT 0
Will Deacon45ae7cf2013-06-24 18:31:25 +0100170
171#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
172#define S2CR_CBNDX_SHIFT 0
173#define S2CR_CBNDX_MASK 0xff
174#define S2CR_TYPE_SHIFT 16
175#define S2CR_TYPE_MASK 0x3
Robin Murphya754fd12016-09-12 17:13:50 +0100176enum arm_smmu_s2cr_type {
177 S2CR_TYPE_TRANS,
178 S2CR_TYPE_BYPASS,
179 S2CR_TYPE_FAULT,
180};
181
182#define S2CR_PRIVCFG_SHIFT 24
183#define S2CR_PRIVCFG_MASK 0x3
184enum arm_smmu_s2cr_privcfg {
185 S2CR_PRIVCFG_DEFAULT,
186 S2CR_PRIVCFG_DIPAN,
187 S2CR_PRIVCFG_UNPRIV,
188 S2CR_PRIVCFG_PRIV,
189};
Will Deacon45ae7cf2013-06-24 18:31:25 +0100190
191/* Context bank attribute registers */
192#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
193#define CBAR_VMID_SHIFT 0
194#define CBAR_VMID_MASK 0xff
Will Deacon57ca90f2014-02-06 14:59:05 +0000195#define CBAR_S1_BPSHCFG_SHIFT 8
196#define CBAR_S1_BPSHCFG_MASK 3
197#define CBAR_S1_BPSHCFG_NSH 3
Will Deacon45ae7cf2013-06-24 18:31:25 +0100198#define CBAR_S1_MEMATTR_SHIFT 12
199#define CBAR_S1_MEMATTR_MASK 0xf
200#define CBAR_S1_MEMATTR_WB 0xf
201#define CBAR_TYPE_SHIFT 16
202#define CBAR_TYPE_MASK 0x3
203#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
204#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
205#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
206#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
207#define CBAR_IRPTNDX_SHIFT 24
208#define CBAR_IRPTNDX_MASK 0xff
209
Shalaj Jain04059c52015-03-03 13:34:59 -0800210#define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2))
211#define CBFRSYNRA_SID_MASK (0xffff)
212
Will Deacon45ae7cf2013-06-24 18:31:25 +0100213#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
214#define CBA2R_RW64_32BIT (0 << 0)
215#define CBA2R_RW64_64BIT (1 << 0)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800216#define CBA2R_VMID_SHIFT 16
217#define CBA2R_VMID_MASK 0xffff
Will Deacon45ae7cf2013-06-24 18:31:25 +0100218
219/* Translation context bank */
220#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
Will Deaconc757e852014-07-30 11:33:25 +0100221#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))
Will Deacon45ae7cf2013-06-24 18:31:25 +0100222
223#define ARM_SMMU_CB_SCTLR 0x0
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100224#define ARM_SMMU_CB_ACTLR 0x4
Will Deacon45ae7cf2013-06-24 18:31:25 +0100225#define ARM_SMMU_CB_RESUME 0x8
226#define ARM_SMMU_CB_TTBCR2 0x10
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +0100227#define ARM_SMMU_CB_TTBR0 0x20
228#define ARM_SMMU_CB_TTBR1 0x28
Will Deacon45ae7cf2013-06-24 18:31:25 +0100229#define ARM_SMMU_CB_TTBCR 0x30
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600230#define ARM_SMMU_CB_CONTEXTIDR 0x34
Will Deacon45ae7cf2013-06-24 18:31:25 +0100231#define ARM_SMMU_CB_S1_MAIR0 0x38
Will Deacon518f7132014-11-14 17:17:54 +0000232#define ARM_SMMU_CB_S1_MAIR1 0x3c
Robin Murphyf9a05f02016-04-13 18:13:01 +0100233#define ARM_SMMU_CB_PAR 0x50
Will Deacon45ae7cf2013-06-24 18:31:25 +0100234#define ARM_SMMU_CB_FSR 0x58
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -0700235#define ARM_SMMU_CB_FSRRESTORE 0x5c
Robin Murphyf9a05f02016-04-13 18:13:01 +0100236#define ARM_SMMU_CB_FAR 0x60
Will Deacon45ae7cf2013-06-24 18:31:25 +0100237#define ARM_SMMU_CB_FSYNR0 0x68
Will Deacon518f7132014-11-14 17:17:54 +0000238#define ARM_SMMU_CB_S1_TLBIVA 0x600
Will Deacon1463fe42013-07-31 19:21:27 +0100239#define ARM_SMMU_CB_S1_TLBIASID 0x610
Will Deacon518f7132014-11-14 17:17:54 +0000240#define ARM_SMMU_CB_S1_TLBIVAL 0x620
241#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
242#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
Mitchel Humpherysf3007992015-06-19 15:00:14 -0700243#define ARM_SMMU_CB_TLBSYNC 0x7f0
244#define ARM_SMMU_CB_TLBSTATUS 0x7f4
245#define TLBSTATUS_SACTIVE (1 << 0)
Robin Murphy661d9622015-05-27 17:09:34 +0100246#define ARM_SMMU_CB_ATS1PR 0x800
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000247#define ARM_SMMU_CB_ATSR 0x8f0
Will Deacon45ae7cf2013-06-24 18:31:25 +0100248
249#define SCTLR_S1_ASIDPNE (1 << 12)
250#define SCTLR_CFCFG (1 << 7)
251#define SCTLR_CFIE (1 << 6)
252#define SCTLR_CFRE (1 << 5)
253#define SCTLR_E (1 << 4)
254#define SCTLR_AFE (1 << 2)
255#define SCTLR_TRE (1 << 1)
256#define SCTLR_M (1 << 0)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100257
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100258#define ARM_MMU500_ACTLR_CPRE (1 << 1)
259
Peng Fan3ca37122016-05-03 21:50:30 +0800260#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
261
Mitchel Humpherys952f40a2015-08-19 12:13:28 -0700262#define ARM_SMMU_IMPL_DEF0(smmu) \
263 ((smmu)->base + (2 * (1 << (smmu)->pgshift)))
264#define ARM_SMMU_IMPL_DEF1(smmu) \
265 ((smmu)->base + (6 * (1 << (smmu)->pgshift)))
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000266#define CB_PAR_F (1 << 0)
267
268#define ATSR_ACTIVE (1 << 0)
269
Will Deacon45ae7cf2013-06-24 18:31:25 +0100270#define RESUME_RETRY (0 << 0)
271#define RESUME_TERMINATE (1 << 0)
272
Will Deacon45ae7cf2013-06-24 18:31:25 +0100273#define TTBCR2_SEP_SHIFT 15
Will Deacon5dc56162015-05-08 17:44:22 +0100274#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100275
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +0100276#define TTBRn_ASID_SHIFT 48
Will Deacon45ae7cf2013-06-24 18:31:25 +0100277
278#define FSR_MULTI (1 << 31)
279#define FSR_SS (1 << 30)
280#define FSR_UUT (1 << 8)
281#define FSR_ASF (1 << 7)
282#define FSR_TLBLKF (1 << 6)
283#define FSR_TLBMCF (1 << 5)
284#define FSR_EF (1 << 4)
285#define FSR_PF (1 << 3)
286#define FSR_AFF (1 << 2)
287#define FSR_TF (1 << 1)
288
Mitchel Humpherys29073202014-07-08 09:52:18 -0700289#define FSR_IGN (FSR_AFF | FSR_ASF | \
290 FSR_TLBMCF | FSR_TLBLKF)
291#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
Will Deaconadaba322013-07-31 19:21:26 +0100292 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100293
294#define FSYNR0_WNR (1 << 4)
295
Will Deacon4cf740b2014-07-14 19:47:39 +0100296static int force_stage;
Robin Murphy25a1c962016-02-10 14:25:33 +0000297module_param(force_stage, int, S_IRUGO);
Will Deacon4cf740b2014-07-14 19:47:39 +0100298MODULE_PARM_DESC(force_stage,
299 "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
Patrick Dalya728cfd2016-11-15 17:49:29 -0800300static bool disable_bypass;
Robin Murphy25a1c962016-02-10 14:25:33 +0000301module_param(disable_bypass, bool, S_IRUGO);
302MODULE_PARM_DESC(disable_bypass,
303 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
Will Deacon4cf740b2014-07-14 19:47:39 +0100304
Robin Murphy09360402014-08-28 17:51:59 +0100305enum arm_smmu_arch_version {
Robin Murphyb7862e32016-04-13 18:13:03 +0100306 ARM_SMMU_V1,
307 ARM_SMMU_V1_64K,
Robin Murphy09360402014-08-28 17:51:59 +0100308 ARM_SMMU_V2,
309};
310
Robin Murphy67b65a32016-04-13 18:12:57 +0100311enum arm_smmu_implementation {
312 GENERIC_SMMU,
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100313 ARM_MMU500,
Robin Murphye086d912016-04-13 18:12:58 +0100314 CAVIUM_SMMUV2,
Patrick Dalyf0d4e212016-06-20 15:50:14 -0700315 QCOM_SMMUV2,
Patrick Daly1f8a2882016-09-12 17:32:05 -0700316 QCOM_SMMUV500,
Robin Murphy67b65a32016-04-13 18:12:57 +0100317};
318
Patrick Dalyd7476202016-09-08 18:23:28 -0700319struct arm_smmu_device;
320struct arm_smmu_arch_ops {
321 int (*init)(struct arm_smmu_device *smmu);
322 void (*device_reset)(struct arm_smmu_device *smmu);
323 phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
324 dma_addr_t iova);
325 void (*iova_to_phys_fault)(struct iommu_domain *domain,
326 dma_addr_t iova, phys_addr_t *phys1,
327 phys_addr_t *phys_post_tlbiall);
328};
329
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -0700330struct arm_smmu_impl_def_reg {
331 u32 offset;
332 u32 value;
333};
334
Robin Murphya754fd12016-09-12 17:13:50 +0100335struct arm_smmu_s2cr {
Robin Murphy6668f692016-09-12 17:13:54 +0100336 struct iommu_group *group;
337 int count;
Robin Murphya754fd12016-09-12 17:13:50 +0100338 enum arm_smmu_s2cr_type type;
339 enum arm_smmu_s2cr_privcfg privcfg;
340 u8 cbndx;
341};
342
343#define s2cr_init_val (struct arm_smmu_s2cr){ \
344 .type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \
345}
346
Will Deacon45ae7cf2013-06-24 18:31:25 +0100347struct arm_smmu_smr {
Will Deacon45ae7cf2013-06-24 18:31:25 +0100348 u16 mask;
349 u16 id;
Robin Murphy468f4942016-09-12 17:13:49 +0100350 bool valid;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100351};
352
Will Deacona9a1b0b2014-05-01 18:05:08 +0100353struct arm_smmu_master_cfg {
Robin Murphyd5b41782016-09-14 15:21:39 +0100354 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +0100355 s16 smendx[];
Will Deacon45ae7cf2013-06-24 18:31:25 +0100356};
Robin Murphy468f4942016-09-12 17:13:49 +0100357#define INVALID_SMENDX -1
Robin Murphy06e393e2016-09-12 17:13:55 +0100358#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
359#define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu)
Robin Murphye7595e52016-11-07 18:25:09 +0000360#define fwspec_smendx(fw, i) \
361 (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
Robin Murphy06e393e2016-09-12 17:13:55 +0100362#define for_each_cfg_sme(fw, i, idx) \
Robin Murphye7595e52016-11-07 18:25:09 +0000363 for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
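/*
 * Usage sketch (assumed, following the attach/allocation paths that use a
 * master's generic iommu_fwspec): iterate every stream ID of a device and
 * look up the stream-map entry it was assigned, skipping IDs that have no
 * entry yet:
 *
 *	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
 *	int i, idx;
 *
 *	for_each_cfg_sme(fwspec, i, idx) {
 *		if (idx == INVALID_SMENDX)
 *			continue;
 *		... program smmu->smrs[idx] and smmu->s2crs[idx] ...
 *	}
 */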
Will Deacon45ae7cf2013-06-24 18:31:25 +0100364
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700365/*
366 * Describes the resources required to power the SMMU on and off.
367 * Separate reference counts are maintained for atomic and non-atomic
368 * operations.
369 */
370struct arm_smmu_power_resources {
371 struct platform_device *pdev;
372 struct device *dev;
373
374 struct clk **clocks;
375 int num_clocks;
376
377 struct regulator_bulk_data *gdscs;
378 int num_gdscs;
379
380 uint32_t bus_client;
381 struct msm_bus_scale_pdata *bus_dt_data;
382
383 /* Protects power_count */
384 struct mutex power_lock;
385 int power_count;
386
387 /* Protects clock_refs_count */
388 spinlock_t clock_refs_lock;
389 int clock_refs_count;
390};
391
Will Deacon45ae7cf2013-06-24 18:31:25 +0100392struct arm_smmu_device {
393 struct device *dev;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100394
395 void __iomem *base;
396 unsigned long size;
Will Deaconc757e852014-07-30 11:33:25 +0100397 unsigned long pgshift;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100398
399#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
400#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1)
401#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
402#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
403#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000404#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800405#define ARM_SMMU_FEAT_VMID16 (1 << 6)
Robin Murphy7602b872016-04-28 17:12:09 +0100406#define ARM_SMMU_FEAT_FMT_AARCH64_4K (1 << 7)
407#define ARM_SMMU_FEAT_FMT_AARCH64_16K (1 << 8)
408#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
409#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
410#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100411 u32 features;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000412
413#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -0800414#define ARM_SMMU_OPT_FATAL_ASF (1 << 1)
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -0800415#define ARM_SMMU_OPT_SKIP_INIT (1 << 2)
Patrick Dalyc190d932016-08-30 17:23:28 -0700416#define ARM_SMMU_OPT_DYNAMIC (1 << 3)
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000417 u32 options;
Robin Murphy09360402014-08-28 17:51:59 +0100418 enum arm_smmu_arch_version version;
Robin Murphy67b65a32016-04-13 18:12:57 +0100419 enum arm_smmu_implementation model;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100420
421 u32 num_context_banks;
422 u32 num_s2_context_banks;
423 DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
424 atomic_t irptndx;
425
426 u32 num_mapping_groups;
Robin Murphy53867802016-09-12 17:13:48 +0100427 u16 streamid_mask;
428 u16 smr_mask_mask;
Robin Murphy468f4942016-09-12 17:13:49 +0100429 struct arm_smmu_smr *smrs;
Robin Murphya754fd12016-09-12 17:13:50 +0100430 struct arm_smmu_s2cr *s2crs;
Robin Murphy6668f692016-09-12 17:13:54 +0100431 struct mutex stream_map_mutex;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100432
Will Deacon518f7132014-11-14 17:17:54 +0000433 unsigned long va_size;
434 unsigned long ipa_size;
435 unsigned long pa_size;
Robin Murphyd5466352016-05-09 17:20:09 +0100436 unsigned long pgsize_bitmap;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100437
438 u32 num_global_irqs;
439 u32 num_context_irqs;
440 unsigned int *irqs;
441
Patrick Daly8e3371a2017-02-13 22:14:53 -0800442 struct list_head list;
443
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800444 u32 cavium_id_base; /* Specific to Cavium */
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -0700445 /* Specific to QCOM */
446 struct arm_smmu_impl_def_reg *impl_def_attach_registers;
447 unsigned int num_impl_def_attach_registers;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -0800448
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700449 struct arm_smmu_power_resources *pwr;
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700450
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -0800451 spinlock_t atos_lock;
Patrick Dalyc190d932016-08-30 17:23:28 -0700452
453 /* protects idr */
454 struct mutex idr_mutex;
455 struct idr asid_idr;
Patrick Dalyd7476202016-09-08 18:23:28 -0700456
457 struct arm_smmu_arch_ops *arch_ops;
458 void *archdata;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100459};
460
Robin Murphy7602b872016-04-28 17:12:09 +0100461enum arm_smmu_context_fmt {
462 ARM_SMMU_CTX_FMT_NONE,
463 ARM_SMMU_CTX_FMT_AARCH64,
464 ARM_SMMU_CTX_FMT_AARCH32_L,
465 ARM_SMMU_CTX_FMT_AARCH32_S,
Will Deacon45ae7cf2013-06-24 18:31:25 +0100466};
467
468struct arm_smmu_cfg {
Will Deacon45ae7cf2013-06-24 18:31:25 +0100469 u8 cbndx;
470 u8 irptndx;
471 u32 cbar;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600472 u32 procid;
473 u16 asid;
Robin Murphy7602b872016-04-28 17:12:09 +0100474 enum arm_smmu_context_fmt fmt;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100475};
Dan Carpenterfaea13b72013-08-21 09:33:30 +0100476#define INVALID_IRPTNDX 0xff
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600477#define INVALID_CBNDX 0xff
478#define INVALID_ASID 0xffff
Patrick Dalyc190d932016-08-30 17:23:28 -0700479/*
480 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
481 * V8L with TTBCR2.AS == 1 (16-bit ASID) isn't supported yet.
482 */
483#define MAX_ASID 0xff
Will Deacon45ae7cf2013-06-24 18:31:25 +0100484
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600485#define ARM_SMMU_CB_ASID(smmu, cfg) ((cfg)->asid)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800486#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
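/*
 * Note (added for clarity): cavium_id_base is non-zero only on Cavium
 * implementations, where the VMID space is shared across all SMMU instances
 * in the system; offsetting by a per-instance base keeps each context bank's
 * VMID globally unique. On other implementations it is simply 0, so the
 * VMID reduces to cbndx + 1.
 */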
Will Deaconecfadb62013-07-31 19:21:28 +0100487
Will Deaconc752ce42014-06-25 22:46:31 +0100488enum arm_smmu_domain_stage {
489 ARM_SMMU_DOMAIN_S1 = 0,
490 ARM_SMMU_DOMAIN_S2,
491 ARM_SMMU_DOMAIN_NESTED,
492};
493
Patrick Dalyc11d1082016-09-01 15:52:44 -0700494struct arm_smmu_pte_info {
495 void *virt_addr;
496 size_t size;
497 struct list_head entry;
498};
499
Will Deacon45ae7cf2013-06-24 18:31:25 +0100500struct arm_smmu_domain {
Will Deacon44680ee2014-06-25 11:29:12 +0100501 struct arm_smmu_device *smmu;
Will Deacon518f7132014-11-14 17:17:54 +0000502 struct io_pgtable_ops *pgtbl_ops;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -0700503 struct io_pgtable_cfg pgtbl_cfg;
Will Deacon518f7132014-11-14 17:17:54 +0000504 spinlock_t pgtbl_lock;
Will Deacon44680ee2014-06-25 11:29:12 +0100505 struct arm_smmu_cfg cfg;
Will Deaconc752ce42014-06-25 22:46:31 +0100506 enum arm_smmu_domain_stage stage;
Will Deacon518f7132014-11-14 17:17:54 +0000507 struct mutex init_mutex; /* Protects smmu pointer */
Patrick Dalyc190d932016-08-30 17:23:28 -0700508 u32 attributes;
Patrick Dalyc11d1082016-09-01 15:52:44 -0700509 u32 secure_vmid;
510 struct list_head pte_info_list;
511 struct list_head unassign_list;
Patrick Dalye271f212016-10-04 13:24:49 -0700512 struct mutex assign_lock;
Patrick Dalyb7dfda72016-10-04 14:42:58 -0700513 struct list_head secure_pool_list;
Joerg Roedel1d672632015-03-26 13:43:10 +0100514 struct iommu_domain domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100515};
516
Patrick Daly8e3371a2017-02-13 22:14:53 -0800517static DEFINE_SPINLOCK(arm_smmu_devices_lock);
518static LIST_HEAD(arm_smmu_devices);
519
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000520struct arm_smmu_option_prop {
521 u32 opt;
522 const char *prop;
523};
524
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800525static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
526
Robin Murphy7e96c742016-09-14 15:26:46 +0100527static bool using_legacy_binding, using_generic_binding;
528
Mitchel Humpherys29073202014-07-08 09:52:18 -0700529static struct arm_smmu_option_prop arm_smmu_options[] = {
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000530 { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -0800531 { ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -0800532 { ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
Patrick Dalyc190d932016-08-30 17:23:28 -0700533 { ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000534 { 0, NULL},
535};
536
Mitchel Humpherysb8be4132015-02-06 14:25:10 -0800537static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
538 dma_addr_t iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -0700539static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
540 dma_addr_t iova);
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -0600541static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -0800542
Patrick Dalyc11d1082016-09-01 15:52:44 -0700543static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
544static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
Patrick Dalye271f212016-10-04 13:24:49 -0700545static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -0700546static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);
547
Patrick Dalyd7476202016-09-08 18:23:28 -0700548static int arm_smmu_arch_init(struct arm_smmu_device *smmu);
549static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu);
550
Joerg Roedel1d672632015-03-26 13:43:10 +0100551static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
552{
553 return container_of(dom, struct arm_smmu_domain, domain);
554}
555
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000556static void parse_driver_options(struct arm_smmu_device *smmu)
557{
558 int i = 0;
Mitchel Humpherys29073202014-07-08 09:52:18 -0700559
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000560 do {
561 if (of_property_read_bool(smmu->dev->of_node,
562 arm_smmu_options[i].prop)) {
563 smmu->options |= arm_smmu_options[i].opt;
Mitchel Humpherysba822582015-10-20 11:37:41 -0700564 dev_dbg(smmu->dev, "option %s\n",
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000565 arm_smmu_options[i].prop);
566 }
567 } while (arm_smmu_options[++i].opt);
568}
569
Patrick Dalyc190d932016-08-30 17:23:28 -0700570static bool is_dynamic_domain(struct iommu_domain *domain)
571{
572 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
573
574 return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
575}
576
Patrick Dalye271f212016-10-04 13:24:49 -0700577static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
578{
579 return (smmu_domain->secure_vmid != VMID_INVAL);
580}
581
582static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
583{
584 if (arm_smmu_is_domain_secure(smmu_domain))
585 mutex_lock(&smmu_domain->assign_lock);
586}
587
588static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
589{
590 if (arm_smmu_is_domain_secure(smmu_domain))
591 mutex_unlock(&smmu_domain->assign_lock);
592}
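/*
 * Usage sketch (assumed, based on how the secure-domain paths in this driver
 * pair these helpers): page-table updates on a secure domain and the
 * follow-up assignment of the new tables are done under assign_lock:
 *
 *	arm_smmu_secure_domain_lock(smmu_domain);
 *	ret = ops->map(ops, iova, paddr, size, prot);
 *	arm_smmu_assign_table(smmu_domain);
 *	arm_smmu_secure_domain_unlock(smmu_domain);
 */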
593
Will Deacon8f68f8e2014-07-15 11:27:08 +0100594static struct device_node *dev_get_dev_node(struct device *dev)
Will Deacona9a1b0b2014-05-01 18:05:08 +0100595{
596 if (dev_is_pci(dev)) {
597 struct pci_bus *bus = to_pci_dev(dev)->bus;
Mitchel Humpherys29073202014-07-08 09:52:18 -0700598
Will Deacona9a1b0b2014-05-01 18:05:08 +0100599 while (!pci_is_root_bus(bus))
600 bus = bus->parent;
Robin Murphyd5b41782016-09-14 15:21:39 +0100601 return of_node_get(bus->bridge->parent->of_node);
Will Deacona9a1b0b2014-05-01 18:05:08 +0100602 }
603
Robin Murphyd5b41782016-09-14 15:21:39 +0100604 return of_node_get(dev->of_node);
Will Deacona9a1b0b2014-05-01 18:05:08 +0100605}
606
Robin Murphyd5b41782016-09-14 15:21:39 +0100607static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100608{
Robin Murphyd5b41782016-09-14 15:21:39 +0100609 *((__be32 *)data) = cpu_to_be32(alias);
610 return 0; /* Continue walking */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100611}
612
Robin Murphyd5b41782016-09-14 15:21:39 +0100613static int __find_legacy_master_phandle(struct device *dev, void *data)
Will Deacona9a1b0b2014-05-01 18:05:08 +0100614{
Robin Murphyd5b41782016-09-14 15:21:39 +0100615 struct of_phandle_iterator *it = *(void **)data;
616 struct device_node *np = it->node;
617 int err;
Will Deacona9a1b0b2014-05-01 18:05:08 +0100618
Robin Murphyd5b41782016-09-14 15:21:39 +0100619 of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
620 "#stream-id-cells", 0)
621 if (it->node == np) {
622 *(void **)data = dev;
623 return 1;
Olav Haugan3c8766d2014-08-22 17:12:32 -0700624 }
Robin Murphyd5b41782016-09-14 15:21:39 +0100625 it->node = np;
626 return err == -ENOENT ? 0 : err;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100627}
628
Robin Murphyfe52d4f2016-09-12 17:13:52 +0100629static struct platform_driver arm_smmu_driver;
Robin Murphy06e393e2016-09-12 17:13:55 +0100630static struct iommu_ops arm_smmu_ops;
Robin Murphyfe52d4f2016-09-12 17:13:52 +0100631
Robin Murphy06e393e2016-09-12 17:13:55 +0100632static int arm_smmu_register_legacy_master(struct device *dev,
633 struct arm_smmu_device **smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100634{
Robin Murphy06e393e2016-09-12 17:13:55 +0100635 struct device *smmu_dev;
Robin Murphyd5b41782016-09-14 15:21:39 +0100636 struct device_node *np;
637 struct of_phandle_iterator it;
638 void *data = &it;
Robin Murphy06e393e2016-09-12 17:13:55 +0100639 u32 *sids;
Robin Murphyd5b41782016-09-14 15:21:39 +0100640 __be32 pci_sid;
641 int err = 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100642
Stephen Boydfecdeef2017-03-01 16:53:19 -0800643 memset(&it, 0, sizeof(it));
Robin Murphyd5b41782016-09-14 15:21:39 +0100644 np = dev_get_dev_node(dev);
645 if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
646 of_node_put(np);
647 return -ENODEV;
648 }
649
650 it.node = np;
Robin Murphyfe52d4f2016-09-12 17:13:52 +0100651 err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
652 __find_legacy_master_phandle);
Robin Murphy06e393e2016-09-12 17:13:55 +0100653 smmu_dev = data;
Robin Murphyd5b41782016-09-14 15:21:39 +0100654 of_node_put(np);
655 if (err == 0)
656 return -ENODEV;
657 if (err < 0)
658 return err;
Will Deacon44680ee2014-06-25 11:29:12 +0100659
Robin Murphyd5b41782016-09-14 15:21:39 +0100660 if (dev_is_pci(dev)) {
661 /* "mmu-masters" assumes Stream ID == Requester ID */
662 pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
663 &pci_sid);
664 it.cur = &pci_sid;
665 it.cur_count = 1;
666 }
667
Robin Murphy06e393e2016-09-12 17:13:55 +0100668 err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
669 &arm_smmu_ops);
670 if (err)
671 return err;
672
673 sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
674 if (!sids)
Robin Murphyd5b41782016-09-14 15:21:39 +0100675 return -ENOMEM;
676
Robin Murphy06e393e2016-09-12 17:13:55 +0100677 *smmu = dev_get_drvdata(smmu_dev);
678 of_phandle_iterator_args(&it, sids, it.cur_count);
679 err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
680 kfree(sids);
681 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100682}
683
684static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
685{
686 int idx;
687
688 do {
689 idx = find_next_zero_bit(map, end, start);
690 if (idx == end)
691 return -ENOSPC;
692 } while (test_and_set_bit(idx, map));
693
694 return idx;
695}
696
697static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
698{
699 clear_bit(idx, map);
700}
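/*
 * Example (sketch, mirroring the context-bank allocation done when a domain
 * is initialised): banks below num_s2_context_banks support stage 2 only,
 * so stage-1 allocations start above them:
 *
 *	int cbndx = __arm_smmu_alloc_bitmap(smmu->context_map,
 *					    smmu->num_s2_context_banks,
 *					    smmu->num_context_banks);
 *	if (cbndx < 0)
 *		return cbndx;
 *	...
 *	__arm_smmu_free_bitmap(smmu->context_map, cbndx);
 */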
701
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700702static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700703{
704 int i, ret = 0;
705
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700706 for (i = 0; i < pwr->num_clocks; ++i) {
707 ret = clk_prepare(pwr->clocks[i]);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700708 if (ret) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700709 dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700710 while (i--)
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700711 clk_unprepare(pwr->clocks[i]);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700712 break;
713 }
714 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700715 return ret;
716}
717
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700718static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700719{
720 int i;
721
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700722 for (i = pwr->num_clocks; i; --i)
723 clk_unprepare(pwr->clocks[i - 1]);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700724}
725
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700726static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -0700727{
728 int i, ret = 0;
Patrick Daly8befb662016-08-17 20:03:28 -0700729
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700730 for (i = 0; i < pwr->num_clocks; ++i) {
731 ret = clk_enable(pwr->clocks[i]);
Patrick Daly8befb662016-08-17 20:03:28 -0700732 if (ret) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700733 dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
Patrick Daly8befb662016-08-17 20:03:28 -0700734 while (i--)
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700735 clk_disable(pwr->clocks[i]);
Patrick Daly8befb662016-08-17 20:03:28 -0700736 break;
737 }
738 }
739
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700740 return ret;
741}
Patrick Daly8befb662016-08-17 20:03:28 -0700742
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700743static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
744{
745 int i;
746
747 for (i = pwr->num_clocks; i; --i)
748 clk_disable(pwr->clocks[i - 1]);
749}
750
751static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
752{
753 if (!pwr->bus_client)
754 return 0;
755 return msm_bus_scale_client_update_request(pwr->bus_client, 1);
756}
757
758static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
759{
760 if (!pwr->bus_client)
761 return;
762 WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
763}
764
765/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
766static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
767{
768 int ret = 0;
769 unsigned long flags;
770
771 spin_lock_irqsave(&pwr->clock_refs_lock, flags);
772 if (pwr->clock_refs_count > 0) {
773 pwr->clock_refs_count++;
774 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
775 return 0;
776 }
777
778 ret = arm_smmu_enable_clocks(pwr);
779 if (!ret)
780 pwr->clock_refs_count = 1;
781
782 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
Patrick Daly8befb662016-08-17 20:03:28 -0700783 return ret;
784}
785
786/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700787static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -0700788{
Patrick Daly8befb662016-08-17 20:03:28 -0700789 unsigned long flags;
790
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700791 spin_lock_irqsave(&pwr->clock_refs_lock, flags);
792 if (pwr->clock_refs_count == 0) {
793 WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
794 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
795 return;
796
797 } else if (pwr->clock_refs_count > 1) {
798 pwr->clock_refs_count--;
799 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
Patrick Daly8befb662016-08-17 20:03:28 -0700800 return;
801 }
802
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700803 arm_smmu_disable_clocks(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700804
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700805 pwr->clock_refs_count = 0;
806 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
Patrick Daly8befb662016-08-17 20:03:28 -0700807}
808
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700809static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700810{
811 int ret;
812
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700813 mutex_lock(&pwr->power_lock);
814 if (pwr->power_count > 0) {
815 pwr->power_count += 1;
816 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700817 return 0;
818 }
819
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700820 ret = regulator_bulk_enable(pwr->num_gdscs, pwr->gdscs);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -0700821 if (ret)
822 goto out_unlock;
823
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700824 ret = arm_smmu_request_bus(pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -0700825 if (ret)
826 goto out_disable_regulators;
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700827
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700828 ret = arm_smmu_prepare_clocks(pwr);
Patrick Daly2764f952016-09-06 19:22:44 -0700829 if (ret)
830 goto out_disable_bus;
831
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700832 pwr->power_count = 1;
833 mutex_unlock(&pwr->power_lock);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -0700834 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700835
Patrick Daly2764f952016-09-06 19:22:44 -0700836out_disable_bus:
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700837 arm_smmu_unrequest_bus(pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -0700838out_disable_regulators:
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700839 regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -0700840out_unlock:
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700841 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700842 return ret;
843}
844
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700845static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700846{
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700847 mutex_lock(&pwr->power_lock);
848 if (pwr->power_count == 0) {
849 WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
850 mutex_unlock(&pwr->power_lock);
851 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700852
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700853 } else if (pwr->power_count > 1) {
854 pwr->power_count--;
855 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700856 return;
857 }
858
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700859 arm_smmu_unprepare_clocks(pwr);
860 arm_smmu_unrequest_bus(pwr);
861 regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700862
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700863 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700864}
865
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700866static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -0700867{
868 int ret;
869
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700870 ret = arm_smmu_power_on_slow(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700871 if (ret)
872 return ret;
873
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700874 ret = arm_smmu_power_on_atomic(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700875 if (ret)
876 goto out_disable;
877
878 return 0;
879
880out_disable:
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700881 arm_smmu_power_off_slow(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700882 return ret;
883}
884
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700885static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -0700886{
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700887 arm_smmu_power_off_atomic(pwr);
888 arm_smmu_power_off_slow(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700889}
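/*
 * Usage sketch (assumed): register accesses are bracketed by a power vote,
 * as the fault handlers below do, e.g.
 *
 *	if (arm_smmu_power_on(smmu->pwr))
 *		return -ENODEV;
 *	... touch SMMU registers ...
 *	arm_smmu_power_off(smmu->pwr);
 *
 * Atomic contexts must use the _atomic variants instead, which assume the
 * slow (sleeping) resources were already enabled by an earlier power_on.
 */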
890
891/*
892 * Must be used instead of arm_smmu_power_on if it may be called from
893 * atomic context
894 */
895static int arm_smmu_domain_power_on(struct iommu_domain *domain,
896 struct arm_smmu_device *smmu)
897{
898 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
899 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
900
901 if (atomic_domain)
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700902 return arm_smmu_power_on_atomic(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700903
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700904 return arm_smmu_power_on(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700905}
906
907/*
908 * Must be used instead of arm_smmu_power_off if it may be called from
909 * atomic context
910 */
911static void arm_smmu_domain_power_off(struct iommu_domain *domain,
912 struct arm_smmu_device *smmu)
913{
914 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
915 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
916
917 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700918 arm_smmu_power_off_atomic(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700919 return;
920 }
921
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700922 arm_smmu_power_off(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700923}
924
Will Deacon45ae7cf2013-06-24 18:31:25 +0100925/* Wait for any pending TLB invalidations to complete */
Mitchel Humpherysf3007992015-06-19 15:00:14 -0700926static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
927 int cbndx)
928{
929 void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
930 u32 val;
931
932 writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
933 if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
934 !(val & TLBSTATUS_SACTIVE),
Mitchel Humpherys9b1b8942015-06-25 18:17:15 -0700935 0, TLB_LOOP_TIMEOUT))
Mitchel Humpherysf3007992015-06-19 15:00:14 -0700936 dev_err(smmu->dev, "TLBSYNC timeout!\n");
937}
938
Will Deacon518f7132014-11-14 17:17:54 +0000939static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100940{
941 int count = 0;
942 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
943
944 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
945 while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
946 & sTLBGSTATUS_GSACTIVE) {
947 cpu_relax();
948 if (++count == TLB_LOOP_TIMEOUT) {
949 dev_err_ratelimited(smmu->dev,
950 "TLB sync timed out -- SMMU may be deadlocked\n");
951 return;
952 }
953 udelay(1);
954 }
955}
956
Will Deacon518f7132014-11-14 17:17:54 +0000957static void arm_smmu_tlb_sync(void *cookie)
Will Deacon1463fe42013-07-31 19:21:27 +0100958{
Will Deacon518f7132014-11-14 17:17:54 +0000959 struct arm_smmu_domain *smmu_domain = cookie;
Mitchel Humpherysf3007992015-06-19 15:00:14 -0700960 arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
Will Deacon518f7132014-11-14 17:17:54 +0000961}
962
Patrick Daly8befb662016-08-17 20:03:28 -0700963/* Must be called with clocks/regulators enabled */
Will Deacon518f7132014-11-14 17:17:54 +0000964static void arm_smmu_tlb_inv_context(void *cookie)
965{
966 struct arm_smmu_domain *smmu_domain = cookie;
Will Deacon44680ee2014-06-25 11:29:12 +0100967 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
968 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon1463fe42013-07-31 19:21:27 +0100969 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
Will Deacon518f7132014-11-14 17:17:54 +0000970 void __iomem *base;
Will Deacon1463fe42013-07-31 19:21:27 +0100971
972 if (stage1) {
973 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800974 writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +0100975 base + ARM_SMMU_CB_S1_TLBIASID);
Mitchel Humpherysf3007992015-06-19 15:00:14 -0700976 arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +0100977 } else {
978 base = ARM_SMMU_GR0(smmu);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800979 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +0100980 base + ARM_SMMU_GR0_TLBIVMID);
Mitchel Humpherysf3007992015-06-19 15:00:14 -0700981 __arm_smmu_tlb_sync(smmu);
Will Deacon1463fe42013-07-31 19:21:27 +0100982 }
Will Deacon1463fe42013-07-31 19:21:27 +0100983}
984
Will Deacon518f7132014-11-14 17:17:54 +0000985static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
Robin Murphy06c610e2015-12-07 18:18:53 +0000986 size_t granule, bool leaf, void *cookie)
Will Deacon518f7132014-11-14 17:17:54 +0000987{
988 struct arm_smmu_domain *smmu_domain = cookie;
989 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
990 struct arm_smmu_device *smmu = smmu_domain->smmu;
991 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
992 void __iomem *reg;
993
994 if (stage1) {
995 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
996 reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
997
Robin Murphy7602b872016-04-28 17:12:09 +0100998 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +0000999 iova = (iova >> 12) << 12;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001000 iova |= ARM_SMMU_CB_ASID(smmu, cfg);
Robin Murphy75df1382015-12-07 18:18:52 +00001001 do {
1002 writel_relaxed(iova, reg);
1003 iova += granule;
1004 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001005 } else {
1006 iova >>= 12;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001007 iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
Robin Murphy75df1382015-12-07 18:18:52 +00001008 do {
1009 writeq_relaxed(iova, reg);
1010 iova += granule >> 12;
1011 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001012 }
Will Deacon518f7132014-11-14 17:17:54 +00001013 } else if (smmu->version == ARM_SMMU_V2) {
1014 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1015 reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
1016 ARM_SMMU_CB_S2_TLBIIPAS2;
Robin Murphy75df1382015-12-07 18:18:52 +00001017 iova >>= 12;
1018 do {
Robin Murphyf9a05f02016-04-13 18:13:01 +01001019 smmu_write_atomic_lq(iova, reg);
Robin Murphy75df1382015-12-07 18:18:52 +00001020 iova += granule >> 12;
1021 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001022 } else {
1023 reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001024 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
Will Deacon518f7132014-11-14 17:17:54 +00001025 }
1026}
1027
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001028struct arm_smmu_secure_pool_chunk {
1029 void *addr;
1030 size_t size;
1031 struct list_head list;
1032};
1033
1034static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
1035 size_t size)
1036{
1037 struct arm_smmu_secure_pool_chunk *it;
1038
1039 list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
1040 if (it->size == size) {
1041 void *addr = it->addr;
1042
1043 list_del(&it->list);
1044 kfree(it);
1045 return addr;
1046 }
1047 }
1048
1049 return NULL;
1050}
1051
1052static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
1053 void *addr, size_t size)
1054{
1055 struct arm_smmu_secure_pool_chunk *chunk;
1056
1057 chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
1058 if (!chunk)
1059 return -ENOMEM;
1060
1061 chunk->addr = addr;
1062 chunk->size = size;
1063 memset(addr, 0, size);
1064 list_add(&chunk->list, &smmu_domain->secure_pool_list);
1065
1066 return 0;
1067}
1068
1069static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
1070{
1071 struct arm_smmu_secure_pool_chunk *it, *i;
1072
1073 list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
1074 arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
1075 /* pages will be freed later (after being unassigned) */
1076 kfree(it);
1077 }
1078}
1079
Patrick Dalyc11d1082016-09-01 15:52:44 -07001080static void *arm_smmu_alloc_pages_exact(void *cookie,
1081 size_t size, gfp_t gfp_mask)
1082{
1083 int ret;
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001084 void *page;
1085 struct arm_smmu_domain *smmu_domain = cookie;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001086
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001087 if (!arm_smmu_is_domain_secure(smmu_domain))
1088 return alloc_pages_exact(size, gfp_mask);
1089
1090 page = arm_smmu_secure_pool_remove(smmu_domain, size);
1091 if (page)
1092 return page;
1093
1094 page = alloc_pages_exact(size, gfp_mask);
1095 if (page) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07001096 ret = arm_smmu_prepare_pgtable(page, cookie);
1097 if (ret) {
1098 free_pages_exact(page, size);
1099 return NULL;
1100 }
1101 }
1102
1103 return page;
1104}
1105
1106static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
1107{
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001108 struct arm_smmu_domain *smmu_domain = cookie;
1109
1110 if (!arm_smmu_is_domain_secure(smmu_domain)) {
1111 free_pages_exact(virt, size);
1112 return;
1113 }
1114
1115 if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
1116 arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001117}
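/*
 * Note (added for clarity, based on the pool helpers above): for secure
 * domains, freed page-table pages cannot simply be handed back to the
 * kernel because they are still assigned to the secure VMID. They are
 * parked on secure_pool_list for reuse instead, and are only unassigned and
 * truly freed when the domain is torn down (arm_smmu_secure_pool_destroy).
 */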
1118
Will Deacon518f7132014-11-14 17:17:54 +00001119static struct iommu_gather_ops arm_smmu_gather_ops = {
1120 .tlb_flush_all = arm_smmu_tlb_inv_context,
1121 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
1122 .tlb_sync = arm_smmu_tlb_sync,
Patrick Dalyc11d1082016-09-01 15:52:44 -07001123 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
1124 .free_pages_exact = arm_smmu_free_pages_exact,
Will Deacon518f7132014-11-14 17:17:54 +00001125};
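/*
 * Sketch (assumption, following the usual io-pgtable wiring in this driver):
 * the gather ops above are handed to the page-table allocator through the
 * io_pgtable_cfg when a context is initialised; ias, oas and fmt stand in
 * for values derived during context-bank setup:
 *
 *	smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
 *		.pgsize_bitmap	= smmu->pgsize_bitmap,
 *		.ias		= ias,
 *		.oas		= oas,
 *		.tlb		= &arm_smmu_gather_ops,
 *		.iommu_dev	= smmu->dev,
 *	};
 *	pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
 *					 smmu_domain);
 */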
1126
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001127static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
1128 dma_addr_t iova, u32 fsr)
1129{
1130 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001131 struct arm_smmu_device *smmu;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001132 phys_addr_t phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07001133 phys_addr_t phys_post_tlbiall;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001134
1135 smmu = smmu_domain->smmu;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001136
Patrick Dalyad441dd2016-09-15 15:50:46 -07001137 if (smmu->arch_ops && smmu->arch_ops->iova_to_phys_fault) {
1138 smmu->arch_ops->iova_to_phys_fault(domain, iova, &phys,
1139 &phys_post_tlbiall);
1140 } else {
1141 phys = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001142 arm_smmu_tlb_inv_context(smmu_domain);
Patrick Dalyad441dd2016-09-15 15:50:46 -07001143 phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001144 }
1145
Patrick Dalyad441dd2016-09-15 15:50:46 -07001146 if (phys != phys_post_tlbiall) {
1147 dev_err(smmu->dev,
1148 "ATOS results differed across TLBIALL...\n"
1149 "Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
1150 }
1151 if (!phys_post_tlbiall) {
1152 dev_err(smmu->dev,
1153 "ATOS still failed. If the page tables look good (check the software table walk) then hardware might be misbehaving.\n");
1154 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001155
Patrick Dalyad441dd2016-09-15 15:50:46 -07001156 return phys_post_tlbiall;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001157}
1158
Will Deacon45ae7cf2013-06-24 18:31:25 +01001159static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
1160{
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001161 int flags, ret, tmp;
Patrick Daly5ba28112016-08-30 19:18:52 -07001162 u32 fsr, fsynr, resume;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001163 unsigned long iova;
1164 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +01001165 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001166 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1167 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001168 void __iomem *cb_base;
Shalaj Jain04059c52015-03-03 13:34:59 -08001169 void __iomem *gr1_base;
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001170 bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001171 phys_addr_t phys_soft;
Shalaj Jain04059c52015-03-03 13:34:59 -08001172 u32 frsynra;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001173 bool non_fatal_fault = !!(smmu_domain->attributes &
1174 DOMAIN_ATTR_NON_FATAL_FAULTS);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001175
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001176 static DEFINE_RATELIMIT_STATE(_rs,
1177 DEFAULT_RATELIMIT_INTERVAL,
1178 DEFAULT_RATELIMIT_BURST);
1179
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001180 ret = arm_smmu_power_on(smmu->pwr);
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001181 if (ret)
1182 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001183
Shalaj Jain04059c52015-03-03 13:34:59 -08001184 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001185 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001186 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
1187
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001188 if (!(fsr & FSR_FAULT)) {
1189 ret = IRQ_NONE;
1190 goto out_power_off;
1191 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001192
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001193 if (fatal_asf && (fsr & FSR_ASF)) {
1194 dev_err(smmu->dev,
1195 "Took an address size fault. Refusing to recover.\n");
1196 BUG();
1197 }
1198
Will Deacon45ae7cf2013-06-24 18:31:25 +01001199 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
Patrick Daly5ba28112016-08-30 19:18:52 -07001200 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001201 if (fsr & FSR_TF)
1202 flags |= IOMMU_FAULT_TRANSLATION;
1203 if (fsr & FSR_PF)
1204 flags |= IOMMU_FAULT_PERMISSION;
Mitchel Humpherysdd75e332015-08-19 11:02:33 -07001205 if (fsr & FSR_EF)
1206 flags |= IOMMU_FAULT_EXTERNAL;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001207 if (fsr & FSR_SS)
1208 flags |= IOMMU_FAULT_TRANSACTION_STALLED;
Patrick Daly5ba28112016-08-30 19:18:52 -07001209
Robin Murphyf9a05f02016-04-13 18:13:01 +01001210 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001211 phys_soft = arm_smmu_iova_to_phys(domain, iova);
Shalaj Jain04059c52015-03-03 13:34:59 -08001212 frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
1213 frsynra &= CBFRSYNRA_SID_MASK;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001214 tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
1215 if (!tmp || (tmp == -EBUSY)) {
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001216 dev_dbg(smmu->dev,
1217 "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1218 iova, fsr, fsynr, cfg->cbndx);
1219 dev_dbg(smmu->dev,
1220 "soft iova-to-phys=%pa\n", &phys_soft);
Patrick Daly5ba28112016-08-30 19:18:52 -07001221 ret = IRQ_HANDLED;
Shrenuj Bansald5083c02015-09-18 14:59:09 -07001222 resume = RESUME_TERMINATE;
Patrick Daly5ba28112016-08-30 19:18:52 -07001223 } else {
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001224 phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
1225 fsr);
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001226 if (__ratelimit(&_rs)) {
1227 dev_err(smmu->dev,
1228 "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1229 iova, fsr, fsynr, cfg->cbndx);
1230 dev_err(smmu->dev, "FAR = %016lx\n",
1231 (unsigned long)iova);
1232 dev_err(smmu->dev,
1233 "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
1234 fsr,
1235 (fsr & 0x02) ? "TF " : "",
1236 (fsr & 0x04) ? "AFF " : "",
1237 (fsr & 0x08) ? "PF " : "",
1238 (fsr & 0x10) ? "EF " : "",
1239 (fsr & 0x20) ? "TLBMCF " : "",
1240 (fsr & 0x40) ? "TLBLKF " : "",
1241 (fsr & 0x80) ? "MHF " : "",
1242 (fsr & 0x40000000) ? "SS " : "",
1243 (fsr & 0x80000000) ? "MULTI " : "");
1244 dev_err(smmu->dev,
1245 "soft iova-to-phys=%pa\n", &phys_soft);
Mitchel Humpherysd03b65d2015-11-05 11:50:29 -08001246 if (!phys_soft)
1247 dev_err(smmu->dev,
1248 "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
1249 dev_name(smmu->dev));
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001250 dev_err(smmu->dev,
1251 "hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
1252 dev_err(smmu->dev, "SID=0x%x\n", frsynra);
1253 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001254 ret = IRQ_NONE;
1255 resume = RESUME_TERMINATE;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001256 if (!non_fatal_fault) {
1257 dev_err(smmu->dev,
1258 "Unhandled arm-smmu context fault!\n");
1259 BUG();
1260 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001261 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001262
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001263 /*
1264 * If the client returns -EBUSY, do not clear FSR and do not RESUME
1265 * if stalled. This is required to keep the IOMMU client stalled on
1266 * the outstanding fault. This gives the client a chance to take any
1267 * debug action and then terminate the stalled transaction.
1268 * So, the sequence in case of stall on fault should be:
1269 * 1) Do not clear FSR or write to RESUME here
1270 * 2) Client takes any debug action
1271 * 3) Client terminates the stalled transaction and resumes the IOMMU
1272 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1273 * not before so that the fault remains outstanding. This ensures
1274 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1275 * need to be terminated.
1276 */
1277 if (tmp != -EBUSY) {
1278 /* Clear the faulting FSR */
1279 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001280
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001281 /*
1282 * Barrier required to ensure that the FSR is cleared
1283 * before resuming SMMU operation
1284 */
1285 wmb();
1286
1287 /* Retry or terminate any stalled transactions */
1288 if (fsr & FSR_SS)
1289 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1290 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001291
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001292out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001293 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001294
Patrick Daly5ba28112016-08-30 19:18:52 -07001295 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001296}
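/*
 * Summary of the handler above: report_iommu_fault() gives any fault
 * handler registered on the domain first refusal. A return of 0 or
 * -EBUSY means the client has handled the fault: 0 lets the code above
 * clear FSR and terminate the stalled transaction, while -EBUSY leaves
 * FSR set and the transaction stalled so the client can inspect state
 * before resuming it, as described in the block comment above.
 *
 * Illustrative client-side sketch (assumed usage, not part of this
 * file), installed with iommu_set_fault_handler():
 *
 *	static int example_smmu_fault(struct iommu_domain *domain,
 *				      struct device *dev,
 *				      unsigned long iova, int flags,
 *				      void *token)
 *	{
 *		return 0;	// handled: the stalled access is terminated
 *	}
 *
 *	iommu_set_fault_handler(domain, example_smmu_fault, NULL);
 */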
1297
1298static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1299{
1300 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1301 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001302 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001303
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001304 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001305 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001306
Will Deacon45ae7cf2013-06-24 18:31:25 +01001307 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1308 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1309 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1310 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1311
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001312 if (!gfsr) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001313 arm_smmu_power_off(smmu->pwr);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001314 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001315 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001316
Will Deacon45ae7cf2013-06-24 18:31:25 +01001317 dev_err_ratelimited(smmu->dev,
1318 "Unexpected global fault, this could be serious\n");
1319 dev_err_ratelimited(smmu->dev,
1320 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1321 gfsr, gfsynr0, gfsynr1, gfsynr2);
1322
1323 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001324 arm_smmu_power_off(smmu->pwr);
Will Deaconadaba322013-07-31 19:21:26 +01001325 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001326}
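/*
 * Global faults land here rather than in the per-context handler above.
 * They typically indicate a configuration problem, for example a
 * transaction whose StreamID matches no stream mapping entry, so the raw
 * sGFSR/sGFSYNRn syndrome is dumped and the fault is acknowledged by
 * writing GFSR back.
 */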
1327
Will Deacon518f7132014-11-14 17:17:54 +00001328static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1329 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001330{
Robin Murphyb94df6f2016-08-11 17:44:06 +01001331 u32 reg, reg2;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001332 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001333 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +01001334 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1335 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001336 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001337
Will Deacon45ae7cf2013-06-24 18:31:25 +01001338 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001339 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1340 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001341
Will Deacon4a1c93c2015-03-04 12:21:03 +00001342 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001343 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1344 reg = CBA2R_RW64_64BIT;
1345 else
1346 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001347 /* 16-bit VMIDs live in CBA2R */
1348 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001349 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001350
Will Deacon4a1c93c2015-03-04 12:21:03 +00001351 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
1352 }
1353
Will Deacon45ae7cf2013-06-24 18:31:25 +01001354 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001355 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001356 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001357 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001358
Will Deacon57ca90f2014-02-06 14:59:05 +00001359 /*
1360 * Use the weakest shareability/memory types, so they are
1361 * overridden by the ttbcr/pte.
1362 */
1363 if (stage1) {
1364 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1365 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001366 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1367 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001368 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001369 }
Will Deacon44680ee2014-06-25 11:29:12 +01001370 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001371
Will Deacon518f7132014-11-14 17:17:54 +00001372 /* TTBRs */
1373 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001374 u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001375
Robin Murphyb94df6f2016-08-11 17:44:06 +01001376 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1377 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
1378 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
1379 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
1380 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
1381 writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
1382 } else {
1383 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1384 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1385 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
1386 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
1387 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1388 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
1389 }
Will Deacon518f7132014-11-14 17:17:54 +00001390 } else {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001391 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001392 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +00001393 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001394
Will Deacon518f7132014-11-14 17:17:54 +00001395 /* TTBCR */
1396 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001397 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1398 reg = pgtbl_cfg->arm_v7s_cfg.tcr;
1399 reg2 = 0;
1400 } else {
1401 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1402 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
1403 reg2 |= TTBCR2_SEP_UPSTREAM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001404 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001405 if (smmu->version > ARM_SMMU_V1)
1406 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001407 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001408 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001409 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001410 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001411
Will Deacon518f7132014-11-14 17:17:54 +00001412 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001413 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001414 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1415 reg = pgtbl_cfg->arm_v7s_cfg.prrr;
1416 reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
1417 } else {
1418 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1419 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1420 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001421 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001422 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001423 }
1424
Will Deacon45ae7cf2013-06-24 18:31:25 +01001425 /* SCTLR */
Robin Murphyb94df6f2016-08-11 17:44:06 +01001426 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
Patrick Dalye62d3362016-03-15 18:58:28 -07001427 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) ||
1428 !stage1)
1429 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001430 if (stage1)
1431 reg |= SCTLR_S1_ASIDPNE;
1432#ifdef __BIG_ENDIAN
1433 reg |= SCTLR_E;
1434#endif
Will Deacon25724842013-08-21 13:49:53 +01001435 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001436}
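/*
 * Programming order above: CBA2R (register width / 16-bit VMID, SMMUv2
 * only) and CBAR first, then the translation table registers (TTBR0/1,
 * TTBCR and TTBCR2, plus MAIR0/1 for stage 1), and SCTLR last so that
 * translation is only enabled once the context bank is fully described.
 * Note that SCTLR_M is deliberately left clear for stage-1 domains with
 * DOMAIN_ATTR_S1_BYPASS set.
 */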
1437
Patrick Dalyc190d932016-08-30 17:23:28 -07001438static int arm_smmu_init_asid(struct iommu_domain *domain,
1439 struct arm_smmu_device *smmu)
1440{
1441 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1442 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1443 bool dynamic = is_dynamic_domain(domain);
1444 int ret;
1445
1446 if (!dynamic) {
1447 cfg->asid = cfg->cbndx + 1;
1448 } else {
1449 mutex_lock(&smmu->idr_mutex);
1450 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1451 smmu->num_context_banks + 2,
1452 MAX_ASID + 1, GFP_KERNEL);
1453
1454 mutex_unlock(&smmu->idr_mutex);
1455 if (ret < 0) {
1456 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1457 ret);
1458 return ret;
1459 }
1460 cfg->asid = ret;
1461 }
1462 return 0;
1463}
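/*
 * ASID numbering: a normal domain simply uses its context bank index
 * plus one, while a dynamic domain (which shares a context bank) gets a
 * cyclically allocated ASID starting above num_context_banks + 1, so the
 * two ranges can never collide.
 */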
1464
1465static void arm_smmu_free_asid(struct iommu_domain *domain)
1466{
1467 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1468 struct arm_smmu_device *smmu = smmu_domain->smmu;
1469 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1470 bool dynamic = is_dynamic_domain(domain);
1471
1472 if (cfg->asid == INVALID_ASID || !dynamic)
1473 return;
1474
1475 mutex_lock(&smmu->idr_mutex);
1476 idr_remove(&smmu->asid_idr, cfg->asid);
1477 mutex_unlock(&smmu->idr_mutex);
1478}
1479
Will Deacon45ae7cf2013-06-24 18:31:25 +01001480static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Will Deacon44680ee2014-06-25 11:29:12 +01001481 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001482{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001483 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001484 unsigned long ias, oas;
1485 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001486 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001487 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001488 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001489 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyce6786f2016-11-09 14:19:23 -08001490 unsigned long quirks = 0;
Patrick Dalyc190d932016-08-30 17:23:28 -07001491 bool dynamic;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001492
Will Deacon518f7132014-11-14 17:17:54 +00001493 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001494 if (smmu_domain->smmu)
1495 goto out_unlock;
1496
Patrick Dalyc190d932016-08-30 17:23:28 -07001497 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1498 smmu_domain->cfg.asid = INVALID_ASID;
1499
Patrick Dalyc190d932016-08-30 17:23:28 -07001500 dynamic = is_dynamic_domain(domain);
1501 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1502 dev_err(smmu->dev, "dynamic domains not supported\n");
1503 ret = -EPERM;
1504 goto out_unlock;
1505 }
1506
Will Deaconc752ce42014-06-25 22:46:31 +01001507 /*
1508 * Mapping the requested stage onto what we support is surprisingly
1509 * complicated, mainly because the spec allows S1+S2 SMMUs without
1510 * support for nested translation. That means we end up with the
1511 * following table:
1512 *
1513 * Requested Supported Actual
1514 * S1 N S1
1515 * S1 S1+S2 S1
1516 * S1 S2 S2
1517 * S1 S1 S1
1518 * N N N
1519 * N S1+S2 S2
1520 * N S2 S2
1521 * N S1 S1
1522 *
1523 * Note that you can't actually request stage-2 mappings.
1524 */
1525 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1526 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1527 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1528 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1529
Robin Murphy7602b872016-04-28 17:12:09 +01001530 /*
1531 * Choosing a suitable context format is even more fiddly. Until we
1532 * grow some way for the caller to express a preference, and/or move
1533 * the decision into the io-pgtable code where it arguably belongs,
1534 * just aim for the closest thing to the rest of the system, and hope
1535 * that the hardware isn't esoteric enough that we can't assume AArch64
1536 * support to be a superset of AArch32 support...
1537 */
1538 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1539 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001540 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
1541 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
1542 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
1543 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
1544 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +01001545 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1546 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1547 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1548 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1549 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1550
1551 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1552 ret = -EINVAL;
1553 goto out_unlock;
1554 }
1555
Will Deaconc752ce42014-06-25 22:46:31 +01001556 switch (smmu_domain->stage) {
1557 case ARM_SMMU_DOMAIN_S1:
1558 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1559 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001560 ias = smmu->va_size;
1561 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001562 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001563 fmt = ARM_64_LPAE_S1;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001564 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +00001565 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001566 ias = min(ias, 32UL);
1567 oas = min(oas, 40UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001568 } else {
1569 fmt = ARM_V7S;
1570 ias = min(ias, 32UL);
1571 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +01001572 }
Will Deaconc752ce42014-06-25 22:46:31 +01001573 break;
1574 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001575 /*
1576 * We will likely want to change this if/when KVM gets
1577 * involved.
1578 */
Will Deaconc752ce42014-06-25 22:46:31 +01001579 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001580 cfg->cbar = CBAR_TYPE_S2_TRANS;
1581 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001582 ias = smmu->ipa_size;
1583 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001584 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001585 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001586 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001587 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001588 ias = min(ias, 40UL);
1589 oas = min(oas, 40UL);
1590 }
Will Deaconc752ce42014-06-25 22:46:31 +01001591 break;
1592 default:
1593 ret = -EINVAL;
1594 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001595 }
1596
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001597 if (is_fast)
1598 fmt = ARM_V8L_FAST;
1599
Patrick Dalyce6786f2016-11-09 14:19:23 -08001600 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
1601 quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001602
Patrick Dalyc190d932016-08-30 17:23:28 -07001603 /* Dynamic domains must set cbndx through domain attribute */
1604 if (!dynamic) {
1605 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
Will Deacon45ae7cf2013-06-24 18:31:25 +01001606 smmu->num_context_banks);
Patrick Dalyc190d932016-08-30 17:23:28 -07001607 if (ret < 0)
1608 goto out_unlock;
1609 cfg->cbndx = ret;
1610 }
Robin Murphyb7862e32016-04-13 18:13:03 +01001611 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001612 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1613 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001614 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001615 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001616 }
1617
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001618 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
Patrick Dalyce6786f2016-11-09 14:19:23 -08001619 .quirks = quirks,
Robin Murphyd5466352016-05-09 17:20:09 +01001620 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +00001621 .ias = ias,
1622 .oas = oas,
1623 .tlb = &arm_smmu_gather_ops,
Robin Murphy2df7a252015-07-29 19:46:06 +01001624 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +00001625 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001626
Will Deacon518f7132014-11-14 17:17:54 +00001627 smmu_domain->smmu = smmu;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001628 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1629 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001630 if (!pgtbl_ops) {
1631 ret = -ENOMEM;
1632 goto out_clear_smmu;
1633 }
1634
Patrick Dalyc11d1082016-09-01 15:52:44 -07001635 /*
1636 * assign any page table memory that might have been allocated
1637 * during alloc_io_pgtable_ops
1638 */
Patrick Dalye271f212016-10-04 13:24:49 -07001639 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001640 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001641 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001642
Robin Murphyd5466352016-05-09 17:20:09 +01001643 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001644 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Robin Murphyd7a8d042016-09-12 17:13:58 +01001645 domain->geometry.aperture_end = (1UL << ias) - 1;
1646 domain->geometry.force_aperture = true;
Will Deacon518f7132014-11-14 17:17:54 +00001647
Patrick Dalyc190d932016-08-30 17:23:28 -07001648 /* Assign an asid */
1649 ret = arm_smmu_init_asid(domain, smmu);
1650 if (ret)
1651 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001652
Patrick Dalyc190d932016-08-30 17:23:28 -07001653 if (!dynamic) {
1654 /* Initialise the context bank with our page table cfg */
1655 arm_smmu_init_context_bank(smmu_domain,
1656 &smmu_domain->pgtbl_cfg);
1657
1658 /*
1659 * Request context fault interrupt. Do this last to avoid the
1660 * handler seeing a half-initialised domain state.
1661 */
1662 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1663 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001664 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1665 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001666 if (ret < 0) {
1667 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1668 cfg->irptndx, irq);
1669 cfg->irptndx = INVALID_IRPTNDX;
1670 goto out_clear_smmu;
1671 }
1672 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001673 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001674 }
Will Deacon518f7132014-11-14 17:17:54 +00001675 mutex_unlock(&smmu_domain->init_mutex);
1676
1677 /* Publish page table ops for map/unmap */
1678 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001679 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001680
Will Deacon518f7132014-11-14 17:17:54 +00001681out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001682 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001683 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001684out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001685 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001686 return ret;
1687}
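/*
 * Illustrative usage (assumed client code, not part of this file): a
 * dynamic domain is requested by setting DOMAIN_ATTR_DYNAMIC before the
 * first attach, in which case the path above allocates only an ASID and
 * page tables and never touches a context bank or claims an IRQ. As the
 * comment in the function notes, such a domain must also be told which
 * context bank index to reuse via a separate domain attribute:
 *
 *	int one = 1;
 *	struct iommu_domain *dyn = iommu_domain_alloc(&platform_bus_type);
 *
 *	iommu_domain_set_attr(dyn, DOMAIN_ATTR_DYNAMIC, &one);
 *	// ...set the context bank attribute here...
 *	iommu_attach_device(dyn, dev);
 */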
1688
Patrick Daly77db4f92016-10-14 15:34:10 -07001689static void arm_smmu_domain_reinit(struct arm_smmu_domain *smmu_domain)
1690{
1691 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1692 smmu_domain->cfg.cbndx = INVALID_CBNDX;
1693 smmu_domain->secure_vmid = VMID_INVAL;
1694}
1695
Will Deacon45ae7cf2013-06-24 18:31:25 +01001696static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1697{
Joerg Roedel1d672632015-03-26 13:43:10 +01001698 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001699 struct arm_smmu_device *smmu = smmu_domain->smmu;
1700 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01001701 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001702 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07001703 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001704 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001705
Robin Murphy7e96c742016-09-14 15:26:46 +01001706 if (!smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001707 return;
1708
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001709 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001710 if (ret) {
 1711		WARN_ONCE(ret, "Oops, powering on smmu %p failed. Leaking context bank\n",

1712 smmu);
1713 return;
1714 }
1715
Patrick Dalyc190d932016-08-30 17:23:28 -07001716 dynamic = is_dynamic_domain(domain);
1717 if (dynamic) {
1718 arm_smmu_free_asid(domain);
1719 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001720 arm_smmu_power_off(smmu->pwr);
Patrick Dalye271f212016-10-04 13:24:49 -07001721 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001722 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001723 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001724 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly77db4f92016-10-14 15:34:10 -07001725 arm_smmu_domain_reinit(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001726 return;
1727 }
1728
Will Deacon518f7132014-11-14 17:17:54 +00001729 /*
1730 * Disable the context bank and free the page tables before freeing
1731 * it.
1732 */
Will Deacon44680ee2014-06-25 11:29:12 +01001733 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001734 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01001735
Will Deacon44680ee2014-06-25 11:29:12 +01001736 if (cfg->irptndx != INVALID_IRPTNDX) {
1737 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08001738 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001739 }
1740
Markus Elfring44830b02015-11-06 18:32:41 +01001741 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07001742 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001743 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001744 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001745 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001746 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001747
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001748 arm_smmu_power_off(smmu->pwr);
Patrick Daly77db4f92016-10-14 15:34:10 -07001749 arm_smmu_domain_reinit(smmu_domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001750}
1751
Joerg Roedel1d672632015-03-26 13:43:10 +01001752static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001753{
1754 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001755
Patrick Daly09801312016-08-29 17:02:52 -07001756 /* Do not support DOMAIN_DMA for now */
1757 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01001758 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001759 /*
1760 * Allocate the domain and initialise some of its data structures.
1761 * We can't really do anything meaningful until we've added a
1762 * master.
1763 */
1764 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1765 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001766 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001767
Robin Murphy7e96c742016-09-14 15:26:46 +01001768 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
1769 iommu_get_dma_cookie(&smmu_domain->domain))) {
Robin Murphy9adb9592016-01-26 18:06:36 +00001770 kfree(smmu_domain);
1771 return NULL;
1772 }
1773
Will Deacon518f7132014-11-14 17:17:54 +00001774 mutex_init(&smmu_domain->init_mutex);
1775 spin_lock_init(&smmu_domain->pgtbl_lock);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001776 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
1777 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07001778 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001779 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Patrick Daly77db4f92016-10-14 15:34:10 -07001780 arm_smmu_domain_reinit(smmu_domain);
Joerg Roedel1d672632015-03-26 13:43:10 +01001781
1782 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001783}
1784
Joerg Roedel1d672632015-03-26 13:43:10 +01001785static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001786{
Joerg Roedel1d672632015-03-26 13:43:10 +01001787 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01001788
1789 /*
1790 * Free the domain resources. We assume that all devices have
1791 * already been detached.
1792 */
Robin Murphy9adb9592016-01-26 18:06:36 +00001793 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001794 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001795 kfree(smmu_domain);
1796}
1797
Robin Murphy468f4942016-09-12 17:13:49 +01001798static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
1799{
1800 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphyd5b41782016-09-14 15:21:39 +01001801 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
Robin Murphy468f4942016-09-12 17:13:49 +01001802
1803 if (smr->valid)
1804 reg |= SMR_VALID;
1805 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
1806}
1807
Robin Murphya754fd12016-09-12 17:13:50 +01001808static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
1809{
1810 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
1811 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
1812 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
1813 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
1814
1815 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
1816}
1817
1818static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
1819{
1820 arm_smmu_write_s2cr(smmu, idx);
1821 if (smmu->smrs)
1822 arm_smmu_write_smr(smmu, idx);
1823}
1824
Robin Murphy6668f692016-09-12 17:13:54 +01001825static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy468f4942016-09-12 17:13:49 +01001826{
1827 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy6668f692016-09-12 17:13:54 +01001828 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001829
Robin Murphy6668f692016-09-12 17:13:54 +01001830 /* Stream indexing is blissfully easy */
1831 if (!smrs)
1832 return id;
Robin Murphy468f4942016-09-12 17:13:49 +01001833
Robin Murphy6668f692016-09-12 17:13:54 +01001834 /* Validating SMRs is... less so */
1835 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1836 if (!smrs[i].valid) {
1837 /*
1838 * Note the first free entry we come across, which
1839 * we'll claim in the end if nothing else matches.
1840 */
1841 if (free_idx < 0)
1842 free_idx = i;
Robin Murphy468f4942016-09-12 17:13:49 +01001843 continue;
1844 }
Robin Murphy6668f692016-09-12 17:13:54 +01001845 /*
1846 * If the new entry is _entirely_ matched by an existing entry,
1847 * then reuse that, with the guarantee that there also cannot
1848 * be any subsequent conflicting entries. In normal use we'd
1849 * expect simply identical entries for this case, but there's
1850 * no harm in accommodating the generalisation.
1851 */
1852 if ((mask & smrs[i].mask) == mask &&
1853 !((id ^ smrs[i].id) & ~smrs[i].mask))
1854 return i;
1855 /*
1856 * If the new entry has any other overlap with an existing one,
1857 * though, then there always exists at least one stream ID
1858 * which would cause a conflict, and we can't allow that risk.
1859 */
1860 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
1861 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001862 }
1863
Robin Murphy6668f692016-09-12 17:13:54 +01001864 return free_idx;
1865}
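/*
 * Worked example of the matching rules above (illustrative values): with
 * an existing valid SMR of id 0x400 / mask 0x3ff, a new request for
 * id 0x410 / mask 0x00f is entirely contained ((0x00f & 0x3ff) == 0x00f
 * and ((0x410 ^ 0x400) & ~0x3ff) == 0) and reuses that index, whereas a
 * request for id 0x000 / mask 0x7ff overlaps it without being contained
 * and is rejected with -EINVAL.
 */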
1866
1867static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1868{
1869 if (--smmu->s2crs[idx].count)
1870 return false;
1871
1872 smmu->s2crs[idx] = s2cr_init_val;
1873 if (smmu->smrs)
1874 smmu->smrs[idx].valid = false;
1875
1876 return true;
1877}
1878
1879static int arm_smmu_master_alloc_smes(struct device *dev)
1880{
Robin Murphy06e393e2016-09-12 17:13:55 +01001881 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1882 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy6668f692016-09-12 17:13:54 +01001883 struct arm_smmu_device *smmu = cfg->smmu;
1884 struct arm_smmu_smr *smrs = smmu->smrs;
1885 struct iommu_group *group;
1886 int i, idx, ret;
1887
1888 mutex_lock(&smmu->stream_map_mutex);
1889 /* Figure out a viable stream map entry allocation */
Robin Murphy06e393e2016-09-12 17:13:55 +01001890 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy7e96c742016-09-14 15:26:46 +01001891 u16 sid = fwspec->ids[i];
1892 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1893
Robin Murphy6668f692016-09-12 17:13:54 +01001894 if (idx != INVALID_SMENDX) {
1895 ret = -EEXIST;
1896 goto out_err;
1897 }
1898
Robin Murphy7e96c742016-09-14 15:26:46 +01001899 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy6668f692016-09-12 17:13:54 +01001900 if (ret < 0)
1901 goto out_err;
1902
1903 idx = ret;
1904 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy7e96c742016-09-14 15:26:46 +01001905 smrs[idx].id = sid;
1906 smrs[idx].mask = mask;
Robin Murphy6668f692016-09-12 17:13:54 +01001907 smrs[idx].valid = true;
1908 }
1909 smmu->s2crs[idx].count++;
1910 cfg->smendx[i] = (s16)idx;
1911 }
1912
1913 group = iommu_group_get_for_dev(dev);
1914 if (!group)
1915 group = ERR_PTR(-ENOMEM);
1916 if (IS_ERR(group)) {
1917 ret = PTR_ERR(group);
1918 goto out_err;
1919 }
1920 iommu_group_put(group);
Robin Murphy468f4942016-09-12 17:13:49 +01001921
Will Deacon45ae7cf2013-06-24 18:31:25 +01001922 /* It worked! Now, poke the actual hardware */
Robin Murphy06e393e2016-09-12 17:13:55 +01001923 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01001924 arm_smmu_write_sme(smmu, idx);
1925 smmu->s2crs[idx].group = group;
1926 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001927
Robin Murphy6668f692016-09-12 17:13:54 +01001928 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001929 return 0;
1930
Robin Murphy6668f692016-09-12 17:13:54 +01001931out_err:
Robin Murphy468f4942016-09-12 17:13:49 +01001932 while (i--) {
Robin Murphy6668f692016-09-12 17:13:54 +01001933 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy468f4942016-09-12 17:13:49 +01001934 cfg->smendx[i] = INVALID_SMENDX;
1935 }
Robin Murphy6668f692016-09-12 17:13:54 +01001936 mutex_unlock(&smmu->stream_map_mutex);
1937 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001938}
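/*
 * The allocation above is done in two passes under stream_map_mutex:
 * first every ID in the fwspec is given (or matched to) a stream map
 * entry and its S2CR refcount is raised, and only once the whole set and
 * the iommu_group have been secured are the SMR/S2CR registers actually
 * written. Any failure unwinds the partially claimed entries.
 */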
1939
Robin Murphy06e393e2016-09-12 17:13:55 +01001940static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001941{
Robin Murphy06e393e2016-09-12 17:13:55 +01001942 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1943 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy1fb519a2016-09-12 17:13:53 +01001944 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01001945
Robin Murphy6668f692016-09-12 17:13:54 +01001946 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01001947 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01001948 if (arm_smmu_free_sme(smmu, idx))
1949 arm_smmu_write_sme(smmu, idx);
Robin Murphy468f4942016-09-12 17:13:49 +01001950 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001951 }
Robin Murphy6668f692016-09-12 17:13:54 +01001952 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001953}
1954
Will Deacon45ae7cf2013-06-24 18:31:25 +01001955static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphy06e393e2016-09-12 17:13:55 +01001956 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001957{
Will Deacon44680ee2014-06-25 11:29:12 +01001958 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01001959 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
1960 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
1961 u8 cbndx = smmu_domain->cfg.cbndx;
Robin Murphy6668f692016-09-12 17:13:54 +01001962 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001963
Robin Murphy06e393e2016-09-12 17:13:55 +01001964 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphya754fd12016-09-12 17:13:50 +01001965 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy6668f692016-09-12 17:13:54 +01001966 continue;
Robin Murphya754fd12016-09-12 17:13:50 +01001967
1968 s2cr[idx].type = type;
1969 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
1970 s2cr[idx].cbndx = cbndx;
1971 arm_smmu_write_s2cr(smmu, idx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001972 }
1973
1974 return 0;
1975}
1976
Patrick Daly09801312016-08-29 17:02:52 -07001977static void arm_smmu_detach_dev(struct iommu_domain *domain,
1978 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001979{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001980 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07001981 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Daly09801312016-08-29 17:02:52 -07001982 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07001983 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07001984
1985 if (dynamic)
1986 return;
1987
Patrick Daly09801312016-08-29 17:02:52 -07001988 if (!smmu) {
1989 dev_err(dev, "Domain not attached; cannot detach!\n");
1990 return;
1991 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001992
Patrick Daly8befb662016-08-17 20:03:28 -07001993 /* Remove additional vote for atomic power */
1994 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001995 WARN_ON(arm_smmu_power_on_atomic(smmu->pwr));
1996 arm_smmu_power_off(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001997 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001998}
1999
Patrick Dalye271f212016-10-04 13:24:49 -07002000static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07002001{
Patrick Dalye271f212016-10-04 13:24:49 -07002002 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002003 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2004 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
2005 int source_vmid = VMID_HLOS;
2006 struct arm_smmu_pte_info *pte_info, *temp;
2007
Patrick Dalye271f212016-10-04 13:24:49 -07002008 if (!arm_smmu_is_domain_secure(smmu_domain))
2009 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002010
Patrick Dalye271f212016-10-04 13:24:49 -07002011 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002012 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2013 PAGE_SIZE, &source_vmid, 1,
2014 dest_vmids, dest_perms, 2);
2015 if (WARN_ON(ret))
2016 break;
2017 }
2018
2019 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2020 entry) {
2021 list_del(&pte_info->entry);
2022 kfree(pte_info);
2023 }
Patrick Dalye271f212016-10-04 13:24:49 -07002024 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002025}
2026
2027static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2028{
2029 int ret;
2030 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002031 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002032 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2033 struct arm_smmu_pte_info *pte_info, *temp;
2034
Patrick Dalye271f212016-10-04 13:24:49 -07002035 if (!arm_smmu_is_domain_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002036 return;
2037
2038 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2039 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2040 PAGE_SIZE, source_vmlist, 2,
2041 &dest_vmids, &dest_perms, 1);
2042 if (WARN_ON(ret))
2043 break;
2044 free_pages_exact(pte_info->virt_addr, pte_info->size);
2045 }
2046
2047 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2048 entry) {
2049 list_del(&pte_info->entry);
2050 kfree(pte_info);
2051 }
2052}
2053
2054static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2055{
2056 struct arm_smmu_domain *smmu_domain = cookie;
2057 struct arm_smmu_pte_info *pte_info;
2058
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002059 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002060
2061 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2062 if (!pte_info)
2063 return;
2064
2065 pte_info->virt_addr = addr;
2066 pte_info->size = size;
2067 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2068}
2069
2070static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2071{
2072 struct arm_smmu_domain *smmu_domain = cookie;
2073 struct arm_smmu_pte_info *pte_info;
2074
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002075 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002076
2077 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2078 if (!pte_info)
2079 return -ENOMEM;
2080 pte_info->virt_addr = addr;
2081 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2082 return 0;
2083}
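/*
 * Secure page-table lifecycle used by the helpers above: pages allocated
 * by the io-pgtable code are queued on pte_info_list by
 * arm_smmu_prepare_pgtable() and handed to the secure VMID (alongside
 * HLOS) by arm_smmu_assign_table(); pages released during unmap are
 * queued on unassign_list by arm_smmu_unprepare_pgtable() and given back
 * to HLOS, then freed, by arm_smmu_unassign_table(). All of this only
 * applies to domains for which arm_smmu_is_domain_secure() is true.
 */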
2084
Will Deacon45ae7cf2013-06-24 18:31:25 +01002085static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2086{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002087 int ret;
Robin Murphy06e393e2016-09-12 17:13:55 +01002088 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Will Deacon518f7132014-11-14 17:17:54 +00002089 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +01002090 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly8befb662016-08-17 20:03:28 -07002091 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002092
Robin Murphy06e393e2016-09-12 17:13:55 +01002093 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002094 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2095 return -ENXIO;
2096 }
Robin Murphy06e393e2016-09-12 17:13:55 +01002097
Robin Murphy4f79b142016-10-17 12:06:21 +01002098 /*
2099 * FIXME: The arch/arm DMA API code tries to attach devices to its own
2100 * domains between of_xlate() and add_device() - we have no way to cope
2101 * with that, so until ARM gets converted to rely on groups and default
2102 * domains, just say no (but more politely than by dereferencing NULL).
2103 * This should be at least a WARN_ON once that's sorted.
2104 */
2105 if (!fwspec->iommu_priv)
2106 return -ENODEV;
2107
Robin Murphy06e393e2016-09-12 17:13:55 +01002108 smmu = fwspec_smmu(fwspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002109
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002110 /* Enable Clocks and Power */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002111 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002112 if (ret)
2113 return ret;
2114
Will Deacon518f7132014-11-14 17:17:54 +00002115 /* Ensure that the domain is finalised */
Robin Murphy06e393e2016-09-12 17:13:55 +01002116 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002117 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002118 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002119
Patrick Dalyc190d932016-08-30 17:23:28 -07002120 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002121 if (is_dynamic_domain(domain)) {
2122 ret = 0;
2123 goto out_power_off;
2124 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002125
Will Deacon45ae7cf2013-06-24 18:31:25 +01002126 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002127 * Sanity check the domain. We don't support domains across
2128 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002129 */
Robin Murphy06e393e2016-09-12 17:13:55 +01002130 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002131 dev_err(dev,
2132 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002133 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002134 ret = -EINVAL;
2135 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002136 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002137
2138 /* Looks ok, so add the device to the domain */
Robin Murphy06e393e2016-09-12 17:13:55 +01002139 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002140
2141out_power_off:
Patrick Daly466237d2016-10-14 15:59:14 -07002142 /*
2143 * Keep an additional vote for non-atomic power until domain is
2144 * detached
2145 */
2146 if (!ret && atomic_domain) {
2147 WARN_ON(arm_smmu_power_on(smmu->pwr));
2148 arm_smmu_power_off_atomic(smmu->pwr);
2149 }
2150
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002151 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002152
Will Deacon45ae7cf2013-06-24 18:31:25 +01002153 return ret;
2154}
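/*
 * Power-vote handling in attach: the register block is powered up around
 * the attach itself, and for domains marked DOMAIN_ATTR_ATOMIC an extra
 * vote is taken with arm_smmu_power_on() and only its atomic part
 * released, leaving a vote held until arm_smmu_detach_dev() drops it.
 */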
2155
Will Deacon45ae7cf2013-06-24 18:31:25 +01002156static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002157 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002158{
Will Deacon518f7132014-11-14 17:17:54 +00002159 int ret;
2160 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002161 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002162	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002163
Will Deacon518f7132014-11-14 17:17:54 +00002164 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002165 return -ENODEV;
2166
Patrick Dalye271f212016-10-04 13:24:49 -07002167 arm_smmu_secure_domain_lock(smmu_domain);
2168
Will Deacon518f7132014-11-14 17:17:54 +00002169 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2170 ret = ops->map(ops, iova, paddr, size, prot);
2171 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002172
2173 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002174 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002175
Will Deacon518f7132014-11-14 17:17:54 +00002176 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002177}
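/*
 * Caller-side sketch (assumed usage, not part of this file): mappings are
 * created through the generic IOMMU API, which lands in arm_smmu_map()
 * above:
 *
 *	ret = iommu_map(domain, iova, phys, SZ_4K,
 *			IOMMU_READ | IOMMU_WRITE);
 */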
2178
2179static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2180 size_t size)
2181{
Will Deacon518f7132014-11-14 17:17:54 +00002182 size_t ret;
2183 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002184 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002185	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002186
Will Deacon518f7132014-11-14 17:17:54 +00002187 if (!ops)
2188 return 0;
2189
Patrick Daly8befb662016-08-17 20:03:28 -07002190 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002191 if (ret)
2192 return ret;
2193
Patrick Dalye271f212016-10-04 13:24:49 -07002194 arm_smmu_secure_domain_lock(smmu_domain);
2195
Will Deacon518f7132014-11-14 17:17:54 +00002196 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2197 ret = ops->unmap(ops, iova, size);
2198 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002199
Patrick Daly8befb662016-08-17 20:03:28 -07002200 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002201 /*
2202 * While splitting up block mappings, we might allocate page table
 2203	 * memory during unmap, so the VMIDs need to be assigned to the
2204 * memory here as well.
2205 */
2206 arm_smmu_assign_table(smmu_domain);
 2207	/* Also unassign any pages that were freed during unmap */
2208 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002209 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002210 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002211}
2212
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002213static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2214 struct scatterlist *sg, unsigned int nents, int prot)
2215{
2216 int ret;
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002217 size_t size;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002218 unsigned long flags;
2219 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2220 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2221
2222 if (!ops)
2223 return -ENODEV;
2224
Patrick Daly8befb662016-08-17 20:03:28 -07002225 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002226 if (ret)
2227 return ret;
2228
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002229 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002230 ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002231 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002232
2233 if (!ret)
2234 arm_smmu_unmap(domain, iova, size);
2235
Patrick Daly8befb662016-08-17 20:03:28 -07002236 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002237 arm_smmu_assign_table(smmu_domain);
2238
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002239 return ret;
2240}
2241
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002242static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyad441dd2016-09-15 15:50:46 -07002243 dma_addr_t iova)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002244{
Joerg Roedel1d672632015-03-26 13:43:10 +01002245 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002246 struct arm_smmu_device *smmu = smmu_domain->smmu;
2247 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 2248	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2249 struct device *dev = smmu->dev;
2250 void __iomem *cb_base;
2251 u32 tmp;
2252 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002253 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002254
2255 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2256
Robin Murphy661d9622015-05-27 17:09:34 +01002257 /* ATS1 registers can only be written atomically */
2258 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002259 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002260 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2261 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002262 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002263
2264 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2265 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002266 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002267 dev_err(dev,
2268 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2269 &iova, &phys);
2270 phys = 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002271 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002272 }
2273
Robin Murphyf9a05f02016-04-13 18:13:01 +01002274 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002275 if (phys & CB_PAR_F) {
2276 dev_err(dev, "translation fault!\n");
2277 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002278 phys = 0;
2279 } else {
2280 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002281 }
Patrick Dalyad441dd2016-09-15 15:50:46 -07002282
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002283 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002284}
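/*
 * The helper above asks the hardware to translate: the page-aligned VA is
 * written to ATS1PR, ATSR is polled for completion, and the result is read
 * back from PAR. On a timeout the software table-walk result is logged for
 * comparison and 0 is returned; a PAR fault likewise returns 0.
 */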
2285
Will Deacon45ae7cf2013-06-24 18:31:25 +01002286static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002287 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002288{
Will Deacon518f7132014-11-14 17:17:54 +00002289 phys_addr_t ret;
2290 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002291 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002292	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002293
Will Deacon518f7132014-11-14 17:17:54 +00002294 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002295 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002296
Will Deacon518f7132014-11-14 17:17:54 +00002297 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002298 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002299 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002300
Will Deacon518f7132014-11-14 17:17:54 +00002301 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002302}
2303
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002304/*
2305 * This function can sleep, and cannot be called from atomic context. Will
 2306 * power on the register block if required. This restriction does not apply to the
2307 * original iova_to_phys() op.
2308 */
2309static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2310 dma_addr_t iova)
2311{
2312 phys_addr_t ret = 0;
2313 unsigned long flags;
2314 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002315
Patrick Dalyad441dd2016-09-15 15:50:46 -07002316 if (smmu_domain->smmu->arch_ops &&
2317 smmu_domain->smmu->arch_ops->iova_to_phys_hard)
2318 return smmu_domain->smmu->arch_ops->iova_to_phys_hard(
2319 domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002320
2321 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2322 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2323 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyad441dd2016-09-15 15:50:46 -07002324 ret = __arm_smmu_iova_to_phys_hard(domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002325
2326 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2327
2328 return ret;
2329}
2330
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002331static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002332{
Will Deacond0948942014-06-24 17:30:10 +01002333 switch (cap) {
2334 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002335 /*
2336 * Return true here as the SMMU can always send out coherent
2337 * requests.
2338 */
2339 return true;
Will Deacond0948942014-06-24 17:30:10 +01002340 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002341 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002342 case IOMMU_CAP_NOEXEC:
2343 return true;
Will Deacond0948942014-06-24 17:30:10 +01002344 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002345 return false;
Will Deacond0948942014-06-24 17:30:10 +01002346 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002347}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002348
Patrick Daly8e3371a2017-02-13 22:14:53 -08002349static struct arm_smmu_device *arm_smmu_get_by_list(struct device_node *np)
2350{
2351 struct arm_smmu_device *smmu;
2352 unsigned long flags;
2353
2354 spin_lock_irqsave(&arm_smmu_devices_lock, flags);
2355 list_for_each_entry(smmu, &arm_smmu_devices, list) {
2356 if (smmu->dev->of_node == np) {
2357 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2358 return smmu;
2359 }
2360 }
2361 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2362 return NULL;
2363}
2364
Robin Murphy7e96c742016-09-14 15:26:46 +01002365static int arm_smmu_match_node(struct device *dev, void *data)
2366{
2367 return dev->of_node == data;
2368}
2369
2370static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
2371{
2372 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
2373 np, arm_smmu_match_node);
2374 put_device(dev);
Patrick Daly8e3371a2017-02-13 22:14:53 -08002375 return dev ? dev_get_drvdata(dev) : arm_smmu_get_by_list(np);
Robin Murphy7e96c742016-09-14 15:26:46 +01002376}
2377
Will Deacon03edb222015-01-19 14:27:33 +00002378static int arm_smmu_add_device(struct device *dev)
2379{
Robin Murphy06e393e2016-09-12 17:13:55 +01002380 struct arm_smmu_device *smmu;
Robin Murphyd5b41782016-09-14 15:21:39 +01002381 struct arm_smmu_master_cfg *cfg;
Robin Murphy7e96c742016-09-14 15:26:46 +01002382 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyd5b41782016-09-14 15:21:39 +01002383 int i, ret;
2384
Robin Murphy7e96c742016-09-14 15:26:46 +01002385 if (using_legacy_binding) {
2386 ret = arm_smmu_register_legacy_master(dev, &smmu);
2387 fwspec = dev->iommu_fwspec;
2388 if (ret)
2389 goto out_free;
Robin Murphy22e6f6c2016-11-02 17:31:32 +00002390 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002391 smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
2392 if (!smmu)
2393 return -ENODEV;
2394 } else {
2395 return -ENODEV;
2396 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002397
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002398 ret = arm_smmu_power_on(smmu->pwr);
2399 if (ret)
2400 goto out_free;
2401
Robin Murphyd5b41782016-09-14 15:21:39 +01002402 ret = -EINVAL;
Robin Murphy06e393e2016-09-12 17:13:55 +01002403 for (i = 0; i < fwspec->num_ids; i++) {
2404 u16 sid = fwspec->ids[i];
Robin Murphy7e96c742016-09-14 15:26:46 +01002405 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyd5b41782016-09-14 15:21:39 +01002406
Robin Murphy06e393e2016-09-12 17:13:55 +01002407 if (sid & ~smmu->streamid_mask) {
Robin Murphyd5b41782016-09-14 15:21:39 +01002408 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002409 sid, smmu->streamid_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002410 goto out_pwr_off;
Robin Murphyd5b41782016-09-14 15:21:39 +01002411 }
Robin Murphy7e96c742016-09-14 15:26:46 +01002412 if (mask & ~smmu->smr_mask_mask) {
2413 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
	2414				mask, smmu->smr_mask_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002415 goto out_pwr_off;
Robin Murphy7e96c742016-09-14 15:26:46 +01002416 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002417 }
Will Deacon03edb222015-01-19 14:27:33 +00002418
Robin Murphy06e393e2016-09-12 17:13:55 +01002419 ret = -ENOMEM;
2420 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
2421 GFP_KERNEL);
2422 if (!cfg)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002423 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002424
2425 cfg->smmu = smmu;
2426 fwspec->iommu_priv = cfg;
2427 while (i--)
2428 cfg->smendx[i] = INVALID_SMENDX;
2429
Robin Murphy6668f692016-09-12 17:13:54 +01002430 ret = arm_smmu_master_alloc_smes(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002431 if (ret)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002432 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002433
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002434 arm_smmu_power_off(smmu->pwr);
Robin Murphy06e393e2016-09-12 17:13:55 +01002435 return 0;
Robin Murphyd5b41782016-09-14 15:21:39 +01002436
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002437out_pwr_off:
2438 arm_smmu_power_off(smmu->pwr);
Robin Murphyd5b41782016-09-14 15:21:39 +01002439out_free:
Robin Murphy06e393e2016-09-12 17:13:55 +01002440 if (fwspec)
2441 kfree(fwspec->iommu_priv);
2442 iommu_fwspec_free(dev);
Robin Murphyd5b41782016-09-14 15:21:39 +01002443 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00002444}
2445
Will Deacon45ae7cf2013-06-24 18:31:25 +01002446static void arm_smmu_remove_device(struct device *dev)
2447{
Robin Murphy06e393e2016-09-12 17:13:55 +01002448 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002449 struct arm_smmu_device *smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002450
Robin Murphy06e393e2016-09-12 17:13:55 +01002451 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyd5b41782016-09-14 15:21:39 +01002452 return;
Robin Murphya754fd12016-09-12 17:13:50 +01002453
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002454 smmu = fwspec_smmu(fwspec);
2455 if (arm_smmu_power_on(smmu->pwr)) {
2456 WARN_ON(1);
2457 return;
2458 }
2459
Robin Murphy06e393e2016-09-12 17:13:55 +01002460 arm_smmu_master_free_smes(fwspec);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002461 iommu_group_remove_device(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002462 kfree(fwspec->iommu_priv);
2463 iommu_fwspec_free(dev);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002464 arm_smmu_power_off(smmu->pwr);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002465}
2466
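/*
 * A device may own several stream map entries; they must all resolve to the
 * same iommu_group (or to none yet), otherwise grouping is refused. PCI and
 * platform devices fall back to the core helpers for a new group.
 */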
Joerg Roedelaf659932015-10-21 23:51:41 +02002467static struct iommu_group *arm_smmu_device_group(struct device *dev)
2468{
Robin Murphy06e393e2016-09-12 17:13:55 +01002469 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2470 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy6668f692016-09-12 17:13:54 +01002471 struct iommu_group *group = NULL;
2472 int i, idx;
2473
Robin Murphy06e393e2016-09-12 17:13:55 +01002474 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002475 if (group && smmu->s2crs[idx].group &&
2476 group != smmu->s2crs[idx].group)
2477 return ERR_PTR(-EINVAL);
2478
2479 group = smmu->s2crs[idx].group;
2480 }
2481
2482 if (group)
2483 return group;
Joerg Roedelaf659932015-10-21 23:51:41 +02002484
2485 if (dev_is_pci(dev))
2486 group = pci_device_group(dev);
2487 else
2488 group = generic_device_group(dev);
2489
Joerg Roedelaf659932015-10-21 23:51:41 +02002490 return group;
2491}
2492
Will Deaconc752ce42014-06-25 22:46:31 +01002493static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
2494 enum iommu_attr attr, void *data)
2495{
Joerg Roedel1d672632015-03-26 13:43:10 +01002496 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002497 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01002498
2499 switch (attr) {
2500 case DOMAIN_ATTR_NESTING:
2501 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
2502 return 0;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08002503 case DOMAIN_ATTR_PT_BASE_ADDR:
2504 *((phys_addr_t *)data) =
2505 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2506 return 0;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002507 case DOMAIN_ATTR_CONTEXT_BANK:
2508 /* context bank index isn't valid until we are attached */
2509 if (smmu_domain->smmu == NULL)
2510 return -ENODEV;
2511
2512 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
2513 ret = 0;
2514 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002515 case DOMAIN_ATTR_TTBR0: {
2516 u64 val;
2517 struct arm_smmu_device *smmu = smmu_domain->smmu;
2518 /* not valid until we are attached */
2519 if (smmu == NULL)
2520 return -ENODEV;
2521
2522 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2523 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
2524 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
2525 << (TTBRn_ASID_SHIFT);
2526 *((u64 *)data) = val;
2527 ret = 0;
2528 break;
2529 }
2530 case DOMAIN_ATTR_CONTEXTIDR:
2531 /* not valid until attached */
2532 if (smmu_domain->smmu == NULL)
2533 return -ENODEV;
2534 *((u32 *)data) = smmu_domain->cfg.procid;
2535 ret = 0;
2536 break;
2537 case DOMAIN_ATTR_PROCID:
2538 *((u32 *)data) = smmu_domain->cfg.procid;
2539 ret = 0;
2540 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002541 case DOMAIN_ATTR_DYNAMIC:
2542 *((int *)data) = !!(smmu_domain->attributes
2543 & (1 << DOMAIN_ATTR_DYNAMIC));
2544 ret = 0;
2545 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002546 case DOMAIN_ATTR_NON_FATAL_FAULTS:
2547 *((int *)data) = !!(smmu_domain->attributes
2548 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
2549 ret = 0;
2550 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07002551 case DOMAIN_ATTR_S1_BYPASS:
2552 *((int *)data) = !!(smmu_domain->attributes
2553 & (1 << DOMAIN_ATTR_S1_BYPASS));
2554 ret = 0;
2555 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002556 case DOMAIN_ATTR_SECURE_VMID:
2557 *((int *)data) = smmu_domain->secure_vmid;
2558 ret = 0;
2559 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08002560 case DOMAIN_ATTR_PGTBL_INFO: {
2561 struct iommu_pgtbl_info *info = data;
2562
2563 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
2564 ret = -ENODEV;
2565 break;
2566 }
2567 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
2568 ret = 0;
2569 break;
2570 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002571 case DOMAIN_ATTR_FAST:
2572 *((int *)data) = !!(smmu_domain->attributes
2573 & (1 << DOMAIN_ATTR_FAST));
2574 ret = 0;
2575 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002576 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2577 *((int *)data) = !!(smmu_domain->attributes &
2578 (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
2579 ret = 0;
2580 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002581 default:
2582 return -ENODEV;
2583 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002584 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002585}
2586
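/*
 * Usage sketch (hypothetical caller, not part of this driver): most of the
 * attributes handled below are only accepted while the domain is detached,
 * so they are typically configured before iommu_attach_device(), e.g.
 *
 *	int atomic_ctx = 1;
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_ATOMIC, &atomic_ctx);
 *	iommu_attach_device(domain, dev);
 */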
2587static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
2588 enum iommu_attr attr, void *data)
2589{
Will Deacon518f7132014-11-14 17:17:54 +00002590 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01002591 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01002592
Will Deacon518f7132014-11-14 17:17:54 +00002593 mutex_lock(&smmu_domain->init_mutex);
2594
Will Deaconc752ce42014-06-25 22:46:31 +01002595 switch (attr) {
2596 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00002597 if (smmu_domain->smmu) {
2598 ret = -EPERM;
2599 goto out_unlock;
2600 }
2601
Will Deaconc752ce42014-06-25 22:46:31 +01002602 if (*(int *)data)
2603 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2604 else
2605 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2606
Will Deacon518f7132014-11-14 17:17:54 +00002607 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002608 case DOMAIN_ATTR_PROCID:
2609 if (smmu_domain->smmu != NULL) {
2610 dev_err(smmu_domain->smmu->dev,
2611 "cannot change procid attribute while attached\n");
2612 ret = -EBUSY;
2613 break;
2614 }
2615 smmu_domain->cfg.procid = *((u32 *)data);
2616 ret = 0;
2617 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002618 case DOMAIN_ATTR_DYNAMIC: {
2619 int dynamic = *((int *)data);
2620
2621 if (smmu_domain->smmu != NULL) {
2622 dev_err(smmu_domain->smmu->dev,
2623 "cannot change dynamic attribute while attached\n");
2624 ret = -EBUSY;
2625 break;
2626 }
2627
2628 if (dynamic)
2629 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
2630 else
2631 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
2632 ret = 0;
2633 break;
2634 }
2635 case DOMAIN_ATTR_CONTEXT_BANK:
2636 /* context bank can't be set while attached */
2637 if (smmu_domain->smmu != NULL) {
2638 ret = -EBUSY;
2639 break;
2640 }
2641 /* ... and it can only be set for dynamic contexts. */
2642 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
2643 ret = -EINVAL;
2644 break;
2645 }
2646
2647 /* this will be validated during attach */
2648 smmu_domain->cfg.cbndx = *((unsigned int *)data);
2649 ret = 0;
2650 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002651 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
2652 u32 non_fatal_faults = *((int *)data);
2653
2654 if (non_fatal_faults)
2655 smmu_domain->attributes |=
2656 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
2657 else
2658 smmu_domain->attributes &=
2659 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
2660 ret = 0;
2661 break;
2662 }
Patrick Dalye62d3362016-03-15 18:58:28 -07002663 case DOMAIN_ATTR_S1_BYPASS: {
2664 int bypass = *((int *)data);
2665
2666 /* bypass can't be changed while attached */
2667 if (smmu_domain->smmu != NULL) {
2668 ret = -EBUSY;
2669 break;
2670 }
2671 if (bypass)
2672 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
2673 else
2674 smmu_domain->attributes &=
2675 ~(1 << DOMAIN_ATTR_S1_BYPASS);
2676
2677 ret = 0;
2678 break;
2679 }
Patrick Daly8befb662016-08-17 20:03:28 -07002680 case DOMAIN_ATTR_ATOMIC:
2681 {
2682 int atomic_ctx = *((int *)data);
2683
2684 /* can't be changed while attached */
2685 if (smmu_domain->smmu != NULL) {
2686 ret = -EBUSY;
2687 break;
2688 }
2689 if (atomic_ctx)
2690 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
2691 else
2692 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
2693 break;
2694 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002695 case DOMAIN_ATTR_SECURE_VMID:
2696 if (smmu_domain->secure_vmid != VMID_INVAL) {
2697 ret = -ENODEV;
2698 WARN(1, "secure vmid already set!");
2699 break;
2700 }
2701 smmu_domain->secure_vmid = *((int *)data);
2702 break;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002703 case DOMAIN_ATTR_FAST:
2704 if (*((int *)data))
2705 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
2706 ret = 0;
2707 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002708 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2709 /* can't be changed while attached */
2710 if (smmu_domain->smmu != NULL) {
2711 ret = -EBUSY;
2712 break;
2713 }
2714 if (*((int *)data))
2715 smmu_domain->attributes |=
2716 1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
2717 ret = 0;
2718 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002719 default:
Will Deacon518f7132014-11-14 17:17:54 +00002720 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01002721 }
Will Deacon518f7132014-11-14 17:17:54 +00002722
2723out_unlock:
2724 mutex_unlock(&smmu_domain->init_mutex);
2725 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002726}
2727
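/*
 * Generic "iommus" binding sketch (cell values are illustrative only):
 *
 *	iommus = <&smmu 0x420 0x0>;
 *
 * args[0] carries the stream ID and the optional args[1] carries the SMR
 * mask, which is packed into the upper half of the fwid below.
 */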
Robin Murphy7e96c742016-09-14 15:26:46 +01002728static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
2729{
2730 u32 fwid = 0;
2731
2732 if (args->args_count > 0)
2733 fwid |= (u16)args->args[0];
2734
2735 if (args->args_count > 1)
2736 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
2737
2738 return iommu_fwspec_add_ids(dev, &fwid, 1);
2739}
2740
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002741static void arm_smmu_trigger_fault(struct iommu_domain *domain,
2742 unsigned long flags)
2743{
2744 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2745 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2746 struct arm_smmu_device *smmu;
2747 void __iomem *cb_base;
2748
2749 if (!smmu_domain->smmu) {
2750 pr_err("Can't trigger faults on non-attached domains\n");
2751 return;
2752 }
2753
2754 smmu = smmu_domain->smmu;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002755 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002756 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002757
2758 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2759 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
2760 flags, cfg->cbndx);
2761 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07002762 /* give the interrupt time to fire... */
2763 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002764
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002765 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002766}
2767
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002768static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
2769 unsigned long offset)
2770{
2771 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2772 struct arm_smmu_device *smmu;
2773 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2774 void __iomem *cb_base;
2775 unsigned long val;
2776
2777 if (offset >= SZ_4K) {
2778 pr_err("Invalid offset: 0x%lx\n", offset);
2779 return 0;
2780 }
2781
2782 smmu = smmu_domain->smmu;
2783 if (!smmu) {
2784 WARN(1, "Can't read registers of a detached domain\n");
2785 val = 0;
2786 return val;
2787 }
2788
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002789 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002790 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002791
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002792 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2793 val = readl_relaxed(cb_base + offset);
2794
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002795 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002796 return val;
2797}
2798
2799static void arm_smmu_reg_write(struct iommu_domain *domain,
2800 unsigned long offset, unsigned long val)
2801{
2802 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2803 struct arm_smmu_device *smmu;
2804 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2805 void __iomem *cb_base;
2806
2807 if (offset >= SZ_4K) {
2808 pr_err("Invalid offset: 0x%lx\n", offset);
2809 return;
2810 }
2811
2812 smmu = smmu_domain->smmu;
2813 if (!smmu) {
	2814		WARN(1, "Can't write registers of a detached domain\n");
2815 return;
2816 }
2817
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002818 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002819 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002820
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002821 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2822 writel_relaxed(val, cb_base + offset);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002823
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002824 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002825}
2826
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08002827static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
2828{
2829 arm_smmu_tlb_inv_context(to_smmu_domain(domain));
2830}
2831
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002832static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
2833{
2834 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2835
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002836 return arm_smmu_power_on(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002837}
2838
2839static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
2840{
2841 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2842
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002843 arm_smmu_power_off(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002844}
2845
Will Deacon518f7132014-11-14 17:17:54 +00002846static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01002847 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01002848 .domain_alloc = arm_smmu_domain_alloc,
2849 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01002850 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07002851 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01002852 .map = arm_smmu_map,
2853 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002854 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01002855 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002856 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01002857 .add_device = arm_smmu_add_device,
2858 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02002859 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01002860 .domain_get_attr = arm_smmu_domain_get_attr,
2861 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy7e96c742016-09-14 15:26:46 +01002862 .of_xlate = arm_smmu_of_xlate,
Will Deacon518f7132014-11-14 17:17:54 +00002863 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002864 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002865 .reg_read = arm_smmu_reg_read,
2866 .reg_write = arm_smmu_reg_write,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08002867 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002868 .enable_config_clocks = arm_smmu_enable_config_clocks,
2869 .disable_config_clocks = arm_smmu_disable_config_clocks,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002870};
2871
Patrick Dalyad441dd2016-09-15 15:50:46 -07002872#define IMPL_DEF1_MICRO_MMU_CTRL 0
2873#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
2874#define MICRO_MMU_CTRL_IDLE (1 << 3)
2875
2876/* Definitions for implementation-defined registers */
2877#define ACTLR_QCOM_OSH_SHIFT 28
2878#define ACTLR_QCOM_OSH 1
2879
2880#define ACTLR_QCOM_ISH_SHIFT 29
2881#define ACTLR_QCOM_ISH 1
2882
2883#define ACTLR_QCOM_NSH_SHIFT 30
2884#define ACTLR_QCOM_NSH 1
2885
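/*
 * QSMMUv2 halt/resume handshake: a halt is requested through the
 * implementation-defined MICRO_MMU_CTRL register and, when waiting, the IDLE
 * bit is polled with a 30ms timeout before other IMPL_DEF state is touched.
 */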
2886static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002887{
2888 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002889 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002890
2891 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
2892 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
2893 0, 30000)) {
2894 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
2895 return -EBUSY;
2896 }
2897
2898 return 0;
2899}
2900
Patrick Dalyad441dd2016-09-15 15:50:46 -07002901static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002902{
2903 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
2904 u32 reg;
2905
2906 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2907 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
2908 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2909
Patrick Dalyad441dd2016-09-15 15:50:46 -07002910 return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002911}
2912
Patrick Dalyad441dd2016-09-15 15:50:46 -07002913static int qsmmuv2_halt(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002914{
Patrick Dalyad441dd2016-09-15 15:50:46 -07002915 return __qsmmuv2_halt(smmu, true);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002916}
2917
Patrick Dalyad441dd2016-09-15 15:50:46 -07002918static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002919{
Patrick Dalyad441dd2016-09-15 15:50:46 -07002920 return __qsmmuv2_halt(smmu, false);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002921}
2922
Patrick Dalyad441dd2016-09-15 15:50:46 -07002923static void qsmmuv2_resume(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002924{
2925 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
2926 u32 reg;
2927
2928 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2929 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
2930 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2931}
2932
Patrick Dalyad441dd2016-09-15 15:50:46 -07002933static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002934{
2935 int i;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002936 u32 val;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002937 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002938 void __iomem *cb_base;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002939
Patrick Dalyad441dd2016-09-15 15:50:46 -07002940 /*
	2941	 * ACTLR must be programmed while SCTLR.M is still clear (the context
	2942	 * banks are disabled here), per the ARM SMMUv2 spec, to prevent
	2943	 * table walks with an inconsistent state.
2943 */
2944 for (i = 0; i < smmu->num_context_banks; ++i) {
2945 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
2946 val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
2947 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
2948 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
2949 writel_relaxed(val, cb_base + ARM_SMMU_CB_ACTLR);
2950 }
2951
2952 /* Program implementation defined registers */
2953 qsmmuv2_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002954 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
2955 writel_relaxed(regs[i].value,
2956 ARM_SMMU_GR0(smmu) + regs[i].offset);
Patrick Dalyad441dd2016-09-15 15:50:46 -07002957 qsmmuv2_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002958}
2959
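/*
 * Hardware (ATOS) translation of an IOVA: power on, optionally halt the SMMU
 * to quiesce traffic, then perform the lookup under both the page-table and
 * atos locks so the context bank registers are not reprogrammed mid-walk.
 */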
Patrick Dalyad441dd2016-09-15 15:50:46 -07002960static phys_addr_t __qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
2961 dma_addr_t iova, bool halt)
2962{
2963 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2964 struct arm_smmu_device *smmu = smmu_domain->smmu;
2965 int ret;
2966 phys_addr_t phys = 0;
2967 unsigned long flags;
2968
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002969 ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07002970 if (ret)
2971 return 0;
2972
2973 if (halt) {
2974 ret = qsmmuv2_halt(smmu);
2975 if (ret)
2976 goto out_power_off;
2977 }
2978
2979 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2980 spin_lock(&smmu->atos_lock);
2981 phys = __arm_smmu_iova_to_phys_hard(domain, iova);
2982 spin_unlock(&smmu->atos_lock);
2983 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2984
2985 if (halt)
2986 qsmmuv2_resume(smmu);
2987
2988out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002989 arm_smmu_power_off(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07002990 return phys;
2991}
2992
2993static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
2994 dma_addr_t iova)
2995{
2996 return __qsmmuv2_iova_to_phys_hard(domain, iova, true);
2997}
2998
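/*
 * Fault-path diagnostics: halt the SMMU, terminate the stalled transaction,
 * clear FSR, momentarily drop stall mode, then run ATOS both before and after
 * a TLB invalidate to help distinguish stale-TLB faults from page-table ones.
 */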
2999static void qsmmuv2_iova_to_phys_fault(
3000 struct iommu_domain *domain,
3001 dma_addr_t iova, phys_addr_t *phys,
3002 phys_addr_t *phys_post_tlbiall)
3003{
3004 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3005 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3006 struct arm_smmu_device *smmu;
3007 void __iomem *cb_base;
3008 u64 sctlr, sctlr_orig;
3009 u32 fsr;
3010
3011 smmu = smmu_domain->smmu;
3012 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3013
3014 qsmmuv2_halt_nowait(smmu);
3015
3016 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
3017
3018 qsmmuv2_wait_for_halt(smmu);
3019
3020 /* clear FSR to allow ATOS to log any faults */
3021 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
3022 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
3023
3024 /* disable stall mode momentarily */
3025 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3026 sctlr = sctlr_orig & ~SCTLR_CFCFG;
3027 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
3028
3029 *phys = __qsmmuv2_iova_to_phys_hard(domain, iova, false);
3030 arm_smmu_tlb_inv_context(smmu_domain);
3031 *phys_post_tlbiall = __qsmmuv2_iova_to_phys_hard(domain, iova, false);
3032
3033 /* restore SCTLR */
3034 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
3035
3036 qsmmuv2_resume(smmu);
3037}
3038
3039struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
3040 .device_reset = qsmmuv2_device_reset,
3041 .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
3042 .iova_to_phys_fault = qsmmuv2_iova_to_phys_fault,
3043};
3044
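/*
 * Per-context-bank reset: clear the MMU-500 ACR CACHE_LOCK bit where required
 * (r2 onwards), disable every context bank and clear its fault status, and
 * turn off the MMU-500 next-page prefetcher (errata #841119 and #826419).
 */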
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003045static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003046{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003047 int i;
3048 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003049 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003050 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003051
Peng Fan3ca37122016-05-03 21:50:30 +08003052 /*
3053 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
3054 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
3055 * bit is only present in MMU-500r2 onwards.
3056 */
3057 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
3058 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
3059 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
3060 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
3061 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
3062 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
3063 }
3064
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003065 /* Make sure all context banks are disabled and clear CB_FSR */
3066 for (i = 0; i < smmu->num_context_banks; ++i) {
3067 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3068 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
3069 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003070 /*
3071 * Disable MMU-500's not-particularly-beneficial next-page
3072 * prefetcher for the sake of errata #841119 and #826419.
3073 */
3074 if (smmu->model == ARM_MMU500) {
3075 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
3076 reg &= ~ARM_MMU500_ACTLR_CPRE;
3077 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
3078 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003079 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003080}
3081
3082static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
3083{
3084 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy468f4942016-09-12 17:13:49 +01003085 int i;
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003086 u32 reg;
3087
3088 /* clear global FSR */
3089 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3090 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3091
Robin Murphy468f4942016-09-12 17:13:49 +01003092 /*
3093 * Reset stream mapping groups: Initial values mark all SMRn as
3094 * invalid and all S2CRn as bypass unless overridden.
3095 */
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003096 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
Robin Murphya754fd12016-09-12 17:13:50 +01003097 for (i = 0; i < smmu->num_mapping_groups; ++i)
3098 arm_smmu_write_sme(smmu, i);
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003099
3100 arm_smmu_context_bank_reset(smmu);
3101 }
Will Deacon1463fe42013-07-31 19:21:27 +01003102
Will Deacon45ae7cf2013-06-24 18:31:25 +01003103 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003104 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
3105 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
3106
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003107 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003108
Will Deacon45ae7cf2013-06-24 18:31:25 +01003109 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003110 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003111
3112 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003113 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003114
Robin Murphy25a1c962016-02-10 14:25:33 +00003115 /* Enable client access, handling unmatched streams as appropriate */
3116 reg &= ~sCR0_CLIENTPD;
3117 if (disable_bypass)
3118 reg |= sCR0_USFCFG;
3119 else
3120 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003121
3122 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003123 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003124
3125 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003126 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003127
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003128 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3129 reg |= sCR0_VMID16EN;
3130
Will Deacon45ae7cf2013-06-24 18:31:25 +01003131 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003132 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003133 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Dalyd7476202016-09-08 18:23:28 -07003134
3135 /* Manage any implementation defined features */
3136 arm_smmu_arch_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003137}
3138
3139static int arm_smmu_id_size_to_bits(int size)
3140{
3141 switch (size) {
3142 case 0:
3143 return 32;
3144 case 1:
3145 return 36;
3146 case 2:
3147 return 40;
3148 case 3:
3149 return 42;
3150 case 4:
3151 return 44;
3152 case 5:
3153 default:
3154 return 48;
3155 }
3156}
3157
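/*
 * "attach-impl-defs" is expected to be a flat list of <offset value> pairs
 * (hence the ntuples % 2 check below). Hypothetical example:
 *
 *	attach-impl-defs = <0x6000 0x270>,
 *			   <0x6060 0x1055>;
 */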
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003158static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
3159{
3160 struct device *dev = smmu->dev;
3161 int i, ntuples, ret;
3162 u32 *tuples;
3163 struct arm_smmu_impl_def_reg *regs, *regit;
3164
3165 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
3166 return 0;
3167
3168 ntuples /= sizeof(u32);
3169 if (ntuples % 2) {
3170 dev_err(dev,
3171 "Invalid number of attach-impl-defs registers: %d\n",
3172 ntuples);
3173 return -EINVAL;
3174 }
3175
3176 regs = devm_kmalloc(
3177 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
3178 GFP_KERNEL);
3179 if (!regs)
3180 return -ENOMEM;
3181
3182 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
3183 if (!tuples)
3184 return -ENOMEM;
3185
3186 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
3187 tuples, ntuples);
3188 if (ret)
3189 return ret;
3190
3191 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
3192 regit->offset = tuples[i];
3193 regit->value = tuples[i + 1];
3194 }
3195
3196 devm_kfree(dev, tuples);
3197
3198 smmu->impl_def_attach_registers = regs;
3199 smmu->num_impl_def_attach_registers = ntuples / 2;
3200
3201 return 0;
3202}
3203
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003204
3205static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003206{
3207 const char *cname;
3208 struct property *prop;
3209 int i;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003210 struct device *dev = pwr->dev;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003211
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003212 pwr->num_clocks =
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003213 of_property_count_strings(dev->of_node, "clock-names");
3214
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003215 if (pwr->num_clocks < 1) {
3216 pwr->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003217 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07003218 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003219
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003220 pwr->clocks = devm_kzalloc(
3221 dev, sizeof(*pwr->clocks) * pwr->num_clocks,
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003222 GFP_KERNEL);
3223
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003224 if (!pwr->clocks)
3225 return -ENOMEM;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003226
3227 i = 0;
3228 of_property_for_each_string(dev->of_node, "clock-names",
3229 prop, cname) {
3230 struct clk *c = devm_clk_get(dev, cname);
3231
3232 if (IS_ERR(c)) {
	3233			dev_err(dev, "Couldn't get clock: %s\n",
3234 cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07003235 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003236 }
3237
3238 if (clk_get_rate(c) == 0) {
3239 long rate = clk_round_rate(c, 1000);
3240
3241 clk_set_rate(c, rate);
3242 }
3243
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003244 pwr->clocks[i] = c;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003245
3246 ++i;
3247 }
3248 return 0;
3249}
3250
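/*
 * GDSC/regulator handling mirrors the clock handling above: the optional
 * "qcom,regulator-names" property is parsed and the named supplies are
 * bulk-requested; its absence is not an error.
 */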
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003251static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003252{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003253 const char *cname;
3254 struct property *prop;
3255 int i, ret = 0;
3256 struct device *dev = pwr->dev;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003257
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003258 pwr->num_gdscs =
3259 of_property_count_strings(dev->of_node, "qcom,regulator-names");
3260
3261 if (pwr->num_gdscs < 1) {
3262 pwr->num_gdscs = 0;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003263 return 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003264 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003265
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003266 pwr->gdscs = devm_kzalloc(
3267 dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
3268
3269 if (!pwr->gdscs)
3270 return -ENOMEM;
3271
3272 i = 0;
3273 of_property_for_each_string(dev->of_node, "qcom,regulator-names",
3274 prop, cname)
	3275		pwr->gdscs[i++].supply = cname;
3276
3277 ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
3278 return ret;
3279}
3280
3281static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
3282{
3283 struct device *dev = pwr->dev;
3284
3285 /* We don't want the bus APIs to print an error message */
3286 if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
3287 dev_dbg(dev, "No bus scaling info\n");
3288 return 0;
3289 }
3290
3291 pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
3292 if (!pwr->bus_dt_data) {
3293 dev_err(dev, "Unable to read bus-scaling from devicetree\n");
3294 return -EINVAL;
3295 }
3296
3297 pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
3298 if (!pwr->bus_client) {
3299 dev_err(dev, "Bus client registration failed\n");
3300 msm_bus_cl_clear_pdata(pwr->bus_dt_data);
3301 return -EINVAL;
3302 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003303
3304 return 0;
3305}
3306
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003307/*
3308 * Cleanup done by devm. Any non-devm resources must clean up themselves.
3309 */
3310static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
3311 struct platform_device *pdev)
Patrick Daly2764f952016-09-06 19:22:44 -07003312{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003313 struct arm_smmu_power_resources *pwr;
3314 int ret;
Patrick Daly2764f952016-09-06 19:22:44 -07003315
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003316 pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
3317 if (!pwr)
3318 return ERR_PTR(-ENOMEM);
Patrick Daly2764f952016-09-06 19:22:44 -07003319
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003320 pwr->dev = &pdev->dev;
3321 pwr->pdev = pdev;
3322 mutex_init(&pwr->power_lock);
3323 spin_lock_init(&pwr->clock_refs_lock);
Patrick Daly2764f952016-09-06 19:22:44 -07003324
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003325 ret = arm_smmu_init_clocks(pwr);
3326 if (ret)
3327 return ERR_PTR(ret);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003328
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003329 ret = arm_smmu_init_regulators(pwr);
3330 if (ret)
3331 return ERR_PTR(ret);
3332
3333 ret = arm_smmu_init_bus_scaling(pwr);
3334 if (ret)
3335 return ERR_PTR(ret);
3336
3337 return pwr;
Patrick Daly2764f952016-09-06 19:22:44 -07003338}
3339
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003340/*
3341 * Bus APIs are not devm-safe.
3342 */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003343static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003344{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003345 msm_bus_scale_unregister_client(pwr->bus_client);
3346 msm_bus_cl_clear_pdata(pwr->bus_dt_data);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003347}
3348
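/*
 * Probe the global ID registers: translation stages, stream matching
 * resources, context bank counts, address sizes and supported page sizes,
 * warning where the DT and the hardware disagree.
 */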
Will Deacon45ae7cf2013-06-24 18:31:25 +01003349static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
3350{
3351 unsigned long size;
3352 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
3353 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003354 bool cttw_dt, cttw_reg;
Robin Murphya754fd12016-09-12 17:13:50 +01003355 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003356
Mitchel Humpherysba822582015-10-20 11:37:41 -07003357 dev_dbg(smmu->dev, "probing hardware configuration...\n");
3358 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01003359 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003360
3361 /* ID0 */
3362 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01003363
3364 /* Restrict available stages based on module parameter */
3365 if (force_stage == 1)
3366 id &= ~(ID0_S2TS | ID0_NTS);
3367 else if (force_stage == 2)
3368 id &= ~(ID0_S1TS | ID0_NTS);
3369
Will Deacon45ae7cf2013-06-24 18:31:25 +01003370 if (id & ID0_S1TS) {
3371 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003372 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003373 }
3374
3375 if (id & ID0_S2TS) {
3376 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003377 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003378 }
3379
3380 if (id & ID0_NTS) {
3381 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003382 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003383 }
3384
3385 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01003386 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003387 dev_err(smmu->dev, "\tno translation support!\n");
3388 return -ENODEV;
3389 }
3390
Robin Murphyb7862e32016-04-13 18:13:03 +01003391 if ((id & ID0_S1TS) &&
3392 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003393 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003394 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003395 }
3396
Robin Murphybae2c2d2015-07-29 19:46:05 +01003397 /*
3398 * In order for DMA API calls to work properly, we must defer to what
3399 * the DT says about coherency, regardless of what the hardware claims.
3400 * Fortunately, this also opens up a workaround for systems where the
3401 * ID register value has ended up configured incorrectly.
3402 */
3403 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
3404 cttw_reg = !!(id & ID0_CTTW);
3405 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003406 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003407 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003408 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01003409 cttw_dt ? "" : "non-");
3410 if (cttw_dt != cttw_reg)
3411 dev_notice(smmu->dev,
3412 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003413
Robin Murphy53867802016-09-12 17:13:48 +01003414 /* Max. number of entries we have for stream matching/indexing */
3415 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
3416 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003417 if (id & ID0_SMS) {
Robin Murphy53867802016-09-12 17:13:48 +01003418 u32 smr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003419
3420 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy53867802016-09-12 17:13:48 +01003421 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
3422 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003423 dev_err(smmu->dev,
3424 "stream-matching supported, but no SMRs present!\n");
3425 return -ENODEV;
3426 }
3427
Robin Murphy53867802016-09-12 17:13:48 +01003428 /*
3429 * SMR.ID bits may not be preserved if the corresponding MASK
3430 * bits are set, so check each one separately. We can reject
3431 * masters later if they try to claim IDs outside these masks.
3432 */
3433 smr = smmu->streamid_mask << SMR_ID_SHIFT;
3434 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
3435 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
3436 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003437
Robin Murphy53867802016-09-12 17:13:48 +01003438 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
3439 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
3440 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
3441 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
Dhaval Patel031d7462015-05-09 14:47:29 -07003442
Robin Murphy468f4942016-09-12 17:13:49 +01003443 /* Zero-initialised to mark as invalid */
3444 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
3445 GFP_KERNEL);
3446 if (!smmu->smrs)
3447 return -ENOMEM;
3448
Robin Murphy53867802016-09-12 17:13:48 +01003449 dev_notice(smmu->dev,
3450 "\tstream matching with %lu register groups, mask 0x%x",
3451 size, smmu->smr_mask_mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003452 }
Robin Murphya754fd12016-09-12 17:13:50 +01003453 /* s2cr->type == 0 means translation, so initialise explicitly */
3454 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
3455 GFP_KERNEL);
3456 if (!smmu->s2crs)
3457 return -ENOMEM;
3458 for (i = 0; i < size; i++)
3459 smmu->s2crs[i] = s2cr_init_val;
3460
Robin Murphy53867802016-09-12 17:13:48 +01003461 smmu->num_mapping_groups = size;
Robin Murphy6668f692016-09-12 17:13:54 +01003462 mutex_init(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003463
Robin Murphy7602b872016-04-28 17:12:09 +01003464 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
3465 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
3466 if (!(id & ID0_PTFS_NO_AARCH32S))
3467 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
3468 }
3469
Will Deacon45ae7cf2013-06-24 18:31:25 +01003470 /* ID1 */
3471 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01003472 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003473
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003474 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00003475 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01003476 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003477 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07003478 dev_warn(smmu->dev,
3479 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
3480 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003481
Will Deacon518f7132014-11-14 17:17:54 +00003482 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003483 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
3484 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
3485 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
3486 return -ENODEV;
3487 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07003488 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01003489 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01003490 /*
3491 * Cavium CN88xx erratum #27704.
3492 * Ensure ASID and VMID allocation is unique across all SMMUs in
3493 * the system.
3494 */
3495 if (smmu->model == CAVIUM_SMMUV2) {
3496 smmu->cavium_id_base =
3497 atomic_add_return(smmu->num_context_banks,
3498 &cavium_smmu_context_count);
3499 smmu->cavium_id_base -= smmu->num_context_banks;
3500 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003501
3502 /* ID2 */
3503 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
3504 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003505 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003506
Will Deacon518f7132014-11-14 17:17:54 +00003507 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003508 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003509 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003510
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003511 if (id & ID2_VMID16)
3512 smmu->features |= ARM_SMMU_FEAT_VMID16;
3513
Robin Murphyf1d84542015-03-04 16:41:05 +00003514 /*
3515 * What the page table walker can address actually depends on which
3516 * descriptor format is in use, but since a) we don't know that yet,
3517 * and b) it can vary per context bank, this will have to do...
3518 */
3519 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
3520 dev_warn(smmu->dev,
3521 "failed to set DMA mask for table walker\n");
3522
Robin Murphyb7862e32016-04-13 18:13:03 +01003523 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00003524 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01003525 if (smmu->version == ARM_SMMU_V1_64K)
3526 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003527 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003528 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00003529 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00003530 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01003531 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00003532 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01003533 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00003534 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01003535 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003536 }
3537
Robin Murphy7602b872016-04-28 17:12:09 +01003538 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01003539 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01003540 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01003541 if (smmu->features &
3542 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01003543 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01003544 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01003545 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01003546 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01003547 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01003548
Robin Murphyd5466352016-05-09 17:20:09 +01003549 if (arm_smmu_ops.pgsize_bitmap == -1UL)
3550 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
3551 else
3552 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003553 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01003554 smmu->pgsize_bitmap);
3555
Will Deacon518f7132014-11-14 17:17:54 +00003556
Will Deacon28d60072014-09-01 16:24:48 +01003557 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003558 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
3559 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003560
3561 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003562 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
3563 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003564
Will Deacon45ae7cf2013-06-24 18:31:25 +01003565 return 0;
3566}
3567
Patrick Dalyd7476202016-09-08 18:23:28 -07003568static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
3569{
3570 if (!smmu->arch_ops)
3571 return 0;
3572 if (!smmu->arch_ops->init)
3573 return 0;
3574 return smmu->arch_ops->init(smmu);
3575}
3576
3577static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
3578{
3579 if (!smmu->arch_ops)
3580 return;
3581 if (!smmu->arch_ops->device_reset)
3582 return;
3583 return smmu->arch_ops->device_reset(smmu);
3584}
3585
Robin Murphy67b65a32016-04-13 18:12:57 +01003586struct arm_smmu_match_data {
3587 enum arm_smmu_arch_version version;
3588 enum arm_smmu_implementation model;
Patrick Dalyd7476202016-09-08 18:23:28 -07003589 struct arm_smmu_arch_ops *arch_ops;
Robin Murphy67b65a32016-04-13 18:12:57 +01003590};
3591
Patrick Dalyd7476202016-09-08 18:23:28 -07003592#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops) \
3593static struct arm_smmu_match_data name = { \
	3594	.version = ver,					\
	3595	.model = imp,						\
	3596	.arch_ops = ops,					\
	3597}
Robin Murphy67b65a32016-04-13 18:12:57 +01003598
Patrick Daly1f8a2882016-09-12 17:32:05 -07003599struct arm_smmu_arch_ops qsmmuv500_arch_ops;
3600
Patrick Dalyd7476202016-09-08 18:23:28 -07003601ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
3602ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
3603ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
3604ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
3605ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003606ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
Patrick Daly1f8a2882016-09-12 17:32:05 -07003607ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
3608 &qsmmuv500_arch_ops);
Robin Murphy67b65a32016-04-13 18:12:57 +01003609
Joerg Roedel09b52692014-10-02 12:24:45 +02003610static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01003611 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
3612 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
3613 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01003614 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003615 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01003616 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07003617 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Patrick Daly1f8a2882016-09-12 17:32:05 -07003618 { .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
Robin Murphy09360402014-08-28 17:51:59 +01003619 { },
3620};
3621MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
3622
Patrick Dalyc47dcd42017-02-09 23:09:57 -08003623
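/*
 * Invoked via bus_for_each_dev() from probe below: platform devices created
 * before this SMMU registered its ops get a late of_iommu_configure() so
 * that bus_set_iommu() can attach them correctly.
 */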
3624static int arm_smmu_of_iommu_configure_fixup(struct device *dev, void *data)
3625{
3626 if (!dev->iommu_fwspec)
3627 of_iommu_configure(dev, dev->of_node);
3628 return 0;
3629}
3630
Patrick Daly1f8a2882016-09-12 17:32:05 -07003631static int qsmmuv500_tbu_register(struct device *dev, void *data);
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int num_irqs, i, err;
	bool legacy_binding;

	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
	if (legacy_binding && !using_generic_binding) {
		if (!using_legacy_binding)
			pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
		using_legacy_binding = true;
	} else if (!legacy_binding && !using_legacy_binding) {
		using_generic_binding = true;
	} else {
		dev_err(dev, "not probing due to mismatched DT properties\n");
		return -ENODEV;
	}

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;
	spin_lock_init(&smmu->atos_lock);
	idr_init(&smmu->asid_idr);
	mutex_init(&smmu->idr_mutex);

	data = of_device_get_match_data(dev);
	smmu->version = data->version;
	smmu->model = data->model;
	smmu->arch_ops = data->arch_ops;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	parse_driver_options(smmu);

	smmu->pwr = arm_smmu_init_power_resources(pdev);
	if (IS_ERR(smmu->pwr))
		return PTR_ERR(smmu->pwr);

	err = arm_smmu_power_on(smmu->pwr);
	if (err)
		goto out_exit_power_resources;

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		goto out_power_off;

	err = arm_smmu_parse_impl_def_registers(smmu);
	if (err)
		goto out_power_off;

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
			smmu->num_context_irqs, smmu->num_context_banks,
			smmu->num_context_banks);
		smmu->num_context_irqs = smmu->num_context_banks;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
					NULL, arm_smmu_global_fault,
					IRQF_ONESHOT | IRQF_SHARED,
					"arm-smmu global fault", smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_power_off;
		}
	}

	err = arm_smmu_arch_init(smmu);
	if (err)
		goto out_power_off;

	of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
	platform_set_drvdata(pdev, smmu);
	arm_smmu_device_reset(smmu);
	arm_smmu_power_off(smmu->pwr);

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	/* bus_set_iommu depends on this. */
	bus_for_each_dev(&platform_bus_type, NULL, NULL,
			 arm_smmu_of_iommu_configure_fixup);

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif
#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif
	return 0;

out_power_off:
	arm_smmu_power_off(smmu->pwr);

out_exit_power_resources:
	arm_smmu_exit_power_resources(smmu->pwr);

	return err;
}

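/*
 * Tear down a previously probed SMMU: power it on long enough to warn
 * about any still-active domains, disable translation by writing
 * sCR0_CLIENTPD, then drop and release the power resources.
 */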
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return -ENODEV;

	if (arm_smmu_power_on(smmu->pwr))
		return -EINVAL;

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(&pdev->dev, "removing device with active domains!\n");

	idr_destroy(&smmu->asid_idr);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	arm_smmu_power_off(smmu->pwr);

	arm_smmu_exit_power_resources(smmu->pwr);

	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver = {
		.name = "arm-smmu",
		.of_match_table = of_match_ptr(arm_smmu_of_match),
	},
	.probe = arm_smmu_device_dt_probe,
	.remove = arm_smmu_device_remove,
};

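/*
 * Driver registration is reachable both from the subsys_initcall below and
 * from the early arm_smmu_of_init() path, so guard it with a static
 * 'registered' flag to keep repeated calls harmless.
 */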
static int __init arm_smmu_init(void)
{
	static bool registered;
	int ret = 0;

	if (!registered) {
		ret = platform_driver_register(&arm_smmu_driver);
		registered = !ret;
	}
	return ret;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

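/*
 * Early init hook used by the IOMMU_OF_DECLARE() entries below: register
 * the platform driver and create the SMMU platform device from its DT
 * node so the SMMU is available ahead of generic platform bus population.
 */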
static int __init arm_smmu_of_init(struct device_node *np)
{
	int ret = arm_smmu_init();

	if (ret)
		return ret;

	if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
		return -ENODEV;

	return 0;
}
IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
IOMMU_OF_DECLARE(qcom_smmuv2, "qcom,smmu-v2", arm_smmu_of_init);
IOMMU_OF_DECLARE(qcom_mmu500, "qcom,qsmmu-v500", arm_smmu_of_init);

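/*
 * Qualcomm QSMMU-V500 support. Each TBU (translation buffer unit) behind
 * the SMMU is described as a child device of the SMMU node and carries its
 * own power resources plus a debug register space used to halt and resume
 * the TBU.
 */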
#define DEBUG_SID_HALT_REG		0x0
#define DEBUG_SID_HALT_VAL		(0x1 << 16)

#define DEBUG_SR_HALT_ACK_REG		0x20
#define DEBUG_SR_HALT_ACK_VAL		(0x1 << 1)

#define TBU_DBG_TIMEOUT_US		30000

struct qsmmuv500_tbu_device {
	struct list_head		list;
	struct device			*dev;
	struct arm_smmu_device		*smmu;
	void __iomem			*base;
	void __iomem			*status_reg;

	struct arm_smmu_power_resources	*pwr;

	/* Protects halt count */
	spinlock_t			halt_lock;
	u32				halt_count;
};

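/*
 * Power on every registered TBU; on failure, power off the ones that were
 * already brought up so the SMMU is left in a consistent state.
 */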
static int qsmmuv500_tbu_power_on_all(struct arm_smmu_device *smmu)
{
	struct qsmmuv500_tbu_device *tbu;
	struct list_head *list = smmu->archdata;
	int ret = 0;

	list_for_each_entry(tbu, list, list) {
		ret = arm_smmu_power_on(tbu->pwr);
		if (ret)
			break;
	}
	if (!ret)
		return 0;

	list_for_each_entry_continue_reverse(tbu, list, list) {
		arm_smmu_power_off(tbu->pwr);
	}
	return ret;
}

static void qsmmuv500_tbu_power_off_all(struct arm_smmu_device *smmu)
{
	struct qsmmuv500_tbu_device *tbu;
	struct list_head *list = smmu->archdata;

	list_for_each_entry_reverse(tbu, list, list) {
		arm_smmu_power_off(tbu->pwr);
	}
}

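/*
 * Halt a TBU by setting the halt request bit in its debug register and
 * polling for the acknowledgement bit. Halt requests are reference
 * counted, so nested callers only touch the hardware on the first halt
 * and the matching final resume.
 */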
static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu)
{
	unsigned long flags;
	u32 val;
	void __iomem *base;

	spin_lock_irqsave(&tbu->halt_lock, flags);
	if (tbu->halt_count) {
		tbu->halt_count++;
		spin_unlock_irqrestore(&tbu->halt_lock, flags);
		return 0;
	}

	base = tbu->base;
	val = readl_relaxed(base + DEBUG_SID_HALT_REG);
	val |= DEBUG_SID_HALT_VAL;
	writel_relaxed(val, base + DEBUG_SID_HALT_REG);

	if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG,
				      val, (val & DEBUG_SR_HALT_ACK_VAL),
				      0, TBU_DBG_TIMEOUT_US)) {
		dev_err(tbu->dev, "Couldn't halt TBU!\n");
		spin_unlock_irqrestore(&tbu->halt_lock, flags);
		return -ETIMEDOUT;
	}

	tbu->halt_count = 1;
	spin_unlock_irqrestore(&tbu->halt_lock, flags);
	return 0;
}

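/*
 * Drop one halt reference; only clear the halt request bit in hardware
 * once the last reference goes away. Resuming a TBU that was never halted
 * is a bug and triggers a WARN.
 */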
static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
{
	unsigned long flags;
	u32 val;
	void __iomem *base;

	spin_lock_irqsave(&tbu->halt_lock, flags);
	if (!tbu->halt_count) {
		WARN(1, "%s: bad tbu->halt_count", dev_name(tbu->dev));
		spin_unlock_irqrestore(&tbu->halt_lock, flags);
		return;

	} else if (tbu->halt_count > 1) {
		tbu->halt_count--;
		spin_unlock_irqrestore(&tbu->halt_lock, flags);
		return;
	}

	base = tbu->base;
	val = readl_relaxed(base + DEBUG_SID_HALT_REG);
	val &= ~DEBUG_SID_HALT_VAL;
	writel_relaxed(val, base + DEBUG_SID_HALT_REG);

	tbu->halt_count = 0;
	spin_unlock_irqrestore(&tbu->halt_lock, flags);
}

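/*
 * Halt every TBU on this SMMU; if any halt fails, resume the TBUs that
 * were already halted and return the error.
 */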
static int qsmmuv500_halt_all(struct arm_smmu_device *smmu)
{
	struct qsmmuv500_tbu_device *tbu;
	struct list_head *list = smmu->archdata;
	int ret = 0;

	list_for_each_entry(tbu, list, list) {
		ret = qsmmuv500_tbu_halt(tbu);
		if (ret)
			break;
	}

	if (!ret)
		return 0;

	list_for_each_entry_continue_reverse(tbu, list, list) {
		qsmmuv500_tbu_resume(tbu);
	}
	return ret;
}

static void qsmmuv500_resume_all(struct arm_smmu_device *smmu)
{
	struct qsmmuv500_tbu_device *tbu;
	struct list_head *list = smmu->archdata;

	list_for_each_entry(tbu, list, list) {
		qsmmuv500_tbu_resume(tbu);
	}
}

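/*
 * Reset hook: power on and halt all TBUs, program the implementation
 * defined attach registers into the global register space, then resume
 * and power the TBUs back off.
 */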
static void qsmmuv500_device_reset(struct arm_smmu_device *smmu)
{
	int i, ret;
	struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;

	ret = qsmmuv500_tbu_power_on_all(smmu);
	if (ret)
		return;

	/* Program implementation defined registers */
	qsmmuv500_halt_all(smmu);
	for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
		writel_relaxed(regs[i].value,
			       ARM_SMMU_GR0(smmu) + regs[i].offset);
	qsmmuv500_resume_all(smmu);
	qsmmuv500_tbu_power_off_all(smmu);
}

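/*
 * device_for_each_child() callback: every TBU child must have bound its
 * driver before the parent SMMU can complete its own init; add each one
 * to the SMMU's archdata list.
 */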
static int qsmmuv500_tbu_register(struct device *dev, void *data)
{
	struct arm_smmu_device *smmu = data;
	struct qsmmuv500_tbu_device *tbu;
	struct list_head *list = smmu->archdata;

	if (!dev->driver) {
		dev_err(dev, "TBU failed probe, QSMMUV500 cannot continue!\n");
		return -EINVAL;
	}

	tbu = dev_get_drvdata(dev);

	INIT_LIST_HEAD(&tbu->list);
	tbu->smmu = smmu;
	list_add(&tbu->list, list);
	return 0;
}

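/*
 * Architecture-specific init for QSMMU-V500: populate the TBU child
 * devices from DT and collect them on a list hung off smmu->archdata.
 */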
static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;
	struct list_head *list;
	int ret;

	list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	INIT_LIST_HEAD(list);
	smmu->archdata = list;

	ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
	if (ret)
		return ret;

	/* Attempt to register child devices */
	ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
	if (ret)
		return -EINVAL;

	return 0;
}

struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
	.init = qsmmuv500_arch_init,
	.device_reset = qsmmuv500_device_reset,
};

static const struct of_device_id qsmmuv500_tbu_of_match[] = {
	{.compatible = "qcom,qsmmuv500-tbu"},
	{}
};

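/*
 * Probe a single TBU child device: map its debug register space and
 * status register, and set up its dedicated power resources.
 */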
static int qsmmuv500_tbu_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct qsmmuv500_tbu_device *tbu;

	tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
	if (!tbu)
		return -ENOMEM;

	INIT_LIST_HEAD(&tbu->list);
	tbu->dev = dev;
	spin_lock_init(&tbu->halt_lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	tbu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(tbu->base))
		return PTR_ERR(tbu->base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "status-reg");
	tbu->status_reg = devm_ioremap_resource(dev, res);
	if (IS_ERR(tbu->status_reg))
		return PTR_ERR(tbu->status_reg);

	tbu->pwr = arm_smmu_init_power_resources(pdev);
	if (IS_ERR(tbu->pwr))
		return PTR_ERR(tbu->pwr);

	dev_set_drvdata(dev, tbu);
	return 0;
}

static struct platform_driver qsmmuv500_tbu_driver = {
	.driver = {
		.name = "qsmmuv500-tbu",
		.of_match_table = of_match_ptr(qsmmuv500_tbu_of_match),
	},
	.probe = qsmmuv500_tbu_probe,
};

static int __init qsmmuv500_tbu_init(void)
{
	return platform_driver_register(&qsmmuv500_tbu_driver);
}
subsys_initcall(qsmmuv500_tbu_init);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");