/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/of_platform.h>
#include <linux/msm-bus.h>
#include <dt-bindings/msm/msm-bus-ids.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS 128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu) ((smmu)->base)
#define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu) \
	((smmu)->base + \
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq writeq_relaxed
#else
#define smmu_write_atomic_lq writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0 0x0
#define sCR0_CLIENTPD (1 << 0)
#define sCR0_GFRE (1 << 1)
#define sCR0_GFIE (1 << 2)
#define sCR0_GCFGFRE (1 << 4)
#define sCR0_GCFGFIE (1 << 5)
#define sCR0_USFCFG (1 << 10)
#define sCR0_VMIDPNE (1 << 11)
#define sCR0_PTM (1 << 12)
#define sCR0_FB (1 << 13)
#define sCR0_VMID16EN (1 << 31)
#define sCR0_BSU_SHIFT 14
#define sCR0_BSU_MASK 0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR 0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0 0x20
#define ARM_SMMU_GR0_ID1 0x24
#define ARM_SMMU_GR0_ID2 0x28
#define ARM_SMMU_GR0_ID3 0x2c
#define ARM_SMMU_GR0_ID4 0x30
#define ARM_SMMU_GR0_ID5 0x34
#define ARM_SMMU_GR0_ID6 0x38
#define ARM_SMMU_GR0_ID7 0x3c
#define ARM_SMMU_GR0_sGFSR 0x48
#define ARM_SMMU_GR0_sGFSYNR0 0x50
#define ARM_SMMU_GR0_sGFSYNR1 0x54
#define ARM_SMMU_GR0_sGFSYNR2 0x58

#define ID0_S1TS (1 << 30)
#define ID0_S2TS (1 << 29)
#define ID0_NTS (1 << 28)
#define ID0_SMS (1 << 27)
#define ID0_ATOSNS (1 << 26)
#define ID0_PTFS_NO_AARCH32 (1 << 25)
#define ID0_PTFS_NO_AARCH32S (1 << 24)
#define ID0_CTTW (1 << 14)
#define ID0_NUMIRPT_SHIFT 16
#define ID0_NUMIRPT_MASK 0xff
#define ID0_NUMSIDB_SHIFT 9
#define ID0_NUMSIDB_MASK 0xf
#define ID0_NUMSMRG_SHIFT 0
#define ID0_NUMSMRG_MASK 0xff

#define ID1_PAGESIZE (1 << 31)
#define ID1_NUMPAGENDXB_SHIFT 28
#define ID1_NUMPAGENDXB_MASK 7
#define ID1_NUMS2CB_SHIFT 16
#define ID1_NUMS2CB_MASK 0xff
#define ID1_NUMCB_SHIFT 0
#define ID1_NUMCB_MASK 0xff

#define ID2_OAS_SHIFT 4
#define ID2_OAS_MASK 0xf
#define ID2_IAS_SHIFT 0
#define ID2_IAS_MASK 0xf
#define ID2_UBS_SHIFT 8
#define ID2_UBS_MASK 0xf
#define ID2_PTFS_4K (1 << 12)
#define ID2_PTFS_16K (1 << 13)
#define ID2_PTFS_64K (1 << 14)
#define ID2_VMID16 (1 << 15)

#define ID7_MAJOR_SHIFT 4
#define ID7_MAJOR_MASK 0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID 0x64
#define ARM_SMMU_GR0_TLBIALLNSNH 0x68
#define ARM_SMMU_GR0_TLBIALLH 0x6c
#define ARM_SMMU_GR0_sTLBGSYNC 0x70
#define ARM_SMMU_GR0_sTLBGSTATUS 0x74
#define sTLBGSTATUS_GSACTIVE (1 << 0)
#define TLB_LOOP_TIMEOUT 500000 /* 500ms */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
#define SMR_VALID (1 << 31)
#define SMR_MASK_SHIFT 16
#define SMR_MASK_MASK 0x7FFF
#define SMR_ID_SHIFT 0

#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT 0
#define S2CR_CBNDX_MASK 0xff
#define S2CR_TYPE_SHIFT 16
#define S2CR_TYPE_MASK 0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT 24
#define S2CR_PRIVCFG_MASK 0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT 0
#define CBAR_VMID_MASK 0xff
#define CBAR_S1_BPSHCFG_SHIFT 8
#define CBAR_S1_BPSHCFG_MASK 3
#define CBAR_S1_BPSHCFG_NSH 3
#define CBAR_S1_MEMATTR_SHIFT 12
#define CBAR_S1_MEMATTR_MASK 0xf
#define CBAR_S1_MEMATTR_WB 0xf
#define CBAR_TYPE_SHIFT 16
#define CBAR_TYPE_MASK 0x3
#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT 24
#define CBAR_IRPTNDX_MASK 0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK (0xffff)

#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT (0 << 0)
#define CBA2R_RW64_64BIT (1 << 0)
#define CBA2R_VMID_SHIFT 16
#define CBA2R_VMID_MASK 0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR 0x0
#define ARM_SMMU_CB_ACTLR 0x4
#define ARM_SMMU_CB_RESUME 0x8
#define ARM_SMMU_CB_TTBCR2 0x10
#define ARM_SMMU_CB_TTBR0 0x20
#define ARM_SMMU_CB_TTBR1 0x28
#define ARM_SMMU_CB_TTBCR 0x30
#define ARM_SMMU_CB_CONTEXTIDR 0x34
#define ARM_SMMU_CB_S1_MAIR0 0x38
#define ARM_SMMU_CB_S1_MAIR1 0x3c
#define ARM_SMMU_CB_PAR 0x50
#define ARM_SMMU_CB_FSR 0x58
#define ARM_SMMU_CB_FSRRESTORE 0x5c
#define ARM_SMMU_CB_FAR 0x60
#define ARM_SMMU_CB_FSYNR0 0x68
#define ARM_SMMU_CB_S1_TLBIVA 0x600
#define ARM_SMMU_CB_S1_TLBIASID 0x610
#define ARM_SMMU_CB_S1_TLBIALL 0x618
#define ARM_SMMU_CB_S1_TLBIVAL 0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
#define ARM_SMMU_CB_TLBSYNC 0x7f0
#define ARM_SMMU_CB_TLBSTATUS 0x7f4
#define TLBSTATUS_SACTIVE (1 << 0)
#define ARM_SMMU_CB_ATS1PR 0x800
#define ARM_SMMU_CB_ATSR 0x8f0

#define SCTLR_S1_ASIDPNE (1 << 12)
#define SCTLR_CFCFG (1 << 7)
#define SCTLR_HUPCF (1 << 8)
#define SCTLR_CFIE (1 << 6)
#define SCTLR_CFRE (1 << 5)
#define SCTLR_E (1 << 4)
#define SCTLR_AFE (1 << 2)
#define SCTLR_TRE (1 << 1)
#define SCTLR_M (1 << 0)

#define ARM_MMU500_ACTLR_CPRE (1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define CB_PAR_F (1 << 0)

#define ATSR_ACTIVE (1 << 0)

#define RESUME_RETRY (0 << 0)
#define RESUME_TERMINATE (1 << 0)

#define TTBCR2_SEP_SHIFT 15
#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT 48

#define FSR_MULTI (1 << 31)
#define FSR_SS (1 << 30)
#define FSR_UUT (1 << 8)
#define FSR_ASF (1 << 7)
#define FSR_TLBLKF (1 << 6)
#define FSR_TLBMCF (1 << 5)
#define FSR_EF (1 << 4)
#define FSR_PF (1 << 3)
#define FSR_AFF (1 << 2)
#define FSR_TF (1 << 1)

#define FSR_IGN (FSR_AFF | FSR_ASF | \
		 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
		   FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR (1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
	QCOM_SMMUV500,
};

struct arm_smmu_device;
struct arm_smmu_arch_ops {
	int (*init)(struct arm_smmu_device *smmu);
	void (*device_reset)(struct arm_smmu_device *smmu);
	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
					 dma_addr_t iova);
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_s2cr {
	struct iommu_group *group;
	int count;
	enum arm_smmu_s2cr_type type;
	enum arm_smmu_s2cr_privcfg privcfg;
	u8 cbndx;
	bool cb_handoff;
};

#define s2cr_init_val (struct arm_smmu_s2cr){ \
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \
	.cb_handoff = false, \
}

struct arm_smmu_smr {
	u16 mask;
	u16 id;
	bool valid;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device *smmu;
	s16 smendx[];
};
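/*
 * Per-master data lives in dev->iommu_fwspec->iommu_priv: smendx[] records
 * the stream-map register index allocated for each of the master's stream
 * IDs, or INVALID_SMENDX when none has been assigned yet.
 */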
#define INVALID_SMENDX -1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

/*
 * Describes resources required for on/off power operation.
 * Separate reference count is provided for atomic/nonatomic
 * operations.
 */
struct arm_smmu_power_resources {
	struct platform_device *pdev;
	struct device *dev;

	struct clk **clocks;
	int num_clocks;

	struct regulator_bulk_data *gdscs;
	int num_gdscs;

	uint32_t bus_client;
	struct msm_bus_scale_pdata *bus_dt_data;

	/* Protects power_count */
	struct mutex power_lock;
	int power_count;

	/* Protects clock_refs_count */
	spinlock_t clock_refs_lock;
	int clock_refs_count;
};

struct arm_smmu_device {
	struct device *dev;

	void __iomem *base;
	unsigned long size;
	unsigned long pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
#define ARM_SMMU_FEAT_VMID16 (1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K (1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K (1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
	u32 features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF (1 << 1)
#define ARM_SMMU_OPT_SKIP_INIT (1 << 2)
#define ARM_SMMU_OPT_DYNAMIC (1 << 3)
#define ARM_SMMU_OPT_3LVL_TABLES (1 << 4)
#define ARM_SMMU_OPT_NO_ASID_RETENTION (1 << 5)
	u32 options;
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;

	u32 num_context_banks;
	u32 num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t irptndx;

	u32 num_mapping_groups;
	u16 streamid_mask;
	u16 smr_mask_mask;
	struct arm_smmu_smr *smrs;
	struct arm_smmu_s2cr *s2crs;
	struct mutex stream_map_mutex;

	unsigned long va_size;
	unsigned long ipa_size;
	unsigned long pa_size;
	unsigned long pgsize_bitmap;

	u32 num_global_irqs;
	u32 num_context_irqs;
	unsigned int *irqs;

	struct list_head list;

	u32 cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg *impl_def_attach_registers;
	unsigned int num_impl_def_attach_registers;

	struct arm_smmu_power_resources *pwr;

	spinlock_t atos_lock;

	/* protects idr */
	struct mutex idr_mutex;
	struct idr asid_idr;

	struct arm_smmu_arch_ops *arch_ops;
	void *archdata;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8 cbndx;
	u8 irptndx;
	u32 cbar;
	u32 procid;
	u16 asid;
	enum arm_smmu_context_fmt fmt;
};
#define INVALID_IRPTNDX 0xff
#define INVALID_CBNDX 0xff
#define INVALID_ASID 0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID 0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_pte_info {
	void *virt_addr;
	size_t size;
	struct list_head entry;
};

struct arm_smmu_domain {
	struct arm_smmu_device *smmu;
	struct device *dev;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	spinlock_t pgtbl_lock;
	struct arm_smmu_cfg cfg;
	enum arm_smmu_domain_stage stage;
	struct mutex init_mutex; /* Protects smmu pointer */
	u32 attributes;
	u32 secure_vmid;
	struct list_head pte_info_list;
	struct list_head unassign_list;
	struct mutex assign_lock;
	struct list_head secure_pool_list;
	struct iommu_domain domain;
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
	{ ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
	{ 0, NULL},
};

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);

static int arm_smmu_arch_init(struct arm_smmu_device *smmu);
static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu);

static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
				     dma_addr_t iova);

static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);

static int arm_smmu_alloc_cb(struct iommu_domain *domain,
			     struct arm_smmu_device *smmu,
			     struct device *dev);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_dbg(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

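/* True when DOMAIN_ATTR_DYNAMIC is set in the domain's attribute mask. */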
static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
{
	if (smmu_domain->attributes &
		(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT))
		return true;
	else if (smmu_domain->smmu && smmu_domain->smmu->dev)
		return smmu_domain->smmu->dev->archdata.dma_coherent;
	else
		return false;
}

static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
{
	return (smmu_domain->secure_vmid != VMID_INVAL);
}

static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_lock(&smmu_domain->assign_lock);
}

static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_unlock(&smmu_domain->assign_lock);
}

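/*
 * Return the OF node used for legacy "mmu-masters" matching: for PCI devices
 * this is the host bridge's node (found by walking up to the root bus),
 * otherwise the device's own node. The caller must drop the reference taken
 * here with of_node_put().
 */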
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

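/*
 * driver_for_each_device() callback: @dev is a candidate SMMU. Walk its
 * "mmu-masters" phandle list looking for the master's node stashed in the
 * iterator; on a match, replace *data with the SMMU device and return 1 to
 * stop the iteration.
 */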
static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					    struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err = 0;

	memset(&it, 0, sizeof(it));
	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

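/*
 * Find and atomically claim a free index in @map between @start and @end.
 * Returns the claimed index, or -ENOSPC if the bitmap is full.
 */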
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_prepare(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
			while (i--)
				clk_unprepare(pwr->clocks[i]);
			break;
		}
	}
	return ret;
}

static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_unprepare(pwr->clocks[i - 1]);
}

static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_enable(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
			while (i--)
				clk_disable(pwr->clocks[i]);
			break;
		}
	}

	return ret;
}

static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_disable(pwr->clocks[i - 1]);
}

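/*
 * Vote for SMMU bus bandwidth through the MSM bus scaling framework.
 * A zero bus_client handle means no bus vote was registered for this SMMU.
 */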
static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return 0;
	return msm_bus_scale_client_update_request(pwr->bus_client, 1);
}

static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return;
	WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
}

/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count > 0) {
		pwr->clock_refs_count++;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return 0;
	}

	ret = arm_smmu_enable_clocks(pwr);
	if (!ret)
		pwr->clock_refs_count = 1;

	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
	return ret;
}

/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
{
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count == 0) {
		WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;

	} else if (pwr->clock_refs_count > 1) {
		pwr->clock_refs_count--;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;
	}

	arm_smmu_disable_clocks(pwr);

	pwr->clock_refs_count = 0;
	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
}

static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
{
	int ret;

	mutex_lock(&pwr->power_lock);
	if (pwr->power_count > 0) {
		pwr->power_count += 1;
		mutex_unlock(&pwr->power_lock);
		return 0;
	}

	ret = arm_smmu_request_bus(pwr);
	if (ret)
		goto out_unlock;

	ret = regulator_bulk_enable(pwr->num_gdscs, pwr->gdscs);
	if (ret)
		goto out_disable_bus;

	ret = arm_smmu_prepare_clocks(pwr);
	if (ret)
		goto out_disable_regulators;

	pwr->power_count = 1;
	mutex_unlock(&pwr->power_lock);
	return 0;

out_disable_regulators:
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
out_disable_bus:
	arm_smmu_unrequest_bus(pwr);
out_unlock:
	mutex_unlock(&pwr->power_lock);
	return ret;
}

static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
{
	mutex_lock(&pwr->power_lock);
	if (pwr->power_count == 0) {
		WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
		mutex_unlock(&pwr->power_lock);
		return;

	} else if (pwr->power_count > 1) {
		pwr->power_count--;
		mutex_unlock(&pwr->power_lock);
		return;
	}

	arm_smmu_unprepare_clocks(pwr);
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
	arm_smmu_unrequest_bus(pwr);
	pwr->power_count = 0;
	mutex_unlock(&pwr->power_lock);
}

static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
{
	int ret;

	ret = arm_smmu_power_on_slow(pwr);
	if (ret)
		return ret;

	ret = arm_smmu_power_on_atomic(pwr);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	arm_smmu_power_off_slow(pwr);
	return ret;
}

static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
{
	arm_smmu_power_off_atomic(pwr);
	arm_smmu_power_off_slow(pwr);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static int arm_smmu_domain_power_on(struct iommu_domain *domain,
				    struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain)
		return arm_smmu_power_on_atomic(smmu->pwr);

	return arm_smmu_power_on(smmu->pwr);
}

/*
 * Must be used instead of arm_smmu_power_off if it may be called from
 * atomic context
 */
static void arm_smmu_domain_power_off(struct iommu_domain *domain,
				      struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain) {
		arm_smmu_power_off_atomic(smmu->pwr);
		return;
	}

	arm_smmu_power_off(smmu->pwr);
}

/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
				 int cbndx)
{
	void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
	u32 val;

	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
				      !(val & TLBSTATUS_SACTIVE),
				      0, TLB_LOOP_TIMEOUT))
		dev_err(smmu->dev, "TLBSYNC timeout!\n");
}

static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
}

/* Must be called with clocks/regulators enabled */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;
	bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;

	if (stage1 && !use_tlbiall) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else if (stage1 && use_tlbiall) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
		__arm_smmu_tlb_sync(smmu);
	}
}

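/*
 * Invalidate a range of IOVAs without waiting for completion (the caller is
 * expected to issue the TLB sync). Stage 1 uses per-ASID TLBIVA/TLBIVAL, or a
 * full TLBIALL when ASID retention is disabled; stage 2 uses TLBIIPAS2(L) on
 * SMMUv2 and falls back to a global TLBIVMID otherwise.
 */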
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;
	bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;

	if (stage1 && !use_tlbiall) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (stage1 && use_tlbiall) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += ARM_SMMU_CB_S1_TLBIALL;
		writel_relaxed(0, reg);
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

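/*
 * Page-table memory for secure domains is recycled through a per-domain
 * pool: freed chunks are zeroed and kept on secure_pool_list, keyed by size,
 * so later allocations of the same size can reuse them instead of going
 * through another prepare/unprepare cycle.
 */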
struct arm_smmu_secure_pool_chunk {
	void *addr;
	size_t size;
	struct list_head list;
};

static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
					 size_t size)
{
	struct arm_smmu_secure_pool_chunk *it;

	list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
		if (it->size == size) {
			void *addr = it->addr;

			list_del(&it->list);
			kfree(it);
			return addr;
		}
	}

	return NULL;
}

static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
				    void *addr, size_t size)
{
	struct arm_smmu_secure_pool_chunk *chunk;

	chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
	if (!chunk)
		return -ENOMEM;

	chunk->addr = addr;
	chunk->size = size;
	memset(addr, 0, size);
	list_add(&chunk->list, &smmu_domain->secure_pool_list);

	return 0;
}

static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_secure_pool_chunk *it, *i;

	list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
		arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
		/* pages will be freed later (after being unassigned) */
		kfree(it);
	}
}

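/*
 * Page-table allocator hook wired into arm_smmu_gather_ops: non-secure
 * domains use plain alloc_pages_exact(); secure domains first try the secure
 * pool and otherwise allocate fresh pages and prepare them for assignment.
 */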
static void *arm_smmu_alloc_pages_exact(void *cookie,
					size_t size, gfp_t gfp_mask)
{
	int ret;
	void *page;
	struct arm_smmu_domain *smmu_domain = cookie;

	if (!arm_smmu_is_domain_secure(smmu_domain))
		return alloc_pages_exact(size, gfp_mask);

	page = arm_smmu_secure_pool_remove(smmu_domain, size);
	if (page)
		return page;

	page = alloc_pages_exact(size, gfp_mask);
	if (page) {
		ret = arm_smmu_prepare_pgtable(page, cookie);
		if (ret) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return page;
}

static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	if (!arm_smmu_is_domain_secure(smmu_domain)) {
		free_pages_exact(virt, size);
		return;
	}

	if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
		arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all = arm_smmu_tlb_inv_context,
	.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
	.tlb_sync = arm_smmu_tlb_sync,
	.alloc_pages_exact = arm_smmu_alloc_pages_exact,
	.free_pages_exact = arm_smmu_free_pages_exact,
};

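/*
 * Called for an unhandled context fault: translate the faulting IOVA via
 * ATOS, invalidate the context's TLB entries, then translate again. A
 * mismatch between the two results is reported, since it suggests the fault
 * was served by a stale TLB entry. Returns the pre-invalidate result, or the
 * post-invalidate one if the first lookup failed.
 */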
static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
					 dma_addr_t iova, u32 fsr)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	phys_addr_t phys;
	phys_addr_t phys_post_tlbiall;

	phys = arm_smmu_iova_to_phys_hard(domain, iova);
	arm_smmu_tlb_inv_context(smmu_domain);
	phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);

	if (phys != phys_post_tlbiall) {
		dev_err(smmu->dev,
			"ATOS results differed across TLBIALL...\n"
			"Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
	}

	return (phys == 0 ? phys_post_tlbiall : phys);
}

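/*
 * Context bank fault handler: power on the SMMU, decode FSR/FSYNR/FAR and
 * the faulting SID, and report the fault to any handler registered on the
 * domain. Unhandled faults are logged (ratelimited) and BUG() unless the
 * domain has DOMAIN_ATTR_NON_FATAL_FAULTS set.
 */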
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret, tmp;
	u32 fsr, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;
	void __iomem *gr1_base;
	bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
	phys_addr_t phys_soft;
	u32 frsynra;
	bool non_fatal_fault = !!(smmu_domain->attributes &
				  (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));

	static DEFINE_RATELIMIT_STATE(_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	ret = arm_smmu_power_on(smmu->pwr);
	if (ret)
		return IRQ_NONE;

	gr1_base = ARM_SMMU_GR1(smmu);
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT)) {
		ret = IRQ_NONE;
		goto out_power_off;
	}

	if (fatal_asf && (fsr & FSR_ASF)) {
		dev_err(smmu->dev,
			"Took an address size fault. Refusing to recover.\n");
		BUG();
	}

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
	if (fsr & FSR_TF)
		flags |= IOMMU_FAULT_TRANSLATION;
	if (fsr & FSR_PF)
		flags |= IOMMU_FAULT_PERMISSION;
	if (fsr & FSR_EF)
		flags |= IOMMU_FAULT_EXTERNAL;
	if (fsr & FSR_SS)
		flags |= IOMMU_FAULT_TRANSACTION_STALLED;

	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	phys_soft = arm_smmu_iova_to_phys(domain, iova);
	frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
	frsynra &= CBFRSYNRA_SID_MASK;
	tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
	if (!tmp || (tmp == -EBUSY)) {
		dev_dbg(smmu->dev,
			"Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
			iova, fsr, fsynr, cfg->cbndx);
		dev_dbg(smmu->dev,
			"soft iova-to-phys=%pa\n", &phys_soft);
		ret = IRQ_HANDLED;
		resume = RESUME_TERMINATE;
	} else {
		phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
							      fsr);
		if (__ratelimit(&_rs)) {
			dev_err(smmu->dev,
				"Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
				iova, fsr, fsynr, cfg->cbndx);
			dev_err(smmu->dev, "FAR = %016lx\n",
				(unsigned long)iova);
			dev_err(smmu->dev,
				"FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
				fsr,
				(fsr & 0x02) ? "TF " : "",
				(fsr & 0x04) ? "AFF " : "",
				(fsr & 0x08) ? "PF " : "",
				(fsr & 0x10) ? "EF " : "",
				(fsr & 0x20) ? "TLBMCF " : "",
				(fsr & 0x40) ? "TLBLKF " : "",
				(fsr & 0x80) ? "MHF " : "",
				(fsr & 0x40000000) ? "SS " : "",
				(fsr & 0x80000000) ? "MULTI " : "");
			dev_err(smmu->dev,
				"soft iova-to-phys=%pa\n", &phys_soft);
			if (!phys_soft)
				dev_err(smmu->dev,
					"SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
					dev_name(smmu->dev));
			if (phys_atos)
				dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
					&phys_atos);
			else
				dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
			dev_err(smmu->dev, "SID=0x%x\n", frsynra);
		}
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
		if (!non_fatal_fault) {
			dev_err(smmu->dev,
				"Unhandled arm-smmu context fault!\n");
			BUG();
		}
	}

	/*
	 * If the client returns -EBUSY, do not clear FSR and do not RESUME
	 * if stalled. This is required to keep the IOMMU client stalled on
	 * the outstanding fault. This gives the client a chance to take any
	 * debug action and then terminate the stalled transaction.
	 * So, the sequence in case of stall on fault should be:
	 * 1) Do not clear FSR or write to RESUME here
	 * 2) Client takes any debug action
1300 * 3) Client terminates the stalled transaction and resumes the IOMMU
1301 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1302 * not before so that the fault remains outstanding. This ensures
1303 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1304 * need to be terminated.
1305 */
1306 if (tmp != -EBUSY) {
1307 /* Clear the faulting FSR */
1308 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001309
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001310 /*
1311 * Barrier required to ensure that the FSR is cleared
1312 * before resuming SMMU operation
1313 */
1314 wmb();
1315
1316 /* Retry or terminate any stalled transactions */
1317 if (fsr & FSR_SS)
1318 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1319 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001320
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001321out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001322 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001323
Patrick Daly5ba28112016-08-30 19:18:52 -07001324 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001325}
1326
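/*
 * Editor's sketch (not part of the original driver): a minimal client
 * fault handler following the stall-on-fault sequence documented in
 * arm_smmu_context_fault() above.  Returning -EBUSY tells the driver to
 * leave FSR uncleared and the transaction stalled, so the client can
 * collect debug state before terminating the access itself.  The handler
 * name and the debug action are illustrative assumptions.
 */
static int example_client_fault_handler(struct iommu_domain *domain,
					struct device *dev,
					unsigned long iova, int flags,
					void *token)
{
	if (flags & IOMMU_FAULT_TRANSACTION_STALLED) {
		/* Step 2 of the sequence: debug while the fault is live */
		dev_err(dev, "stalled SMMU fault at IOVA %#lx\n", iova);
		/*
		 * Steps 3 and 4 (terminating the transaction and clearing
		 * FSR) happen later, from the client's own recovery path.
		 */
		return -EBUSY;
	}

	/* Non-stalled faults: let the driver terminate and clear FSR */
	return 0;
}

/*
 * Registered once per domain, e.g. right after iommu_domain_alloc():
 *	iommu_set_fault_handler(domain, example_client_fault_handler, NULL);
 */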
1327static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1328{
1329 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1330 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001331 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001332
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001333 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001334 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001335
Will Deacon45ae7cf2013-06-24 18:31:25 +01001336 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1337 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1338 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1339 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1340
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001341 if (!gfsr) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001342 arm_smmu_power_off(smmu->pwr);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001343 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001344 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001345
Will Deacon45ae7cf2013-06-24 18:31:25 +01001346 dev_err_ratelimited(smmu->dev,
1347 "Unexpected global fault, this could be serious\n");
1348 dev_err_ratelimited(smmu->dev,
1349 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1350 gfsr, gfsynr0, gfsynr1, gfsynr2);
1351
1352 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001353 arm_smmu_power_off(smmu->pwr);
Will Deaconadaba322013-07-31 19:21:26 +01001354 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001355}
1356
Will Deacon518f7132014-11-14 17:17:54 +00001357static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1358 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001359{
Robin Murphyb94df6f2016-08-11 17:44:06 +01001360 u32 reg, reg2;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001361 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001362 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +01001363 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1364 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001365 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001366
Will Deacon45ae7cf2013-06-24 18:31:25 +01001367 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001368 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1369 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001370
Will Deacon4a1c93c2015-03-04 12:21:03 +00001371 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001372 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1373 reg = CBA2R_RW64_64BIT;
1374 else
1375 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001376 /* 16-bit VMIDs live in CBA2R */
1377 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001378 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001379
Will Deacon4a1c93c2015-03-04 12:21:03 +00001380 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
1381 }
1382
Will Deacon45ae7cf2013-06-24 18:31:25 +01001383 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001384 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001385 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001386 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001387
Will Deacon57ca90f2014-02-06 14:59:05 +00001388 /*
1389 * Use the weakest shareability/memory types, so they are
1390 * overridden by the ttbcr/pte.
1391 */
1392 if (stage1) {
1393 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1394 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001395 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1396 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001397 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001398 }
Will Deacon44680ee2014-06-25 11:29:12 +01001399 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001400
Will Deacon518f7132014-11-14 17:17:54 +00001401 /* TTBRs */
1402 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001403 u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001404
Robin Murphyb94df6f2016-08-11 17:44:06 +01001405 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1406 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
1407 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
1408 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
1409 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
1410 writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
1411 } else {
1412 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1413 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1414 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
1415 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
1416 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1417 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
1418 }
Will Deacon518f7132014-11-14 17:17:54 +00001419 } else {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001420 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001421 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +00001422 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001423
Will Deacon518f7132014-11-14 17:17:54 +00001424 /* TTBCR */
1425 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001426 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1427 reg = pgtbl_cfg->arm_v7s_cfg.tcr;
1428 reg2 = 0;
1429 } else {
1430 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1431 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
1432 reg2 |= TTBCR2_SEP_UPSTREAM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001433 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001434 if (smmu->version > ARM_SMMU_V1)
1435 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001436 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001437 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001438 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001439 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001440
Will Deacon518f7132014-11-14 17:17:54 +00001441 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001442 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001443 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1444 reg = pgtbl_cfg->arm_v7s_cfg.prrr;
1445 reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
1446 } else {
1447 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1448 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1449 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001450 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001451 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001452 }
1453
Will Deacon45ae7cf2013-06-24 18:31:25 +01001454 /* SCTLR */
Robin Murphyb94df6f2016-08-11 17:44:06 +01001455 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001456
Charan Teja Reddyc682e472017-04-20 19:11:20 +05301457 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_CB_STALL_DISABLE)) {
1458 reg &= ~SCTLR_CFCFG;
1459 reg |= SCTLR_HUPCF;
1460 }
1461
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001462 if ((!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
1463 !(smmu_domain->attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) ||
1464 !stage1)
Patrick Dalye62d3362016-03-15 18:58:28 -07001465 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001466 if (stage1)
1467 reg |= SCTLR_S1_ASIDPNE;
1468#ifdef __BIG_ENDIAN
1469 reg |= SCTLR_E;
1470#endif
Will Deacon25724842013-08-21 13:49:53 +01001471 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001472}
1473
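/*
 * Editor's note (not in the original source): the SCTLR tweaks above are
 * driven by domain attributes that a client sets before attaching.  A
 * hedged sketch of that sequence, assuming the attributes take an int
 * (the exact plumbing lives in arm_smmu_domain_set_attr(), outside this
 * excerpt):
 *
 *	int val = 1;
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_CB_STALL_DISABLE, &val);
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_NON_FATAL_FAULTS, &val);
 *	iommu_attach_device(domain, dev);
 */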
Patrick Dalyc190d932016-08-30 17:23:28 -07001474static int arm_smmu_init_asid(struct iommu_domain *domain,
1475 struct arm_smmu_device *smmu)
1476{
1477 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1478 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1479 bool dynamic = is_dynamic_domain(domain);
1480 int ret;
1481
1482 if (!dynamic) {
1483 cfg->asid = cfg->cbndx + 1;
1484 } else {
1485 mutex_lock(&smmu->idr_mutex);
1486 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1487 smmu->num_context_banks + 2,
1488 MAX_ASID + 1, GFP_KERNEL);
1489
1490 mutex_unlock(&smmu->idr_mutex);
1491 if (ret < 0) {
1492 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1493 ret);
1494 return ret;
1495 }
1496 cfg->asid = ret;
1497 }
1498 return 0;
1499}
1500
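/*
 * Editor's note on the ASID split above (illustration only): regular
 * domains get a fixed ASID of cbndx + 1, i.e. the range
 * 1..num_context_banks, while dynamic domains draw from the IDR in
 * [num_context_banks + 2, MAX_ASID], so the two ranges cannot collide.
 */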
1501static void arm_smmu_free_asid(struct iommu_domain *domain)
1502{
1503 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1504 struct arm_smmu_device *smmu = smmu_domain->smmu;
1505 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1506 bool dynamic = is_dynamic_domain(domain);
1507
1508 if (cfg->asid == INVALID_ASID || !dynamic)
1509 return;
1510
1511 mutex_lock(&smmu->idr_mutex);
1512 idr_remove(&smmu->asid_idr, cfg->asid);
1513 mutex_unlock(&smmu->idr_mutex);
1514}
1515
Will Deacon45ae7cf2013-06-24 18:31:25 +01001516static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Patrick Dalyea63baa2017-02-13 17:11:33 -08001517 struct arm_smmu_device *smmu,
1518 struct device *dev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001519{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001520 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001521 unsigned long ias, oas;
1522 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001523 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001524 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001525 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001526 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyce6786f2016-11-09 14:19:23 -08001527 unsigned long quirks = 0;
Patrick Dalyc190d932016-08-30 17:23:28 -07001528 bool dynamic;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001529
Will Deacon518f7132014-11-14 17:17:54 +00001530 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001531 if (smmu_domain->smmu)
1532 goto out_unlock;
1533
Patrick Dalyc190d932016-08-30 17:23:28 -07001534 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1535 smmu_domain->cfg.asid = INVALID_ASID;
1536
Patrick Dalyc190d932016-08-30 17:23:28 -07001537 dynamic = is_dynamic_domain(domain);
1538 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1539 dev_err(smmu->dev, "dynamic domains not supported\n");
1540 ret = -EPERM;
1541 goto out_unlock;
1542 }
1543
Will Deaconc752ce42014-06-25 22:46:31 +01001544 /*
1545 * Mapping the requested stage onto what we support is surprisingly
1546 * complicated, mainly because the spec allows S1+S2 SMMUs without
1547 * support for nested translation. That means we end up with the
1548 * following table:
1549 *
1550 * Requested Supported Actual
1551 * S1 N S1
1552 * S1 S1+S2 S1
1553 * S1 S2 S2
1554 * S1 S1 S1
1555 * N N N
1556 * N S1+S2 S2
1557 * N S2 S2
1558 * N S1 S1
1559 *
1560 * Note that you can't actually request stage-2 mappings.
1561 */
1562 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1563 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1564 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1565 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1566
Robin Murphy7602b872016-04-28 17:12:09 +01001567 /*
1568 * Choosing a suitable context format is even more fiddly. Until we
1569 * grow some way for the caller to express a preference, and/or move
1570 * the decision into the io-pgtable code where it arguably belongs,
1571 * just aim for the closest thing to the rest of the system, and hope
1572 * that the hardware isn't esoteric enough that we can't assume AArch64
1573 * support to be a superset of AArch32 support...
1574 */
1575 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1576 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001577 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
1578 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
1579 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
1580 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
1581 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +01001582 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1583 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1584 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1585 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1586 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1587
1588 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1589 ret = -EINVAL;
1590 goto out_unlock;
1591 }
1592
Will Deaconc752ce42014-06-25 22:46:31 +01001593 switch (smmu_domain->stage) {
1594 case ARM_SMMU_DOMAIN_S1:
1595 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1596 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001597 ias = smmu->va_size;
1598 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001599 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001600 fmt = ARM_64_LPAE_S1;
Patrick Daly4423d3e2017-05-04 18:17:51 -07001601 if (smmu->options & ARM_SMMU_OPT_3LVL_TABLES)
1602 ias = min(ias, 39UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001603 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +00001604 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001605 ias = min(ias, 32UL);
1606 oas = min(oas, 40UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001607 } else {
1608 fmt = ARM_V7S;
1609 ias = min(ias, 32UL);
1610 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +01001611 }
Will Deaconc752ce42014-06-25 22:46:31 +01001612 break;
1613 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001614 /*
1615 * We will likely want to change this if/when KVM gets
1616 * involved.
1617 */
Will Deaconc752ce42014-06-25 22:46:31 +01001618 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001619 cfg->cbar = CBAR_TYPE_S2_TRANS;
1620 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001621 ias = smmu->ipa_size;
1622 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001623 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001624 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001625 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001626 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001627 ias = min(ias, 40UL);
1628 oas = min(oas, 40UL);
1629 }
Will Deaconc752ce42014-06-25 22:46:31 +01001630 break;
1631 default:
1632 ret = -EINVAL;
1633 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001634 }
1635
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001636 if (is_fast)
1637 fmt = ARM_V8L_FAST;
1638
Patrick Dalyce6786f2016-11-09 14:19:23 -08001639 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
1640 quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
Liam Mark53cf2342016-12-20 11:36:07 -08001641 if (is_iommu_pt_coherent(smmu_domain))
1642 quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001643
Patrick Dalyda688822017-05-17 20:12:48 -07001644 ret = arm_smmu_alloc_cb(domain, smmu, dev);
1645 if (ret < 0)
1646 goto out_unlock;
1647 cfg->cbndx = ret;
1648
Robin Murphyb7862e32016-04-13 18:13:03 +01001649 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001650 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1651 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001652 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001653 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001654 }
1655
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001656 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
Patrick Dalyce6786f2016-11-09 14:19:23 -08001657 .quirks = quirks,
Robin Murphyd5466352016-05-09 17:20:09 +01001658 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +00001659 .ias = ias,
1660 .oas = oas,
1661 .tlb = &arm_smmu_gather_ops,
Robin Murphy2df7a252015-07-29 19:46:06 +01001662 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +00001663 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001664
Will Deacon518f7132014-11-14 17:17:54 +00001665 smmu_domain->smmu = smmu;
Patrick Dalyea63baa2017-02-13 17:11:33 -08001666 smmu_domain->dev = dev;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001667 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1668 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001669 if (!pgtbl_ops) {
1670 ret = -ENOMEM;
1671 goto out_clear_smmu;
1672 }
1673
Patrick Dalyc11d1082016-09-01 15:52:44 -07001674 /*
1675 * assign any page table memory that might have been allocated
1676 * during alloc_io_pgtable_ops
1677 */
Patrick Dalye271f212016-10-04 13:24:49 -07001678 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001679 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001680 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001681
Robin Murphyd5466352016-05-09 17:20:09 +01001682 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001683 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Robin Murphyd7a8d042016-09-12 17:13:58 +01001684 domain->geometry.aperture_end = (1UL << ias) - 1;
1685 domain->geometry.force_aperture = true;
Will Deacon518f7132014-11-14 17:17:54 +00001686
Patrick Dalyc190d932016-08-30 17:23:28 -07001687 /* Assign an asid */
1688 ret = arm_smmu_init_asid(domain, smmu);
1689 if (ret)
1690 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001691
Patrick Dalyc190d932016-08-30 17:23:28 -07001692 if (!dynamic) {
1693 /* Initialise the context bank with our page table cfg */
1694 arm_smmu_init_context_bank(smmu_domain,
1695 &smmu_domain->pgtbl_cfg);
1696
1697 /*
1698 * Request context fault interrupt. Do this last to avoid the
1699 * handler seeing a half-initialised domain state.
1700 */
1701 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1702 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001703 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1704 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001705 if (ret < 0) {
1706 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1707 cfg->irptndx, irq);
1708 cfg->irptndx = INVALID_IRPTNDX;
1709 goto out_clear_smmu;
1710 }
1711 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001712 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001713 }
Will Deacon518f7132014-11-14 17:17:54 +00001714 mutex_unlock(&smmu_domain->init_mutex);
1715
1716 /* Publish page table ops for map/unmap */
1717 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001718 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001719
Will Deacon518f7132014-11-14 17:17:54 +00001720out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001721 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001722 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001723out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001724 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001725 return ret;
1726}
1727
Patrick Daly77db4f92016-10-14 15:34:10 -07001728static void arm_smmu_domain_reinit(struct arm_smmu_domain *smmu_domain)
1729{
1730 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1731 smmu_domain->cfg.cbndx = INVALID_CBNDX;
1732 smmu_domain->secure_vmid = VMID_INVAL;
1733}
1734
Will Deacon45ae7cf2013-06-24 18:31:25 +01001735static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1736{
Joerg Roedel1d672632015-03-26 13:43:10 +01001737 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001738 struct arm_smmu_device *smmu = smmu_domain->smmu;
1739 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01001740 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001741 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07001742 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001743 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001744
Robin Murphy7e96c742016-09-14 15:26:46 +01001745 if (!smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001746 return;
1747
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001748 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001749 if (ret) {
 1750 WARN_ONCE(ret, "Whoops, powering on smmu %p failed. Leaking context bank\n",
1751 smmu);
1752 return;
1753 }
1754
Patrick Dalyc190d932016-08-30 17:23:28 -07001755 dynamic = is_dynamic_domain(domain);
1756 if (dynamic) {
1757 arm_smmu_free_asid(domain);
1758 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001759 arm_smmu_power_off(smmu->pwr);
Patrick Dalye271f212016-10-04 13:24:49 -07001760 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001761 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001762 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001763 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly77db4f92016-10-14 15:34:10 -07001764 arm_smmu_domain_reinit(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001765 return;
1766 }
1767
Will Deacon518f7132014-11-14 17:17:54 +00001768 /*
1769 * Disable the context bank and free the page tables before freeing
1770 * it.
1771 */
Will Deacon44680ee2014-06-25 11:29:12 +01001772 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001773 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01001774
Will Deacon44680ee2014-06-25 11:29:12 +01001775 if (cfg->irptndx != INVALID_IRPTNDX) {
1776 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08001777 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001778 }
1779
Markus Elfring44830b02015-11-06 18:32:41 +01001780 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07001781 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001782 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001783 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001784 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001785 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001786
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001787 arm_smmu_power_off(smmu->pwr);
Patrick Daly77db4f92016-10-14 15:34:10 -07001788 arm_smmu_domain_reinit(smmu_domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001789}
1790
Joerg Roedel1d672632015-03-26 13:43:10 +01001791static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001792{
1793 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001794
Patrick Daly09801312016-08-29 17:02:52 -07001795 /* Do not support DOMAIN_DMA for now */
1796 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01001797 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001798 /*
1799 * Allocate the domain and initialise some of its data structures.
1800 * We can't really do anything meaningful until we've added a
1801 * master.
1802 */
1803 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1804 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001805 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001806
Robin Murphy7e96c742016-09-14 15:26:46 +01001807 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
1808 iommu_get_dma_cookie(&smmu_domain->domain))) {
Robin Murphy9adb9592016-01-26 18:06:36 +00001809 kfree(smmu_domain);
1810 return NULL;
1811 }
1812
Will Deacon518f7132014-11-14 17:17:54 +00001813 mutex_init(&smmu_domain->init_mutex);
1814 spin_lock_init(&smmu_domain->pgtbl_lock);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001815 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
1816 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07001817 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001818 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Patrick Daly77db4f92016-10-14 15:34:10 -07001819 arm_smmu_domain_reinit(smmu_domain);
Joerg Roedel1d672632015-03-26 13:43:10 +01001820
1821 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001822}
1823
Joerg Roedel1d672632015-03-26 13:43:10 +01001824static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001825{
Joerg Roedel1d672632015-03-26 13:43:10 +01001826 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01001827
1828 /*
1829 * Free the domain resources. We assume that all devices have
1830 * already been detached.
1831 */
Robin Murphy9adb9592016-01-26 18:06:36 +00001832 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001833 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001834 kfree(smmu_domain);
1835}
1836
Robin Murphy468f4942016-09-12 17:13:49 +01001837static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
1838{
1839 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphyd5b41782016-09-14 15:21:39 +01001840 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
Robin Murphy468f4942016-09-12 17:13:49 +01001841
1842 if (smr->valid)
1843 reg |= SMR_VALID;
1844 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
1845}
1846
Robin Murphya754fd12016-09-12 17:13:50 +01001847static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
1848{
1849 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
1850 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
1851 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
1852 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
1853
1854 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
1855}
1856
1857static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
1858{
1859 arm_smmu_write_s2cr(smmu, idx);
1860 if (smmu->smrs)
1861 arm_smmu_write_smr(smmu, idx);
1862}
1863
Robin Murphy6668f692016-09-12 17:13:54 +01001864static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy468f4942016-09-12 17:13:49 +01001865{
1866 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy6668f692016-09-12 17:13:54 +01001867 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001868
Robin Murphy6668f692016-09-12 17:13:54 +01001869 /* Stream indexing is blissfully easy */
1870 if (!smrs)
1871 return id;
Robin Murphy468f4942016-09-12 17:13:49 +01001872
Robin Murphy6668f692016-09-12 17:13:54 +01001873 /* Validating SMRs is... less so */
1874 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1875 if (!smrs[i].valid) {
1876 /*
1877 * Note the first free entry we come across, which
1878 * we'll claim in the end if nothing else matches.
1879 */
1880 if (free_idx < 0)
1881 free_idx = i;
Robin Murphy468f4942016-09-12 17:13:49 +01001882 continue;
1883 }
Robin Murphy6668f692016-09-12 17:13:54 +01001884 /*
1885 * If the new entry is _entirely_ matched by an existing entry,
1886 * then reuse that, with the guarantee that there also cannot
1887 * be any subsequent conflicting entries. In normal use we'd
1888 * expect simply identical entries for this case, but there's
1889 * no harm in accommodating the generalisation.
1890 */
1891 if ((mask & smrs[i].mask) == mask &&
1892 !((id ^ smrs[i].id) & ~smrs[i].mask))
1893 return i;
1894 /*
1895 * If the new entry has any other overlap with an existing one,
1896 * though, then there always exists at least one stream ID
1897 * which would cause a conflict, and we can't allow that risk.
1898 */
1899 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
1900 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001901 }
1902
Robin Murphy6668f692016-09-12 17:13:54 +01001903 return free_idx;
1904}
1905
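/*
 * Editor's worked example for arm_smmu_find_sme() (not in the original
 * source).  With an existing valid SMR of id=0x400, mask=0x00ff
 * (covering stream IDs 0x400-0x4ff):
 *
 *   - a new entry id=0x412, mask=0x000f (streams 0x410-0x41f) is entirely
 *     contained: (0x000f & 0x00ff) == 0x000f and
 *     ((0x412 ^ 0x400) & ~0x00ff) == 0, so the existing index is reused;
 *
 *   - a new entry id=0x500, mask=0x01ff (streams 0x400-0x5ff) overlaps
 *     without being contained: ((0x500 ^ 0x400) & ~(0x00ff | 0x01ff)) == 0
 *     but (0x01ff & 0x00ff) != 0x01ff, so the function returns -EINVAL.
 */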
1906static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1907{
1908 if (--smmu->s2crs[idx].count)
1909 return false;
1910
1911 smmu->s2crs[idx] = s2cr_init_val;
1912 if (smmu->smrs)
1913 smmu->smrs[idx].valid = false;
1914
1915 return true;
1916}
1917
1918static int arm_smmu_master_alloc_smes(struct device *dev)
1919{
Robin Murphy06e393e2016-09-12 17:13:55 +01001920 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1921 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy6668f692016-09-12 17:13:54 +01001922 struct arm_smmu_device *smmu = cfg->smmu;
1923 struct arm_smmu_smr *smrs = smmu->smrs;
1924 struct iommu_group *group;
1925 int i, idx, ret;
1926
1927 mutex_lock(&smmu->stream_map_mutex);
1928 /* Figure out a viable stream map entry allocation */
Robin Murphy06e393e2016-09-12 17:13:55 +01001929 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy7e96c742016-09-14 15:26:46 +01001930 u16 sid = fwspec->ids[i];
1931 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1932
Robin Murphy6668f692016-09-12 17:13:54 +01001933 if (idx != INVALID_SMENDX) {
1934 ret = -EEXIST;
1935 goto out_err;
1936 }
1937
Robin Murphy7e96c742016-09-14 15:26:46 +01001938 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy6668f692016-09-12 17:13:54 +01001939 if (ret < 0)
1940 goto out_err;
1941
1942 idx = ret;
1943 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy7e96c742016-09-14 15:26:46 +01001944 smrs[idx].id = sid;
1945 smrs[idx].mask = mask;
Robin Murphy6668f692016-09-12 17:13:54 +01001946 smrs[idx].valid = true;
1947 }
1948 smmu->s2crs[idx].count++;
1949 cfg->smendx[i] = (s16)idx;
1950 }
1951
1952 group = iommu_group_get_for_dev(dev);
1953 if (!group)
1954 group = ERR_PTR(-ENOMEM);
1955 if (IS_ERR(group)) {
1956 ret = PTR_ERR(group);
1957 goto out_err;
1958 }
1959 iommu_group_put(group);
Robin Murphy468f4942016-09-12 17:13:49 +01001960
Will Deacon45ae7cf2013-06-24 18:31:25 +01001961 /* It worked! Now, poke the actual hardware */
Robin Murphy06e393e2016-09-12 17:13:55 +01001962 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01001963 arm_smmu_write_sme(smmu, idx);
1964 smmu->s2crs[idx].group = group;
1965 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001966
Robin Murphy6668f692016-09-12 17:13:54 +01001967 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001968 return 0;
1969
Robin Murphy6668f692016-09-12 17:13:54 +01001970out_err:
Robin Murphy468f4942016-09-12 17:13:49 +01001971 while (i--) {
Robin Murphy6668f692016-09-12 17:13:54 +01001972 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy468f4942016-09-12 17:13:49 +01001973 cfg->smendx[i] = INVALID_SMENDX;
1974 }
Robin Murphy6668f692016-09-12 17:13:54 +01001975 mutex_unlock(&smmu->stream_map_mutex);
1976 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001977}
1978
Robin Murphy06e393e2016-09-12 17:13:55 +01001979static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001980{
Robin Murphy06e393e2016-09-12 17:13:55 +01001981 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1982 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy1fb519a2016-09-12 17:13:53 +01001983 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01001984
Robin Murphy6668f692016-09-12 17:13:54 +01001985 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01001986 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01001987 if (arm_smmu_free_sme(smmu, idx))
1988 arm_smmu_write_sme(smmu, idx);
Robin Murphy468f4942016-09-12 17:13:49 +01001989 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001990 }
Robin Murphy6668f692016-09-12 17:13:54 +01001991 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001992}
1993
Will Deacon45ae7cf2013-06-24 18:31:25 +01001994static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphy06e393e2016-09-12 17:13:55 +01001995 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001996{
Will Deacon44680ee2014-06-25 11:29:12 +01001997 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01001998 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
1999 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
2000 u8 cbndx = smmu_domain->cfg.cbndx;
Robin Murphy6668f692016-09-12 17:13:54 +01002001 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002002
Robin Murphy06e393e2016-09-12 17:13:55 +01002003 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphya754fd12016-09-12 17:13:50 +01002004 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy6668f692016-09-12 17:13:54 +01002005 continue;
Robin Murphya754fd12016-09-12 17:13:50 +01002006
2007 s2cr[idx].type = type;
2008 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
2009 s2cr[idx].cbndx = cbndx;
2010 arm_smmu_write_s2cr(smmu, idx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002011 }
2012
2013 return 0;
2014}
2015
Patrick Daly09801312016-08-29 17:02:52 -07002016static void arm_smmu_detach_dev(struct iommu_domain *domain,
2017 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002018{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002019 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07002020 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Daly09801312016-08-29 17:02:52 -07002021 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07002022 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07002023
2024 if (dynamic)
2025 return;
2026
Patrick Daly09801312016-08-29 17:02:52 -07002027 if (!smmu) {
2028 dev_err(dev, "Domain not attached; cannot detach!\n");
2029 return;
2030 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002031
Patrick Daly8befb662016-08-17 20:03:28 -07002032 /* Remove additional vote for atomic power */
2033 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002034 WARN_ON(arm_smmu_power_on_atomic(smmu->pwr));
2035 arm_smmu_power_off(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07002036 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002037}
2038
Patrick Dalye271f212016-10-04 13:24:49 -07002039static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07002040{
Patrick Dalye271f212016-10-04 13:24:49 -07002041 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002042 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2043 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
2044 int source_vmid = VMID_HLOS;
2045 struct arm_smmu_pte_info *pte_info, *temp;
2046
Patrick Dalye271f212016-10-04 13:24:49 -07002047 if (!arm_smmu_is_domain_secure(smmu_domain))
2048 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002049
Patrick Dalye271f212016-10-04 13:24:49 -07002050 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002051 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2052 PAGE_SIZE, &source_vmid, 1,
2053 dest_vmids, dest_perms, 2);
2054 if (WARN_ON(ret))
2055 break;
2056 }
2057
2058 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2059 entry) {
2060 list_del(&pte_info->entry);
2061 kfree(pte_info);
2062 }
Patrick Dalye271f212016-10-04 13:24:49 -07002063 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002064}
2065
2066static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2067{
2068 int ret;
2069 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002070 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002071 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2072 struct arm_smmu_pte_info *pte_info, *temp;
2073
Patrick Dalye271f212016-10-04 13:24:49 -07002074 if (!arm_smmu_is_domain_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002075 return;
2076
2077 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2078 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2079 PAGE_SIZE, source_vmlist, 2,
2080 &dest_vmids, &dest_perms, 1);
2081 if (WARN_ON(ret))
2082 break;
2083 free_pages_exact(pte_info->virt_addr, pte_info->size);
2084 }
2085
2086 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2087 entry) {
2088 list_del(&pte_info->entry);
2089 kfree(pte_info);
2090 }
2091}
2092
2093static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2094{
2095 struct arm_smmu_domain *smmu_domain = cookie;
2096 struct arm_smmu_pte_info *pte_info;
2097
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002098 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002099
2100 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2101 if (!pte_info)
2102 return;
2103
2104 pte_info->virt_addr = addr;
2105 pte_info->size = size;
2106 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2107}
2108
2109static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2110{
2111 struct arm_smmu_domain *smmu_domain = cookie;
2112 struct arm_smmu_pte_info *pte_info;
2113
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002114 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002115
2116 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2117 if (!pte_info)
2118 return -ENOMEM;
2119 pte_info->virt_addr = addr;
2120 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2121 return 0;
2122}
2123
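/*
 * Editor's summary of the secure page-table flow above (an illustration
 * of the call order, not new behaviour):
 *
 *	io-pgtable map/unmap allocates or releases table pages
 *	  -> arm_smmu_prepare_pgtable()    queues the page on pte_info_list
 *	  -> arm_smmu_unprepare_pgtable()  queues it on unassign_list
 *	arm_smmu_map()/arm_smmu_unmap() then call
 *	  -> arm_smmu_assign_table()       hyp_assign_phys() to HLOS plus the
 *	                                   domain's secure VMID
 *	  -> arm_smmu_unassign_table()     returns pages to HLOS and frees
 *	                                   them with free_pages_exact()
 */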
Will Deacon45ae7cf2013-06-24 18:31:25 +01002124static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2125{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002126 int ret;
Robin Murphy06e393e2016-09-12 17:13:55 +01002127 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Will Deacon518f7132014-11-14 17:17:54 +00002128 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +01002129 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly8befb662016-08-17 20:03:28 -07002130 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002131
Robin Murphy06e393e2016-09-12 17:13:55 +01002132 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002133 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2134 return -ENXIO;
2135 }
Robin Murphy06e393e2016-09-12 17:13:55 +01002136
Robin Murphy4f79b142016-10-17 12:06:21 +01002137 /*
2138 * FIXME: The arch/arm DMA API code tries to attach devices to its own
2139 * domains between of_xlate() and add_device() - we have no way to cope
2140 * with that, so until ARM gets converted to rely on groups and default
2141 * domains, just say no (but more politely than by dereferencing NULL).
2142 * This should be at least a WARN_ON once that's sorted.
2143 */
2144 if (!fwspec->iommu_priv)
2145 return -ENODEV;
2146
Robin Murphy06e393e2016-09-12 17:13:55 +01002147 smmu = fwspec_smmu(fwspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002148
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002149 /* Enable Clocks and Power */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002150 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002151 if (ret)
2152 return ret;
2153
Will Deacon518f7132014-11-14 17:17:54 +00002154 /* Ensure that the domain is finalised */
Patrick Dalyea63baa2017-02-13 17:11:33 -08002155 ret = arm_smmu_init_domain_context(domain, smmu, dev);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002156 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002157 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002158
Patrick Dalyc190d932016-08-30 17:23:28 -07002159 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002160 if (is_dynamic_domain(domain)) {
2161 ret = 0;
2162 goto out_power_off;
2163 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002164
Will Deacon45ae7cf2013-06-24 18:31:25 +01002165 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002166 * Sanity check the domain. We don't support domains across
2167 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002168 */
Robin Murphy06e393e2016-09-12 17:13:55 +01002169 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002170 dev_err(dev,
2171 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002172 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002173 ret = -EINVAL;
2174 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002175 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002176
2177 /* Looks ok, so add the device to the domain */
Robin Murphy06e393e2016-09-12 17:13:55 +01002178 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002179
2180out_power_off:
Patrick Daly466237d2016-10-14 15:59:14 -07002181 /*
2182 * Keep an additional vote for non-atomic power until domain is
2183 * detached
2184 */
2185 if (!ret && atomic_domain) {
2186 WARN_ON(arm_smmu_power_on(smmu->pwr));
2187 arm_smmu_power_off_atomic(smmu->pwr);
2188 }
2189
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002190 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002191
Will Deacon45ae7cf2013-06-24 18:31:25 +01002192 return ret;
2193}
2194
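/*
 * Editor's sketch (not part of the original file): the simplest client
 * flow that exercises the attach path above.  The bus type, IOVA and
 * page argument are illustrative assumptions.
 */
static int example_attach_and_map(struct device *dev, struct page *page)
{
	struct iommu_domain *domain;
	int ret;

	/* This driver only accepts IOMMU_DOMAIN_UNMANAGED domains */
	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);	/* -> arm_smmu_attach_dev() */
	if (ret)
		goto free_domain;

	/* 0x10000000 is an arbitrary example IOVA */
	ret = iommu_map(domain, 0x10000000, page_to_phys(page), PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);
	if (ret) {
		iommu_detach_device(domain, dev);
		goto free_domain;
	}

	return 0;

free_domain:
	iommu_domain_free(domain);
	return ret;
}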
Will Deacon45ae7cf2013-06-24 18:31:25 +01002195static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002196 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002197{
Will Deacon518f7132014-11-14 17:17:54 +00002198 int ret;
2199 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002200 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002201 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002202
Will Deacon518f7132014-11-14 17:17:54 +00002203 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002204 return -ENODEV;
2205
Patrick Dalye271f212016-10-04 13:24:49 -07002206 arm_smmu_secure_domain_lock(smmu_domain);
2207
Will Deacon518f7132014-11-14 17:17:54 +00002208 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2209 ret = ops->map(ops, iova, paddr, size, prot);
2210 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002211
2212 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002213 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002214
Will Deacon518f7132014-11-14 17:17:54 +00002215 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002216}
2217
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002218static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
2219 dma_addr_t iova)
2220{
2221 uint64_t ret;
2222 unsigned long flags;
2223 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2224 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2225
2226 if (!ops)
2227 return 0;
2228
2229 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2230 ret = ops->iova_to_pte(ops, iova);
2231 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2232 return ret;
2233}
2234
Will Deacon45ae7cf2013-06-24 18:31:25 +01002235static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2236 size_t size)
2237{
Will Deacon518f7132014-11-14 17:17:54 +00002238 size_t ret;
2239 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002240 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002241 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002242
Will Deacon518f7132014-11-14 17:17:54 +00002243 if (!ops)
2244 return 0;
2245
Patrick Daly8befb662016-08-17 20:03:28 -07002246 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002247 if (ret)
2248 return ret;
2249
Patrick Dalye271f212016-10-04 13:24:49 -07002250 arm_smmu_secure_domain_lock(smmu_domain);
2251
Will Deacon518f7132014-11-14 17:17:54 +00002252 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2253 ret = ops->unmap(ops, iova, size);
2254 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002255
Patrick Daly8befb662016-08-17 20:03:28 -07002256 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002257 /*
2258 * While splitting up block mappings, we might allocate page table
 2259 * memory during unmap, so the VMIDs need to be assigned to the
2260 * memory here as well.
2261 */
2262 arm_smmu_assign_table(smmu_domain);
 2263 /* Also unassign any pages that were freed during unmap */
2264 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002265 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002266 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002267}
2268
Patrick Daly88d321d2017-02-09 18:02:13 -08002269#define MAX_MAP_SG_BATCH_SIZE (SZ_4M)
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002270static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2271 struct scatterlist *sg, unsigned int nents, int prot)
2272{
2273 int ret;
Patrick Daly88d321d2017-02-09 18:02:13 -08002274 size_t size, batch_size, size_to_unmap = 0;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002275 unsigned long flags;
2276 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2277 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Patrick Daly88d321d2017-02-09 18:02:13 -08002278 unsigned int idx_start, idx_end;
2279 struct scatterlist *sg_start, *sg_end;
2280 unsigned long __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002281
2282 if (!ops)
2283 return -ENODEV;
2284
Patrick Daly8befb662016-08-17 20:03:28 -07002285 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002286 if (ret)
2287 return ret;
2288
Patrick Daly88d321d2017-02-09 18:02:13 -08002289 __saved_iova_start = iova;
2290 idx_start = idx_end = 0;
2291 sg_start = sg_end = sg;
2292 while (idx_end < nents) {
2293 batch_size = sg_end->length;
2294 sg_end = sg_next(sg_end);
2295 idx_end++;
2296 while ((idx_end < nents) &&
2297 (batch_size + sg_end->length < MAX_MAP_SG_BATCH_SIZE)) {
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002298
Patrick Daly88d321d2017-02-09 18:02:13 -08002299 batch_size += sg_end->length;
2300 sg_end = sg_next(sg_end);
2301 idx_end++;
2302 }
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002303
Patrick Daly88d321d2017-02-09 18:02:13 -08002304 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2305 ret = ops->map_sg(ops, iova, sg_start, idx_end - idx_start,
2306 prot, &size);
2307 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2308 /* Returns 0 on error */
2309 if (!ret) {
2310 size_to_unmap = iova + size - __saved_iova_start;
2311 goto out;
2312 }
2313
2314 iova += batch_size;
2315 idx_start = idx_end;
2316 sg_start = sg_end;
2317 }
2318
2319out:
Patrick Dalyc11d1082016-09-01 15:52:44 -07002320 arm_smmu_assign_table(smmu_domain);
2321
Patrick Daly88d321d2017-02-09 18:02:13 -08002322 if (size_to_unmap) {
2323 arm_smmu_unmap(domain, __saved_iova_start, size_to_unmap);
2324 iova = __saved_iova_start;
2325 }
2326 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
2327 return iova - __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002328}
2329
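/*
 * Editor's sketch: exercising the batched map_sg path above through the
 * generic IOMMU API.  The sg_table argument is an illustrative
 * assumption; the <= 4MB batching is internal to arm_smmu_map_sg().
 */
static int example_map_sg(struct iommu_domain *domain, unsigned long iova,
			  struct sg_table *table)
{
	size_t mapped;

	mapped = iommu_map_sg(domain, iova, table->sgl, table->nents,
			      IOMMU_READ | IOMMU_WRITE);

	/*
	 * On failure arm_smmu_map_sg() has already unmapped any partial
	 * batches and returns 0.
	 */
	return mapped ? 0 : -ENOMEM;
}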
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002330static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyad441dd2016-09-15 15:50:46 -07002331 dma_addr_t iova)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002332{
Joerg Roedel1d672632015-03-26 13:43:10 +01002333 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002334 struct arm_smmu_device *smmu = smmu_domain->smmu;
2335 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 2336 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2337 struct device *dev = smmu->dev;
2338 void __iomem *cb_base;
2339 u32 tmp;
2340 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002341 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002342
2343 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2344
Robin Murphy661d9622015-05-27 17:09:34 +01002345 /* ATS1 registers can only be written atomically */
2346 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002347 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002348 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2349 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002350 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002351
2352 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2353 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002354 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002355 dev_err(dev,
2356 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2357 &iova, &phys);
2358 phys = 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002359 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002360 }
2361
Robin Murphyf9a05f02016-04-13 18:13:01 +01002362 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002363 if (phys & CB_PAR_F) {
2364 dev_err(dev, "translation fault!\n");
2365 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002366 phys = 0;
2367 } else {
2368 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002369 }
Patrick Dalyad441dd2016-09-15 15:50:46 -07002370
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002371 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002372}
2373
Will Deacon45ae7cf2013-06-24 18:31:25 +01002374static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002375 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002376{
Will Deacon518f7132014-11-14 17:17:54 +00002377 phys_addr_t ret;
2378 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002379 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002380 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002381
Will Deacon518f7132014-11-14 17:17:54 +00002382 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002383 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002384
Will Deacon518f7132014-11-14 17:17:54 +00002385 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002386 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002387 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002388
Will Deacon518f7132014-11-14 17:17:54 +00002389 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002390}
2391
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002392/*
 2393 * This function can sleep and must not be called from atomic context. It will
 2394 * power on the register block if required. This restriction does not apply to
 2395 * the original iova_to_phys() op.
2396 */
2397static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2398 dma_addr_t iova)
2399{
2400 phys_addr_t ret = 0;
2401 unsigned long flags;
2402 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002403
Patrick Dalyad441dd2016-09-15 15:50:46 -07002404 if (smmu_domain->smmu->arch_ops &&
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002405 smmu_domain->smmu->arch_ops->iova_to_phys_hard) {
2406 ret = smmu_domain->smmu->arch_ops->iova_to_phys_hard(
Patrick Dalyad441dd2016-09-15 15:50:46 -07002407 domain, iova);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002408 return ret;
2409 }
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002410
2411 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2412 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2413 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyad441dd2016-09-15 15:50:46 -07002414 ret = __arm_smmu_iova_to_phys_hard(domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002415
2416 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2417
2418 return ret;
2419}
2420
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002421static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002422{
Will Deacond0948942014-06-24 17:30:10 +01002423 switch (cap) {
2424 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002425 /*
2426 * Return true here as the SMMU can always send out coherent
2427 * requests.
2428 */
2429 return true;
Will Deacond0948942014-06-24 17:30:10 +01002430 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002431 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002432 case IOMMU_CAP_NOEXEC:
2433 return true;
Will Deacond0948942014-06-24 17:30:10 +01002434 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002435 return false;
Will Deacond0948942014-06-24 17:30:10 +01002436 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002437}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002438
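/*
 * Look up an SMMU instance by device node on the driver's private
 * arm_smmu_devices list. Used by arm_smmu_get_by_node() as a fallback when
 * driver_find_device() finds no match.
 */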
Patrick Daly8e3371a2017-02-13 22:14:53 -08002439static struct arm_smmu_device *arm_smmu_get_by_list(struct device_node *np)
2440{
2441 struct arm_smmu_device *smmu;
2442 unsigned long flags;
2443
2444 spin_lock_irqsave(&arm_smmu_devices_lock, flags);
2445 list_for_each_entry(smmu, &arm_smmu_devices, list) {
2446 if (smmu->dev->of_node == np) {
2447 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2448 return smmu;
2449 }
2450 }
2451 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2452 return NULL;
2453}
2454
Robin Murphy7e96c742016-09-14 15:26:46 +01002455static int arm_smmu_match_node(struct device *dev, void *data)
2456{
2457 return dev->of_node == data;
2458}
2459
2460static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
2461{
2462 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
2463 np, arm_smmu_match_node);
2464 put_device(dev);
Patrick Daly8e3371a2017-02-13 22:14:53 -08002465 return dev ? dev_get_drvdata(dev) : arm_smmu_get_by_list(np);
Robin Murphy7e96c742016-09-14 15:26:46 +01002466}
2467
Will Deacon03edb222015-01-19 14:27:33 +00002468static int arm_smmu_add_device(struct device *dev)
2469{
Robin Murphy06e393e2016-09-12 17:13:55 +01002470 struct arm_smmu_device *smmu;
Robin Murphyd5b41782016-09-14 15:21:39 +01002471 struct arm_smmu_master_cfg *cfg;
Robin Murphy7e96c742016-09-14 15:26:46 +01002472 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyd5b41782016-09-14 15:21:39 +01002473 int i, ret;
2474
Robin Murphy7e96c742016-09-14 15:26:46 +01002475 if (using_legacy_binding) {
2476 ret = arm_smmu_register_legacy_master(dev, &smmu);
2477 fwspec = dev->iommu_fwspec;
2478 if (ret)
2479 goto out_free;
Robin Murphy22e6f6c2016-11-02 17:31:32 +00002480 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002481 smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
2482 if (!smmu)
2483 return -ENODEV;
2484 } else {
2485 return -ENODEV;
2486 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002487
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002488 ret = arm_smmu_power_on(smmu->pwr);
2489 if (ret)
2490 goto out_free;
2491
Robin Murphyd5b41782016-09-14 15:21:39 +01002492 ret = -EINVAL;
Robin Murphy06e393e2016-09-12 17:13:55 +01002493 for (i = 0; i < fwspec->num_ids; i++) {
2494 u16 sid = fwspec->ids[i];
Robin Murphy7e96c742016-09-14 15:26:46 +01002495 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyd5b41782016-09-14 15:21:39 +01002496
Robin Murphy06e393e2016-09-12 17:13:55 +01002497 if (sid & ~smmu->streamid_mask) {
Robin Murphyd5b41782016-09-14 15:21:39 +01002498 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002499 sid, smmu->streamid_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002500 goto out_pwr_off;
Robin Murphyd5b41782016-09-14 15:21:39 +01002501 }
Robin Murphy7e96c742016-09-14 15:26:46 +01002502 if (mask & ~smmu->smr_mask_mask) {
2503 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
2504 sid, smmu->smr_mask_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002505 goto out_pwr_off;
Robin Murphy7e96c742016-09-14 15:26:46 +01002506 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002507 }
Will Deacon03edb222015-01-19 14:27:33 +00002508
Robin Murphy06e393e2016-09-12 17:13:55 +01002509 ret = -ENOMEM;
2510 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
2511 GFP_KERNEL);
2512 if (!cfg)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002513 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002514
2515 cfg->smmu = smmu;
2516 fwspec->iommu_priv = cfg;
2517 while (i--)
2518 cfg->smendx[i] = INVALID_SMENDX;
2519
Robin Murphy6668f692016-09-12 17:13:54 +01002520 ret = arm_smmu_master_alloc_smes(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002521 if (ret)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002522 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002523
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002524 arm_smmu_power_off(smmu->pwr);
Robin Murphy06e393e2016-09-12 17:13:55 +01002525 return 0;
Robin Murphyd5b41782016-09-14 15:21:39 +01002526
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002527out_pwr_off:
2528 arm_smmu_power_off(smmu->pwr);
Robin Murphyd5b41782016-09-14 15:21:39 +01002529out_free:
Robin Murphy06e393e2016-09-12 17:13:55 +01002530 if (fwspec)
2531 kfree(fwspec->iommu_priv);
2532 iommu_fwspec_free(dev);
Robin Murphyd5b41782016-09-14 15:21:39 +01002533 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00002534}
2535
Will Deacon45ae7cf2013-06-24 18:31:25 +01002536static void arm_smmu_remove_device(struct device *dev)
2537{
Robin Murphy06e393e2016-09-12 17:13:55 +01002538 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002539 struct arm_smmu_device *smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002540
Robin Murphy06e393e2016-09-12 17:13:55 +01002541 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyd5b41782016-09-14 15:21:39 +01002542 return;
Robin Murphya754fd12016-09-12 17:13:50 +01002543
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002544 smmu = fwspec_smmu(fwspec);
2545 if (arm_smmu_power_on(smmu->pwr)) {
2546 WARN_ON(1);
2547 return;
2548 }
2549
Robin Murphy06e393e2016-09-12 17:13:55 +01002550 arm_smmu_master_free_smes(fwspec);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002551 iommu_group_remove_device(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002552 kfree(fwspec->iommu_priv);
2553 iommu_fwspec_free(dev);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002554 arm_smmu_power_off(smmu->pwr);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002555}
2556
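/*
 * All stream mapping entries used by a device must resolve to the same
 * iommu_group: reuse a group already recorded in the S2CRs if there is one,
 * otherwise fall back to the PCI or generic group allocator.
 */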
Joerg Roedelaf659932015-10-21 23:51:41 +02002557static struct iommu_group *arm_smmu_device_group(struct device *dev)
2558{
Robin Murphy06e393e2016-09-12 17:13:55 +01002559 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2560 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy6668f692016-09-12 17:13:54 +01002561 struct iommu_group *group = NULL;
2562 int i, idx;
2563
Robin Murphy06e393e2016-09-12 17:13:55 +01002564 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002565 if (group && smmu->s2crs[idx].group &&
2566 group != smmu->s2crs[idx].group)
2567 return ERR_PTR(-EINVAL);
2568
2569 group = smmu->s2crs[idx].group;
2570 }
2571
2572 if (group)
2573 return group;
Joerg Roedelaf659932015-10-21 23:51:41 +02002574
2575 if (dev_is_pci(dev))
2576 group = pci_device_group(dev);
2577 else
2578 group = generic_device_group(dev);
2579
Joerg Roedelaf659932015-10-21 23:51:41 +02002580 return group;
2581}
2582
Will Deaconc752ce42014-06-25 22:46:31 +01002583static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
2584 enum iommu_attr attr, void *data)
2585{
Joerg Roedel1d672632015-03-26 13:43:10 +01002586 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002587 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01002588
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002589 mutex_lock(&smmu_domain->init_mutex);
Will Deaconc752ce42014-06-25 22:46:31 +01002590 switch (attr) {
2591 case DOMAIN_ATTR_NESTING:
2592 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002593 ret = 0;
2594 break;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08002595 case DOMAIN_ATTR_PT_BASE_ADDR:
2596 *((phys_addr_t *)data) =
2597 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002598 ret = 0;
2599 break;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002600 case DOMAIN_ATTR_CONTEXT_BANK:
2601 /* context bank index isn't valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002602 if (smmu_domain->smmu == NULL) {
2603 ret = -ENODEV;
2604 break;
2605 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002606 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
2607 ret = 0;
2608 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002609 case DOMAIN_ATTR_TTBR0: {
2610 u64 val;
2611 struct arm_smmu_device *smmu = smmu_domain->smmu;
2612 /* not valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002613 if (smmu == NULL) {
2614 ret = -ENODEV;
2615 break;
2616 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002617 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2618 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
2619 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
2620 << (TTBRn_ASID_SHIFT);
2621 *((u64 *)data) = val;
2622 ret = 0;
2623 break;
2624 }
2625 case DOMAIN_ATTR_CONTEXTIDR:
2626 /* not valid until attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002627 if (smmu_domain->smmu == NULL) {
2628 ret = -ENODEV;
2629 break;
2630 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002631 *((u32 *)data) = smmu_domain->cfg.procid;
2632 ret = 0;
2633 break;
2634 case DOMAIN_ATTR_PROCID:
2635 *((u32 *)data) = smmu_domain->cfg.procid;
2636 ret = 0;
2637 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002638 case DOMAIN_ATTR_DYNAMIC:
2639 *((int *)data) = !!(smmu_domain->attributes
2640 & (1 << DOMAIN_ATTR_DYNAMIC));
2641 ret = 0;
2642 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002643 case DOMAIN_ATTR_NON_FATAL_FAULTS:
2644 *((int *)data) = !!(smmu_domain->attributes
2645 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
2646 ret = 0;
2647 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07002648 case DOMAIN_ATTR_S1_BYPASS:
2649 *((int *)data) = !!(smmu_domain->attributes
2650 & (1 << DOMAIN_ATTR_S1_BYPASS));
2651 ret = 0;
2652 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002653 case DOMAIN_ATTR_SECURE_VMID:
2654 *((int *)data) = smmu_domain->secure_vmid;
2655 ret = 0;
2656 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08002657 case DOMAIN_ATTR_PGTBL_INFO: {
2658 struct iommu_pgtbl_info *info = data;
2659
2660 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
2661 ret = -ENODEV;
2662 break;
2663 }
2664 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
2665 ret = 0;
2666 break;
2667 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002668 case DOMAIN_ATTR_FAST:
2669 *((int *)data) = !!(smmu_domain->attributes
2670 & (1 << DOMAIN_ATTR_FAST));
2671 ret = 0;
2672 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002673 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2674 *((int *)data) = !!(smmu_domain->attributes &
2675 (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
2676 ret = 0;
2677 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08002678 case DOMAIN_ATTR_EARLY_MAP:
2679 *((int *)data) = !!(smmu_domain->attributes
2680 & (1 << DOMAIN_ATTR_EARLY_MAP));
2681 ret = 0;
2682 break;
Mitchel Humpherys05314f32016-06-07 16:04:40 -07002683 case DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002684 if (!smmu_domain->smmu) {
2685 ret = -ENODEV;
2686 break;
2687 }
Liam Mark53cf2342016-12-20 11:36:07 -08002688 *((int *)data) = is_iommu_pt_coherent(smmu_domain);
2689 ret = 0;
2690 break;
2691 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
2692 *((int *)data) = !!(smmu_domain->attributes
2693 & (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
Mitchel Humpherys05314f32016-06-07 16:04:40 -07002694 ret = 0;
2695 break;
Charan Teja Reddyc682e472017-04-20 19:11:20 +05302696 case DOMAIN_ATTR_CB_STALL_DISABLE:
2697 *((int *)data) = !!(smmu_domain->attributes
2698 & (1 << DOMAIN_ATTR_CB_STALL_DISABLE));
2699 ret = 0;
2700 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002701 default:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002702 ret = -ENODEV;
2703 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002704 }
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002705 mutex_unlock(&smmu_domain->init_mutex);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002706 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002707}
2708
2709static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
2710 enum iommu_attr attr, void *data)
2711{
Will Deacon518f7132014-11-14 17:17:54 +00002712 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01002713 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01002714
Will Deacon518f7132014-11-14 17:17:54 +00002715 mutex_lock(&smmu_domain->init_mutex);
2716
Will Deaconc752ce42014-06-25 22:46:31 +01002717 switch (attr) {
2718 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00002719 if (smmu_domain->smmu) {
2720 ret = -EPERM;
2721 goto out_unlock;
2722 }
2723
Will Deaconc752ce42014-06-25 22:46:31 +01002724 if (*(int *)data)
2725 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2726 else
2727 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2728
Will Deacon518f7132014-11-14 17:17:54 +00002729 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002730 case DOMAIN_ATTR_PROCID:
2731 if (smmu_domain->smmu != NULL) {
2732 dev_err(smmu_domain->smmu->dev,
2733 "cannot change procid attribute while attached\n");
2734 ret = -EBUSY;
2735 break;
2736 }
2737 smmu_domain->cfg.procid = *((u32 *)data);
2738 ret = 0;
2739 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002740 case DOMAIN_ATTR_DYNAMIC: {
2741 int dynamic = *((int *)data);
2742
2743 if (smmu_domain->smmu != NULL) {
2744 dev_err(smmu_domain->smmu->dev,
2745 "cannot change dynamic attribute while attached\n");
2746 ret = -EBUSY;
2747 break;
2748 }
2749
2750 if (dynamic)
2751 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
2752 else
2753 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
2754 ret = 0;
2755 break;
2756 }
2757 case DOMAIN_ATTR_CONTEXT_BANK:
2758 /* context bank can't be set while attached */
2759 if (smmu_domain->smmu != NULL) {
2760 ret = -EBUSY;
2761 break;
2762 }
2763 /* ... and it can only be set for dynamic contexts. */
2764 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
2765 ret = -EINVAL;
2766 break;
2767 }
2768
2769 /* this will be validated during attach */
2770 smmu_domain->cfg.cbndx = *((unsigned int *)data);
2771 ret = 0;
2772 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002773 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
2774 u32 non_fatal_faults = *((int *)data);
2775
2776 if (non_fatal_faults)
2777 smmu_domain->attributes |=
2778 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
2779 else
2780 smmu_domain->attributes &=
2781 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
2782 ret = 0;
2783 break;
2784 }
Patrick Dalye62d3362016-03-15 18:58:28 -07002785 case DOMAIN_ATTR_S1_BYPASS: {
2786 int bypass = *((int *)data);
2787
2788 /* bypass can't be changed while attached */
2789 if (smmu_domain->smmu != NULL) {
2790 ret = -EBUSY;
2791 break;
2792 }
2793 if (bypass)
2794 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
2795 else
2796 smmu_domain->attributes &=
2797 ~(1 << DOMAIN_ATTR_S1_BYPASS);
2798
2799 ret = 0;
2800 break;
2801 }
Patrick Daly8befb662016-08-17 20:03:28 -07002802 case DOMAIN_ATTR_ATOMIC:
2803 {
2804 int atomic_ctx = *((int *)data);
2805
2806 /* can't be changed while attached */
2807 if (smmu_domain->smmu != NULL) {
2808 ret = -EBUSY;
2809 break;
2810 }
2811 if (atomic_ctx)
2812 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
2813 else
2814 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
2815 break;
2816 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002817 case DOMAIN_ATTR_SECURE_VMID:
2818 if (smmu_domain->secure_vmid != VMID_INVAL) {
2819 ret = -ENODEV;
2820 WARN(1, "secure vmid already set!");
2821 break;
2822 }
2823 smmu_domain->secure_vmid = *((int *)data);
2824 break;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002825 case DOMAIN_ATTR_FAST:
2826 if (*((int *)data))
2827 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
2828 ret = 0;
2829 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002830 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2831 /* can't be changed while attached */
2832 if (smmu_domain->smmu != NULL) {
2833 ret = -EBUSY;
2834 break;
2835 }
2836 if (*((int *)data))
2837 smmu_domain->attributes |=
2838 1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
2839 ret = 0;
2840 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08002841 case DOMAIN_ATTR_EARLY_MAP: {
2842 int early_map = *((int *)data);
2843
2844 ret = 0;
2845 if (early_map) {
2846 smmu_domain->attributes |=
2847 1 << DOMAIN_ATTR_EARLY_MAP;
2848 } else {
2849 if (smmu_domain->smmu)
2850 ret = arm_smmu_enable_s1_translations(
2851 smmu_domain);
2852
2853 if (!ret)
2854 smmu_domain->attributes &=
2855 ~(1 << DOMAIN_ATTR_EARLY_MAP);
2856 }
2857 break;
2858 }
Liam Mark53cf2342016-12-20 11:36:07 -08002859 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT: {
2860 int force_coherent = *((int *)data);
2861
2862 if (smmu_domain->smmu != NULL) {
2863 dev_err(smmu_domain->smmu->dev,
2864 "cannot change force coherent attribute while attached\n");
2865 ret = -EBUSY;
2866 break;
2867 }
2868
2869 if (force_coherent)
2870 smmu_domain->attributes |=
2871 1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
2872 else
2873 smmu_domain->attributes &=
2874 ~(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT);
2875
2876 ret = 0;
2877 break;
2878 }
2879
Charan Teja Reddyc682e472017-04-20 19:11:20 +05302880 case DOMAIN_ATTR_CB_STALL_DISABLE:
2881 if (*((int *)data))
2882 smmu_domain->attributes |=
2883 1 << DOMAIN_ATTR_CB_STALL_DISABLE;
2884 ret = 0;
2885 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002886 default:
Will Deacon518f7132014-11-14 17:17:54 +00002887 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01002888 }
Will Deacon518f7132014-11-14 17:17:54 +00002889
2890out_unlock:
2891 mutex_unlock(&smmu_domain->init_mutex);
2892 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002893}
2894
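/*
 * Pack the devicetree #iommu-cells arguments (stream ID and optional SMR
 * mask) into a single firmware ID and record it in the device's fwspec.
 */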
Robin Murphy7e96c742016-09-14 15:26:46 +01002895static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
2896{
2897 u32 fwid = 0;
2898
2899 if (args->args_count > 0)
2900 fwid |= (u16)args->args[0];
2901
2902 if (args->args_count > 1)
2903 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
2904
2905 return iommu_fwspec_add_ids(dev, &fwid, 1);
2906}
2907
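/*
 * Turn on stage 1 translation for the domain's context bank by setting
 * SCTLR.M. Called when DOMAIN_ATTR_EARLY_MAP is cleared on an attached
 * domain.
 */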
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08002908static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain)
2909{
2910 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2911 struct arm_smmu_device *smmu = smmu_domain->smmu;
2912 void __iomem *cb_base;
2913 u32 reg;
2914 int ret;
2915
2916 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2917 ret = arm_smmu_power_on(smmu->pwr);
2918 if (ret)
2919 return ret;
2920
2921 reg = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
2922 reg |= SCTLR_M;
2923
2924 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
2925 arm_smmu_power_off(smmu->pwr);
2926 return ret;
2927}
2928
Liam Mark3ba41cf2016-12-09 14:39:04 -08002929static bool arm_smmu_is_iova_coherent(struct iommu_domain *domain,
2930 dma_addr_t iova)
2931{
2932 bool ret;
2933 unsigned long flags;
2934 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2935 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2936
2937 if (!ops)
2938 return false;
2939
2940 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2941 ret = ops->is_iova_coherent(ops, iova);
2942 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2943 return ret;
2944}
2945
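/*
 * Debug helper: synthesize a context fault by writing the given flags to
 * FSRRESTORE, then give the interrupt handler a second to run.
 */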
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002946static void arm_smmu_trigger_fault(struct iommu_domain *domain,
2947 unsigned long flags)
2948{
2949 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2950 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2951 struct arm_smmu_device *smmu;
2952 void __iomem *cb_base;
2953
2954 if (!smmu_domain->smmu) {
2955 pr_err("Can't trigger faults on non-attached domains\n");
2956 return;
2957 }
2958
2959 smmu = smmu_domain->smmu;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002960 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002961 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002962
2963 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2964 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
2965 flags, cfg->cbndx);
2966 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07002967 /* give the interrupt time to fire... */
2968 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002969
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002970 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002971}
2972
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002973static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
2974 unsigned long offset)
2975{
2976 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2977 struct arm_smmu_device *smmu;
2978 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2979 void __iomem *cb_base;
2980 unsigned long val;
2981
2982 if (offset >= SZ_4K) {
2983 pr_err("Invalid offset: 0x%lx\n", offset);
2984 return 0;
2985 }
2986
2987 smmu = smmu_domain->smmu;
2988 if (!smmu) {
2989 WARN(1, "Can't read registers of a detached domain\n");
2990 val = 0;
2991 return val;
2992 }
2993
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002994 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002995 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002996
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002997 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2998 val = readl_relaxed(cb_base + offset);
2999
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003000 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07003001 return val;
3002}
3003
3004static void arm_smmu_reg_write(struct iommu_domain *domain,
3005 unsigned long offset, unsigned long val)
3006{
3007 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3008 struct arm_smmu_device *smmu;
3009 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3010 void __iomem *cb_base;
3011
3012 if (offset >= SZ_4K) {
3013 pr_err("Invalid offset: 0x%lx\n", offset);
3014 return;
3015 }
3016
3017 smmu = smmu_domain->smmu;
3018 if (!smmu) {
 3019		WARN(1, "Can't write registers of a detached domain\n");
3020 return;
3021 }
3022
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003023 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003024 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003025
Mitchel Humpherysfd557002015-08-21 14:07:59 -07003026 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3027 writel_relaxed(val, cb_base + offset);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003028
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003029 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07003030}
3031
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003032static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
3033{
3034 arm_smmu_tlb_inv_context(to_smmu_domain(domain));
3035}
3036
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003037static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
3038{
3039 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3040
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003041 return arm_smmu_power_on(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003042}
3043
3044static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
3045{
3046 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3047
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003048 arm_smmu_power_off(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003049}
3050
Will Deacon518f7132014-11-14 17:17:54 +00003051static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01003052 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01003053 .domain_alloc = arm_smmu_domain_alloc,
3054 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01003055 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07003056 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01003057 .map = arm_smmu_map,
3058 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07003059 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01003060 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07003061 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01003062 .add_device = arm_smmu_add_device,
3063 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02003064 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01003065 .domain_get_attr = arm_smmu_domain_get_attr,
3066 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy7e96c742016-09-14 15:26:46 +01003067 .of_xlate = arm_smmu_of_xlate,
Will Deacon518f7132014-11-14 17:17:54 +00003068 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003069 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherysfd557002015-08-21 14:07:59 -07003070 .reg_read = arm_smmu_reg_read,
3071 .reg_write = arm_smmu_reg_write,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003072 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003073 .enable_config_clocks = arm_smmu_enable_config_clocks,
3074 .disable_config_clocks = arm_smmu_disable_config_clocks,
Liam Mark3ba41cf2016-12-09 14:39:04 -08003075 .is_iova_coherent = arm_smmu_is_iova_coherent,
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07003076 .iova_to_pte = arm_smmu_iova_to_pte,
Will Deacon45ae7cf2013-06-24 18:31:25 +01003077};
3078
Patrick Dalyad441dd2016-09-15 15:50:46 -07003079#define IMPL_DEF1_MICRO_MMU_CTRL 0
3080#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
3081#define MICRO_MMU_CTRL_IDLE (1 << 3)
3082
3083/* Definitions for implementation-defined registers */
3084#define ACTLR_QCOM_OSH_SHIFT 28
3085#define ACTLR_QCOM_OSH 1
3086
3087#define ACTLR_QCOM_ISH_SHIFT 29
3088#define ACTLR_QCOM_ISH 1
3089
3090#define ACTLR_QCOM_NSH_SHIFT 30
3091#define ACTLR_QCOM_NSH 1
3092
3093static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003094{
3095 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003096 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003097
3098 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
3099 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
3100 0, 30000)) {
3101 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
3102 return -EBUSY;
3103 }
3104
3105 return 0;
3106}
3107
Patrick Dalyad441dd2016-09-15 15:50:46 -07003108static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003109{
3110 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3111 u32 reg;
3112
3113 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3114 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
3115 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3116
Patrick Dalyad441dd2016-09-15 15:50:46 -07003117 return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003118}
3119
Patrick Dalyad441dd2016-09-15 15:50:46 -07003120static int qsmmuv2_halt(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003121{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003122 return __qsmmuv2_halt(smmu, true);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003123}
3124
Patrick Dalyad441dd2016-09-15 15:50:46 -07003125static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003126{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003127 return __qsmmuv2_halt(smmu, false);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003128}
3129
Patrick Dalyad441dd2016-09-15 15:50:46 -07003130static void qsmmuv2_resume(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003131{
3132 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3133 u32 reg;
3134
3135 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3136 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
3137 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3138}
3139
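/*
 * QSMMUv2 implementation-defined reset: program the ACTLR shareability
 * overrides into every context bank, then halt the SMMU while the
 * attach-impl-defs register list from devicetree is applied, and resume.
 */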
Patrick Dalyad441dd2016-09-15 15:50:46 -07003140static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003141{
3142 int i;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003143 u32 val;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003144 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003145 void __iomem *cb_base;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003146
Patrick Dalyad441dd2016-09-15 15:50:46 -07003147 /*
3148 * SCTLR.M must be disabled here per ARM SMMUv2 spec
3149 * to prevent table walks with an inconsistent state.
3150 */
3151 for (i = 0; i < smmu->num_context_banks; ++i) {
3152 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3153 val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
3154 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
3155 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
3156 writel_relaxed(val, cb_base + ARM_SMMU_CB_ACTLR);
3157 }
3158
3159 /* Program implementation defined registers */
3160 qsmmuv2_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003161 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
3162 writel_relaxed(regs[i].value,
3163 ARM_SMMU_GR0(smmu) + regs[i].offset);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003164 qsmmuv2_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003165}
3166
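/*
 * QSMMUv2 ATOS path: with the SMMU powered on and atos_lock held, halt the
 * SMMU and terminate any stalled transaction, clear FSR, momentarily drop
 * SCTLR.CFCFG so faults terminate rather than stall, run the common
 * hardware translation, then restore SCTLR, resume and power off.
 */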
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003167static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
3168 dma_addr_t iova)
Patrick Dalyad441dd2016-09-15 15:50:46 -07003169{
3170 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3171 struct arm_smmu_device *smmu = smmu_domain->smmu;
3172 int ret;
3173 phys_addr_t phys = 0;
3174 unsigned long flags;
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003175 u32 sctlr, sctlr_orig, fsr;
3176 void __iomem *cb_base;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003177
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003178 ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003179 if (ret)
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003180 return ret;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003181
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003182 spin_lock_irqsave(&smmu->atos_lock, flags);
3183 cb_base = ARM_SMMU_CB_BASE(smmu) +
3184 ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003185
3186 qsmmuv2_halt_nowait(smmu);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003187 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003188 qsmmuv2_wait_for_halt(smmu);
3189
3190 /* clear FSR to allow ATOS to log any faults */
3191 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
3192 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
3193
3194 /* disable stall mode momentarily */
3195 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3196 sctlr = sctlr_orig & ~SCTLR_CFCFG;
3197 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
3198
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003199 phys = __arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003200
3201 /* restore SCTLR */
3202 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
3203
3204 qsmmuv2_resume(smmu);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003205 spin_unlock_irqrestore(&smmu->atos_lock, flags);
3206
3207 arm_smmu_power_off(smmu_domain->smmu->pwr);
3208 return phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003209}
3210
3211struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
3212 .device_reset = qsmmuv2_device_reset,
3213 .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
Patrick Dalyad441dd2016-09-15 15:50:46 -07003214};
3215
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003216static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003217{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003218 int i;
3219 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003220 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003221 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003222
Peng Fan3ca37122016-05-03 21:50:30 +08003223 /*
 3224	 * Before clearing ARM_MMU500_ACTLR_CPRE, we need to clear the
 3225	 * CACHE_LOCK bit of ACR first; the CACHE_LOCK bit is only
 3226	 * present in MMU-500r2 onwards.
3227 */
3228 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
3229 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
3230 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
3231 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
3232 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
3233 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
3234 }
3235
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003236 /* Make sure all context banks are disabled and clear CB_FSR */
3237 for (i = 0; i < smmu->num_context_banks; ++i) {
3238 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3239 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
3240 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003241 /*
3242 * Disable MMU-500's not-particularly-beneficial next-page
3243 * prefetcher for the sake of errata #841119 and #826419.
3244 */
3245 if (smmu->model == ARM_MMU500) {
3246 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
3247 reg &= ~ARM_MMU500_ACTLR_CPRE;
3248 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
3249 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003250 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003251}
3252
3253static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
3254{
3255 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy468f4942016-09-12 17:13:49 +01003256 int i;
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003257 u32 reg;
3258
3259 /* clear global FSR */
3260 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3261 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3262
Robin Murphy468f4942016-09-12 17:13:49 +01003263 /*
3264 * Reset stream mapping groups: Initial values mark all SMRn as
3265 * invalid and all S2CRn as bypass unless overridden.
3266 */
Patrick Daly59b6d202017-06-12 13:12:15 -07003267 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
3268 for (i = 0; i < smmu->num_mapping_groups; ++i)
3269 arm_smmu_write_sme(smmu, i);
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003270
Patrick Daly59b6d202017-06-12 13:12:15 -07003271 arm_smmu_context_bank_reset(smmu);
3272 }
Will Deacon1463fe42013-07-31 19:21:27 +01003273
Will Deacon45ae7cf2013-06-24 18:31:25 +01003274 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003275 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
3276 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
3277
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003278 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003279
Will Deacon45ae7cf2013-06-24 18:31:25 +01003280 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003281 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003282
3283 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003284 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003285
Robin Murphy25a1c962016-02-10 14:25:33 +00003286 /* Enable client access, handling unmatched streams as appropriate */
3287 reg &= ~sCR0_CLIENTPD;
3288 if (disable_bypass)
3289 reg |= sCR0_USFCFG;
3290 else
3291 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003292
3293 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003294 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003295
3296 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003297 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003298
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003299 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3300 reg |= sCR0_VMID16EN;
3301
Will Deacon45ae7cf2013-06-24 18:31:25 +01003302 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003303 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003304 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Dalyd7476202016-09-08 18:23:28 -07003305
3306 /* Manage any implementation defined features */
3307 arm_smmu_arch_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003308}
3309
3310static int arm_smmu_id_size_to_bits(int size)
3311{
3312 switch (size) {
3313 case 0:
3314 return 32;
3315 case 1:
3316 return 36;
3317 case 2:
3318 return 40;
3319 case 3:
3320 return 42;
3321 case 4:
3322 return 44;
3323 case 5:
3324 default:
3325 return 48;
3326 }
3327}
3328
Patrick Dalyda688822017-05-17 20:12:48 -07003329
3330/*
 3331 * Some context banks need to be transferred from bootloader to HLOS in a way
3332 * that allows ongoing traffic. The current expectation is that these context
3333 * banks operate in bypass mode.
3334 * Additionally, there must be exactly one device in devicetree with stream-ids
3335 * overlapping those used by the bootloader.
3336 */
3337static int arm_smmu_alloc_cb(struct iommu_domain *domain,
3338 struct arm_smmu_device *smmu,
3339 struct device *dev)
3340{
3341 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Dalye72526b2017-07-18 16:21:44 -07003342 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Dalyda688822017-05-17 20:12:48 -07003343 u32 i, idx;
3344 int cb = -EINVAL;
3345 bool dynamic;
3346
Patrick Dalye72526b2017-07-18 16:21:44 -07003347 /*
3348 * Dynamic domains have already set cbndx through domain attribute.
3349 * Verify that they picked a valid value.
3350 */
Patrick Dalyda688822017-05-17 20:12:48 -07003351 dynamic = is_dynamic_domain(domain);
Patrick Dalye72526b2017-07-18 16:21:44 -07003352 if (dynamic) {
3353 cb = smmu_domain->cfg.cbndx;
3354 if (cb < smmu->num_context_banks)
3355 return cb;
3356 else
3357 return -EINVAL;
3358 }
Patrick Dalyda688822017-05-17 20:12:48 -07003359
3360 mutex_lock(&smmu->stream_map_mutex);
3361 for_each_cfg_sme(fwspec, i, idx) {
3362 if (smmu->s2crs[idx].cb_handoff)
3363 cb = smmu->s2crs[idx].cbndx;
3364 }
3365
3366 if (cb < 0) {
3367 mutex_unlock(&smmu->stream_map_mutex);
3368 return __arm_smmu_alloc_bitmap(smmu->context_map,
3369 smmu->num_s2_context_banks,
3370 smmu->num_context_banks);
3371 }
3372
3373 for (i = 0; i < smmu->num_mapping_groups; i++) {
Patrick Daly2eb31362017-06-14 18:29:36 -07003374 if (smmu->s2crs[i].cb_handoff && smmu->s2crs[i].cbndx == cb) {
Patrick Dalyda688822017-05-17 20:12:48 -07003375 smmu->s2crs[i].cb_handoff = false;
3376 smmu->s2crs[i].count -= 1;
3377 }
3378 }
3379 mutex_unlock(&smmu->stream_map_mutex);
3380
3381 return cb;
3382}
3383
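/*
 * Read back the stream mapping left behind by the bootloader: record every
 * valid SMR/S2CR pair of type TRANS as a handed-off entry and reserve its
 * context bank in context_map so that arm_smmu_alloc_cb() can reuse it.
 */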
3384static int arm_smmu_handoff_cbs(struct arm_smmu_device *smmu)
3385{
3386 u32 i, raw_smr, raw_s2cr;
3387 struct arm_smmu_smr smr;
3388 struct arm_smmu_s2cr s2cr;
3389
3390 for (i = 0; i < smmu->num_mapping_groups; i++) {
3391 raw_smr = readl_relaxed(ARM_SMMU_GR0(smmu) +
3392 ARM_SMMU_GR0_SMR(i));
3393 if (!(raw_smr & SMR_VALID))
3394 continue;
3395
3396 smr.mask = (raw_smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
3397 smr.id = (u16)raw_smr;
3398 smr.valid = true;
3399
3400 raw_s2cr = readl_relaxed(ARM_SMMU_GR0(smmu) +
3401 ARM_SMMU_GR0_S2CR(i));
3402 s2cr.group = NULL;
3403 s2cr.count = 1;
3404 s2cr.type = (raw_s2cr >> S2CR_TYPE_SHIFT) & S2CR_TYPE_MASK;
3405 s2cr.privcfg = (raw_s2cr >> S2CR_PRIVCFG_SHIFT) &
3406 S2CR_PRIVCFG_MASK;
3407 s2cr.cbndx = (u8)raw_s2cr;
3408 s2cr.cb_handoff = true;
3409
3410 if (s2cr.type != S2CR_TYPE_TRANS)
3411 continue;
3412
3413 smmu->smrs[i] = smr;
3414 smmu->s2crs[i] = s2cr;
3415 bitmap_set(smmu->context_map, s2cr.cbndx, 1);
3416 dev_dbg(smmu->dev, "Handoff smr: %x s2cr: %x cb: %d\n",
3417 raw_smr, raw_s2cr, s2cr.cbndx);
3418 }
3419
3420 return 0;
3421}
3422
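/*
 * Parse the optional "attach-impl-defs" devicetree property into an array
 * of <offset, value> pairs, stashed on the SMMU for programming during
 * device reset.
 */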
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003423static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
3424{
3425 struct device *dev = smmu->dev;
3426 int i, ntuples, ret;
3427 u32 *tuples;
3428 struct arm_smmu_impl_def_reg *regs, *regit;
3429
3430 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
3431 return 0;
3432
3433 ntuples /= sizeof(u32);
3434 if (ntuples % 2) {
3435 dev_err(dev,
3436 "Invalid number of attach-impl-defs registers: %d\n",
3437 ntuples);
3438 return -EINVAL;
3439 }
3440
3441 regs = devm_kmalloc(
3442 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
3443 GFP_KERNEL);
3444 if (!regs)
3445 return -ENOMEM;
3446
3447 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
3448 if (!tuples)
3449 return -ENOMEM;
3450
3451 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
3452 tuples, ntuples);
3453 if (ret)
3454 return ret;
3455
3456 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
3457 regit->offset = tuples[i];
3458 regit->value = tuples[i + 1];
3459 }
3460
3461 devm_kfree(dev, tuples);
3462
3463 smmu->impl_def_attach_registers = regs;
3464 smmu->num_impl_def_attach_registers = ntuples / 2;
3465
3466 return 0;
3467}
3468
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003469
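/*
 * Acquire the clocks named in "clock-names". Clocks that report a rate of
 * zero are assigned the supported rate nearest 1000 Hz via clk_round_rate().
 */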
3470static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003471{
3472 const char *cname;
3473 struct property *prop;
3474 int i;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003475 struct device *dev = pwr->dev;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003476
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003477 pwr->num_clocks =
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003478 of_property_count_strings(dev->of_node, "clock-names");
3479
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003480 if (pwr->num_clocks < 1) {
3481 pwr->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003482 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07003483 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003484
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003485 pwr->clocks = devm_kzalloc(
3486 dev, sizeof(*pwr->clocks) * pwr->num_clocks,
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003487 GFP_KERNEL);
3488
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003489 if (!pwr->clocks)
3490 return -ENOMEM;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003491
3492 i = 0;
3493 of_property_for_each_string(dev->of_node, "clock-names",
3494 prop, cname) {
3495 struct clk *c = devm_clk_get(dev, cname);
3496
3497 if (IS_ERR(c)) {
 3498			dev_err(dev, "Couldn't get clock: %s\n",
3499 cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07003500 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003501 }
3502
3503 if (clk_get_rate(c) == 0) {
3504 long rate = clk_round_rate(c, 1000);
3505
3506 clk_set_rate(c, rate);
3507 }
3508
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003509 pwr->clocks[i] = c;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003510
3511 ++i;
3512 }
3513 return 0;
3514}
3515
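/*
 * Acquire the GDSC power-domain regulators named in the
 * "qcom,regulator-names" devicetree property.
 */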
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003516static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003517{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003518 const char *cname;
3519 struct property *prop;
3520 int i, ret = 0;
3521 struct device *dev = pwr->dev;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003522
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003523 pwr->num_gdscs =
3524 of_property_count_strings(dev->of_node, "qcom,regulator-names");
3525
3526 if (pwr->num_gdscs < 1) {
3527 pwr->num_gdscs = 0;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003528 return 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003529 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003530
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003531 pwr->gdscs = devm_kzalloc(
3532 dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
3533
3534 if (!pwr->gdscs)
3535 return -ENOMEM;
3536
3537 i = 0;
3538 of_property_for_each_string(dev->of_node, "qcom,regulator-names",
3539 prop, cname)
Patrick Daly86396be2017-04-17 18:08:45 -07003540 pwr->gdscs[i++].supply = cname;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003541
3542 ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
3543 return ret;
3544}
3545
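/*
 * Register an msm-bus client for the SMMU if the devicetree carries
 * "qcom,msm-bus,name" bus-scaling data; absence of the property is not an
 * error.
 */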
3546static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
3547{
3548 struct device *dev = pwr->dev;
3549
3550 /* We don't want the bus APIs to print an error message */
3551 if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
3552 dev_dbg(dev, "No bus scaling info\n");
3553 return 0;
3554 }
3555
3556 pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
3557 if (!pwr->bus_dt_data) {
3558 dev_err(dev, "Unable to read bus-scaling from devicetree\n");
3559 return -EINVAL;
3560 }
3561
3562 pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
3563 if (!pwr->bus_client) {
3564 dev_err(dev, "Bus client registration failed\n");
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003565 return -EINVAL;
3566 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003567
3568 return 0;
3569}
3570
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003571/*
3572 * Cleanup done by devm. Any non-devm resources must clean up themselves.
3573 */
3574static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
3575 struct platform_device *pdev)
Patrick Daly2764f952016-09-06 19:22:44 -07003576{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003577 struct arm_smmu_power_resources *pwr;
3578 int ret;
Patrick Daly2764f952016-09-06 19:22:44 -07003579
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003580 pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
3581 if (!pwr)
3582 return ERR_PTR(-ENOMEM);
Patrick Daly2764f952016-09-06 19:22:44 -07003583
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003584 pwr->dev = &pdev->dev;
3585 pwr->pdev = pdev;
3586 mutex_init(&pwr->power_lock);
3587 spin_lock_init(&pwr->clock_refs_lock);
Patrick Daly2764f952016-09-06 19:22:44 -07003588
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003589 ret = arm_smmu_init_clocks(pwr);
3590 if (ret)
3591 return ERR_PTR(ret);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003592
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003593 ret = arm_smmu_init_regulators(pwr);
3594 if (ret)
3595 return ERR_PTR(ret);
3596
3597 ret = arm_smmu_init_bus_scaling(pwr);
3598 if (ret)
3599 return ERR_PTR(ret);
3600
3601 return pwr;
Patrick Daly2764f952016-09-06 19:22:44 -07003602}
3603
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003604/*
Patrick Dalyabeee952017-04-13 18:14:59 -07003605 * Bus APIs are not devm-safe, so the bus client must be unregistered here.
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003606 */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003607static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003608{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003609 msm_bus_scale_unregister_client(pwr->bus_client);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003610}
3611
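/*
 * Probe the ID registers (and devicetree overrides) to work out what this
 * SMMU implementation supports: translation stages, stream matching
 * resources, context bank counts, address sizes and page table formats.
 */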
Will Deacon45ae7cf2013-06-24 18:31:25 +01003612static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
3613{
3614 unsigned long size;
3615 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
3616 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003617 bool cttw_dt, cttw_reg;
Robin Murphya754fd12016-09-12 17:13:50 +01003618 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003619
Mitchel Humpherysba822582015-10-20 11:37:41 -07003620 dev_dbg(smmu->dev, "probing hardware configuration...\n");
3621 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01003622 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003623
3624 /* ID0 */
3625 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01003626
3627 /* Restrict available stages based on module parameter */
3628 if (force_stage == 1)
3629 id &= ~(ID0_S2TS | ID0_NTS);
3630 else if (force_stage == 2)
3631 id &= ~(ID0_S1TS | ID0_NTS);
3632
Will Deacon45ae7cf2013-06-24 18:31:25 +01003633 if (id & ID0_S1TS) {
3634 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003635 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003636 }
3637
3638 if (id & ID0_S2TS) {
3639 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003640 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003641 }
3642
3643 if (id & ID0_NTS) {
3644 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003645 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003646 }
3647
3648 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01003649 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003650 dev_err(smmu->dev, "\tno translation support!\n");
3651 return -ENODEV;
3652 }
3653
Robin Murphyb7862e32016-04-13 18:13:03 +01003654 if ((id & ID0_S1TS) &&
3655 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003656 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003657 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003658 }
3659
Robin Murphybae2c2d2015-07-29 19:46:05 +01003660 /*
3661 * In order for DMA API calls to work properly, we must defer to what
3662 * the DT says about coherency, regardless of what the hardware claims.
3663 * Fortunately, this also opens up a workaround for systems where the
3664 * ID register value has ended up configured incorrectly.
3665 */
3666 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
3667 cttw_reg = !!(id & ID0_CTTW);
3668 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003669 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003670 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003671 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01003672 cttw_dt ? "" : "non-");
3673 if (cttw_dt != cttw_reg)
3674 dev_notice(smmu->dev,
3675 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003676
Robin Murphy53867802016-09-12 17:13:48 +01003677 /* Max. number of entries we have for stream matching/indexing */
3678 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
3679 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003680 if (id & ID0_SMS) {
Robin Murphy53867802016-09-12 17:13:48 +01003681 u32 smr;
Patrick Daly937de532016-12-12 18:44:09 -08003682 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003683
3684 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy53867802016-09-12 17:13:48 +01003685 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
3686 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003687 dev_err(smmu->dev,
3688 "stream-matching supported, but no SMRs present!\n");
3689 return -ENODEV;
3690 }
3691
Robin Murphy53867802016-09-12 17:13:48 +01003692 /*
3693 * SMR.ID bits may not be preserved if the corresponding MASK
3694 * bits are set, so check each one separately. We can reject
3695 * masters later if they try to claim IDs outside these masks.
3696 */
Patrick Daly937de532016-12-12 18:44:09 -08003697 for (i = 0; i < size; i++) {
3698 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
3699 if (!(smr & SMR_VALID))
3700 break;
3701 }
3702 if (i == size) {
3703 dev_err(smmu->dev,
3704 "Unable to compute streamid_masks\n");
3705 return -ENODEV;
3706 }
3707
Robin Murphy53867802016-09-12 17:13:48 +01003708 smr = smmu->streamid_mask << SMR_ID_SHIFT;
Patrick Daly937de532016-12-12 18:44:09 -08003709 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
3710 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
Robin Murphy53867802016-09-12 17:13:48 +01003711 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003712
Robin Murphy53867802016-09-12 17:13:48 +01003713 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
Patrick Daly937de532016-12-12 18:44:09 -08003714 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
3715 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
Robin Murphy53867802016-09-12 17:13:48 +01003716 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
Dhaval Patel031d7462015-05-09 14:47:29 -07003717
Robin Murphy468f4942016-09-12 17:13:49 +01003718 /* Zero-initialised to mark as invalid */
3719 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
3720 GFP_KERNEL);
3721 if (!smmu->smrs)
3722 return -ENOMEM;
3723
Robin Murphy53867802016-09-12 17:13:48 +01003724 dev_notice(smmu->dev,
3725 "\tstream matching with %lu register groups, mask 0x%x",
3726 size, smmu->smr_mask_mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003727 }
Robin Murphya754fd12016-09-12 17:13:50 +01003728 /* s2cr->type == 0 means translation, so initialise explicitly */
3729 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
3730 GFP_KERNEL);
3731 if (!smmu->s2crs)
3732 return -ENOMEM;
3733 for (i = 0; i < size; i++)
3734 smmu->s2crs[i] = s2cr_init_val;
3735
Robin Murphy53867802016-09-12 17:13:48 +01003736 smmu->num_mapping_groups = size;
Robin Murphy6668f692016-09-12 17:13:54 +01003737 mutex_init(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003738
Robin Murphy7602b872016-04-28 17:12:09 +01003739 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
3740 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
3741 if (!(id & ID0_PTFS_NO_AARCH32S))
3742 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
3743 }
3744
Will Deacon45ae7cf2013-06-24 18:31:25 +01003745 /* ID1 */
3746 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01003747 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003748
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003749 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00003750 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01003751 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003752 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07003753 dev_warn(smmu->dev,
3754 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
3755 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003756
Will Deacon518f7132014-11-14 17:17:54 +00003757 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003758 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
3759 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
3760 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
3761 return -ENODEV;
3762 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07003763 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01003764 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01003765 /*
3766 * Cavium CN88xx erratum #27704.
3767 * Ensure ASID and VMID allocation is unique across all SMMUs in
3768 * the system.
3769 */
3770 if (smmu->model == CAVIUM_SMMUV2) {
3771 smmu->cavium_id_base =
3772 atomic_add_return(smmu->num_context_banks,
3773 &cavium_smmu_context_count);
3774 smmu->cavium_id_base -= smmu->num_context_banks;
3775 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003776
3777 /* ID2 */
3778 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
3779 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003780 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003781
Will Deacon518f7132014-11-14 17:17:54 +00003782 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003783 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003784 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003785
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003786 if (id & ID2_VMID16)
3787 smmu->features |= ARM_SMMU_FEAT_VMID16;
3788
Robin Murphyf1d84542015-03-04 16:41:05 +00003789 /*
3790 * What the page table walker can address actually depends on which
3791 * descriptor format is in use, but since a) we don't know that yet,
3792 * and b) it can vary per context bank, this will have to do...
3793 */
3794 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
3795 dev_warn(smmu->dev,
3796 "failed to set DMA mask for table walker\n");
3797
Robin Murphyb7862e32016-04-13 18:13:03 +01003798 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00003799 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01003800 if (smmu->version == ARM_SMMU_V1_64K)
3801 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003802 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003803 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00003804 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00003805 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01003806 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00003807 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01003808 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00003809 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01003810 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003811 }
3812
Robin Murphy7602b872016-04-28 17:12:09 +01003813 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01003814 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01003815 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01003816 if (smmu->features &
3817 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01003818 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01003819 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01003820 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01003821 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01003822 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01003823
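	/*
	 * The first SMMU instance to probe seeds arm_smmu_ops.pgsize_bitmap;
	 * later instances OR in whatever additional sizes they support.
	 */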
Robin Murphyd5466352016-05-09 17:20:09 +01003824 if (arm_smmu_ops.pgsize_bitmap == -1UL)
3825 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
3826 else
3827 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003828 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01003829 smmu->pgsize_bitmap);
3830
Will Deacon518f7132014-11-14 17:17:54 +00003831
Will Deacon28d60072014-09-01 16:24:48 +01003832 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003833 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
3834 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003835
3836 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003837 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
3838 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003839
Will Deacon45ae7cf2013-06-24 18:31:25 +01003840 return 0;
3841}
3842
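/*
 * Optional implementation-specific hooks (e.g. the qsmmuv500 ops at the end
 * of this file): init runs once during probe, device_reset whenever the
 * SMMU hardware is reinitialised.
 */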
Patrick Dalyd7476202016-09-08 18:23:28 -07003843static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
3844{
3845 if (!smmu->arch_ops)
3846 return 0;
3847 if (!smmu->arch_ops->init)
3848 return 0;
3849 return smmu->arch_ops->init(smmu);
3850}
3851
3852static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
3853{
3854 if (!smmu->arch_ops)
3855 return;
3856 if (!smmu->arch_ops->device_reset)
3857 return;
 3858	smmu->arch_ops->device_reset(smmu);
3859}
3860
Robin Murphy67b65a32016-04-13 18:12:57 +01003861struct arm_smmu_match_data {
3862 enum arm_smmu_arch_version version;
3863 enum arm_smmu_implementation model;
Patrick Dalyd7476202016-09-08 18:23:28 -07003864 struct arm_smmu_arch_ops *arch_ops;
Robin Murphy67b65a32016-04-13 18:12:57 +01003865};
3866
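/* One static match_data instance per compatible string in arm_smmu_of_match */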
Patrick Dalyd7476202016-09-08 18:23:28 -07003867#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops) \
3868static struct arm_smmu_match_data name = { \
3869.version = ver, \
3870.model = imp, \
3871.arch_ops = ops, \
3872} \
Robin Murphy67b65a32016-04-13 18:12:57 +01003873
Patrick Daly1f8a2882016-09-12 17:32:05 -07003874struct arm_smmu_arch_ops qsmmuv500_arch_ops;
3875
Patrick Dalyd7476202016-09-08 18:23:28 -07003876ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
3877ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
3878ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
3879ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
3880ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003881ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
Patrick Daly1f8a2882016-09-12 17:32:05 -07003882ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
3883 &qsmmuv500_arch_ops);
Robin Murphy67b65a32016-04-13 18:12:57 +01003884
Joerg Roedel09b52692014-10-02 12:24:45 +02003885static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01003886 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
3887 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
3888 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01003889 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003890 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01003891 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07003892 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Patrick Daly1f8a2882016-09-12 17:32:05 -07003893 { .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
Robin Murphy09360402014-08-28 17:51:59 +01003894 { },
3895};
3896MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
3897
Patrick Dalyc47dcd42017-02-09 23:09:57 -08003898
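/*
 * Probe-time fixups walked over the platform bus: the first re-runs generic
 * OF IOMMU configuration for devices that probed before this SMMU, the
 * second calls add_device for them when another SMMU instance has already
 * installed arm_smmu_ops on the bus.
 */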
3899static int arm_smmu_of_iommu_configure_fixup(struct device *dev, void *data)
3900{
3901 if (!dev->iommu_fwspec)
3902 of_iommu_configure(dev, dev->of_node);
3903 return 0;
3904}
3905
Patrick Daly000a2f22017-02-13 22:18:12 -08003906static int arm_smmu_add_device_fixup(struct device *dev, void *data)
3907{
3908 struct iommu_ops *ops = data;
3909
3910 ops->add_device(dev);
3911 return 0;
3912}
3913
Patrick Daly1f8a2882016-09-12 17:32:05 -07003914static int qsmmuv500_tbu_register(struct device *dev, void *data);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003915static int arm_smmu_device_dt_probe(struct platform_device *pdev)
3916{
Robin Murphy67b65a32016-04-13 18:12:57 +01003917 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003918 struct resource *res;
3919 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003920 struct device *dev = &pdev->dev;
Robin Murphyd5b41782016-09-14 15:21:39 +01003921 int num_irqs, i, err;
Robin Murphy7e96c742016-09-14 15:26:46 +01003922 bool legacy_binding;
3923
3924 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
3925 if (legacy_binding && !using_generic_binding) {
3926 if (!using_legacy_binding)
3927 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
3928 using_legacy_binding = true;
3929 } else if (!legacy_binding && !using_legacy_binding) {
3930 using_generic_binding = true;
3931 } else {
3932 dev_err(dev, "not probing due to mismatched DT properties\n");
3933 return -ENODEV;
3934 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003935
3936 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
3937 if (!smmu) {
3938 dev_err(dev, "failed to allocate arm_smmu_device\n");
3939 return -ENOMEM;
3940 }
3941 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08003942 spin_lock_init(&smmu->atos_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07003943 idr_init(&smmu->asid_idr);
3944 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003945
Robin Murphyfe52d4f2016-09-12 17:13:52 +01003946 data = of_device_get_match_data(dev);
Robin Murphy67b65a32016-04-13 18:12:57 +01003947 smmu->version = data->version;
3948 smmu->model = data->model;
Patrick Dalyd7476202016-09-08 18:23:28 -07003949 smmu->arch_ops = data->arch_ops;
Robin Murphy09360402014-08-28 17:51:59 +01003950
Will Deacon45ae7cf2013-06-24 18:31:25 +01003951 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01003952 smmu->base = devm_ioremap_resource(dev, res);
3953 if (IS_ERR(smmu->base))
3954 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003955 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003956
3957 if (of_property_read_u32(dev->of_node, "#global-interrupts",
3958 &smmu->num_global_irqs)) {
3959 dev_err(dev, "missing #global-interrupts property\n");
3960 return -ENODEV;
3961 }
3962
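	/*
	 * Count every IRQ resource; the first #global-interrupts entries are
	 * global fault IRQs, anything beyond that is a context-bank IRQ.
	 */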
3963 num_irqs = 0;
3964 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
3965 num_irqs++;
3966 if (num_irqs > smmu->num_global_irqs)
3967 smmu->num_context_irqs++;
3968 }
3969
Andreas Herrmann44a08de2013-10-01 13:39:07 +01003970 if (!smmu->num_context_irqs) {
3971 dev_err(dev, "found %d interrupts but expected at least %d\n",
3972 num_irqs, smmu->num_global_irqs + 1);
3973 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003974 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003975
3976 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
3977 GFP_KERNEL);
3978 if (!smmu->irqs) {
3979 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
3980 return -ENOMEM;
3981 }
3982
3983 for (i = 0; i < num_irqs; ++i) {
3984 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07003985
Will Deacon45ae7cf2013-06-24 18:31:25 +01003986 if (irq < 0) {
3987 dev_err(dev, "failed to get irq index %d\n", i);
3988 return -ENODEV;
3989 }
3990 smmu->irqs[i] = irq;
3991 }
3992
Dhaval Patel031d7462015-05-09 14:47:29 -07003993 parse_driver_options(smmu);
3994
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003995 smmu->pwr = arm_smmu_init_power_resources(pdev);
3996 if (IS_ERR(smmu->pwr))
3997 return PTR_ERR(smmu->pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003998
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003999 err = arm_smmu_power_on(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004000 if (err)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004001 goto out_exit_power_resources;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004002
4003 err = arm_smmu_device_cfg_probe(smmu);
4004 if (err)
4005 goto out_power_off;
4006
Patrick Dalyda688822017-05-17 20:12:48 -07004007 err = arm_smmu_handoff_cbs(smmu);
4008 if (err)
4009 goto out_power_off;
4010
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004011 err = arm_smmu_parse_impl_def_registers(smmu);
4012 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004013 goto out_power_off;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004014
Robin Murphyb7862e32016-04-13 18:13:03 +01004015 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01004016 smmu->num_context_banks != smmu->num_context_irqs) {
4017 dev_err(dev,
Mitchel Humpherys73230ce2014-12-11 17:18:02 -08004018 "found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
4019 smmu->num_context_irqs, smmu->num_context_banks,
4020 smmu->num_context_banks);
4021 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004022 }
4023
Will Deacon45ae7cf2013-06-24 18:31:25 +01004024 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08004025 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
4026 NULL, arm_smmu_global_fault,
4027 IRQF_ONESHOT | IRQF_SHARED,
4028 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004029 if (err) {
4030 dev_err(dev, "failed to request global IRQ %d (%u)\n",
4031 i, smmu->irqs[i]);
Robin Murphyd5b41782016-09-14 15:21:39 +01004032 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004033 }
4034 }
4035
Patrick Dalyd7476202016-09-08 18:23:28 -07004036 err = arm_smmu_arch_init(smmu);
4037 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004038 goto out_power_off;
Patrick Dalyd7476202016-09-08 18:23:28 -07004039
Robin Murphy06e393e2016-09-12 17:13:55 +01004040 of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004041 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01004042 arm_smmu_device_reset(smmu);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004043 arm_smmu_power_off(smmu->pwr);
Patrick Dalyd7476202016-09-08 18:23:28 -07004044
Patrick Daly8e3371a2017-02-13 22:14:53 -08004045 INIT_LIST_HEAD(&smmu->list);
4046 spin_lock(&arm_smmu_devices_lock);
4047 list_add(&smmu->list, &arm_smmu_devices);
4048 spin_unlock(&arm_smmu_devices_lock);
4049
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004050 /* bus_set_iommu depends on this. */
4051 bus_for_each_dev(&platform_bus_type, NULL, NULL,
4052 arm_smmu_of_iommu_configure_fixup);
4053
Robin Murphy7e96c742016-09-14 15:26:46 +01004054 /* Oh, for a proper bus abstraction */
4055 if (!iommu_present(&platform_bus_type))
4056 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
Patrick Daly000a2f22017-02-13 22:18:12 -08004057 else
4058 bus_for_each_dev(&platform_bus_type, NULL, &arm_smmu_ops,
4059 arm_smmu_add_device_fixup);
Robin Murphy7e96c742016-09-14 15:26:46 +01004060#ifdef CONFIG_ARM_AMBA
4061 if (!iommu_present(&amba_bustype))
4062 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
4063#endif
4064#ifdef CONFIG_PCI
4065 if (!iommu_present(&pci_bus_type)) {
4066 pci_request_acs();
4067 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
4068 }
4069#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01004070 return 0;
4071
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004072out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004073 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004074
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004075out_exit_power_resources:
4076 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004077
Will Deacon45ae7cf2013-06-24 18:31:25 +01004078 return err;
4079}
4080
4081static int arm_smmu_device_remove(struct platform_device *pdev)
4082{
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004083 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004084
4085 if (!smmu)
4086 return -ENODEV;
4087
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004088 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07004089 return -EINVAL;
4090
Will Deaconecfadb62013-07-31 19:21:28 +01004091 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004092 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004093
Patrick Dalyc190d932016-08-30 17:23:28 -07004094 idr_destroy(&smmu->asid_idr);
4095
Will Deacon45ae7cf2013-06-24 18:31:25 +01004096 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07004097 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004098 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004099
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004100 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004101
Will Deacon45ae7cf2013-06-24 18:31:25 +01004102 return 0;
4103}
4104
Will Deacon45ae7cf2013-06-24 18:31:25 +01004105static struct platform_driver arm_smmu_driver = {
4106 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004107 .name = "arm-smmu",
4108 .of_match_table = of_match_ptr(arm_smmu_of_match),
4109 },
4110 .probe = arm_smmu_device_dt_probe,
4111 .remove = arm_smmu_device_remove,
4112};
4113
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004114static struct platform_driver qsmmuv500_tbu_driver;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004115static int __init arm_smmu_init(void)
4116{
Robin Murphy7e96c742016-09-14 15:26:46 +01004117 static bool registered;
4118 int ret = 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004119
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004120 if (registered)
4121 return 0;
4122
4123 ret = platform_driver_register(&qsmmuv500_tbu_driver);
4124 if (ret)
4125 return ret;
4126
4127 ret = platform_driver_register(&arm_smmu_driver);
4128 registered = !ret;
Robin Murphy7e96c742016-09-14 15:26:46 +01004129 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004130}
4131
4132static void __exit arm_smmu_exit(void)
4133{
 4134	platform_driver_unregister(&arm_smmu_driver);
4135}
4136
Andreas Herrmannb1950b22013-10-01 13:39:05 +01004137subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004138module_exit(arm_smmu_exit);
4139
Robin Murphy7e96c742016-09-14 15:26:46 +01004140static int __init arm_smmu_of_init(struct device_node *np)
4141{
4142 int ret = arm_smmu_init();
4143
4144 if (ret)
4145 return ret;
4146
4147 if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
4148 return -ENODEV;
4149
4150 return 0;
4151}
4152IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
4153IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
4154IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
4155IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
4156IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
4157IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
Robin Murphy7e96c742016-09-14 15:26:46 +01004158
Patrick Dalya0fddb62017-03-27 19:26:59 -07004159#define TCU_HW_VERSION_HLOS1 (0x18)
4160
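/*
 * Per-TBU debug register offsets, used below to halt a TBU and to drive
 * ECATS (external address translation) requests for iova-to-phys lookups.
 */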
Patrick Daly1f8a2882016-09-12 17:32:05 -07004161#define DEBUG_SID_HALT_REG 0x0
4162#define DEBUG_SID_HALT_VAL (0x1 << 16)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004163#define DEBUG_SID_HALT_SID_MASK 0x3ff
4164
4165#define DEBUG_VA_ADDR_REG 0x8
4166
4167#define DEBUG_TXN_TRIGG_REG 0x18
4168#define DEBUG_TXN_AXPROT_SHIFT 6
4169#define DEBUG_TXN_AXCACHE_SHIFT 2
4170#define DEBUG_TRX_WRITE (0x1 << 1)
4171#define DEBUG_TXN_READ (0x0 << 1)
4172#define DEBUG_TXN_TRIGGER 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004173
4174#define DEBUG_SR_HALT_ACK_REG 0x20
4175#define DEBUG_SR_HALT_ACK_VAL (0x1 << 1)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004176#define DEBUG_SR_ECATS_RUNNING_VAL (0x1 << 0)
4177
4178#define DEBUG_PAR_REG 0x28
4179#define DEBUG_PAR_PA_MASK ((0x1ULL << 36) - 1)
4180#define DEBUG_PAR_PA_SHIFT 12
4181#define DEBUG_PAR_FAULT_VAL 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004182
4183#define TBU_DBG_TIMEOUT_US 30000
4184
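/* qsmmuv500 private data, stashed in smmu->archdata by qsmmuv500_arch_init() */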
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004185struct qsmmuv500_archdata {
4186 struct list_head tbus;
Patrick Dalya0fddb62017-03-27 19:26:59 -07004187 void __iomem *tcu_base;
4188 u32 version;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004189};
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004190#define get_qsmmuv500_archdata(smmu) \
4191 ((struct qsmmuv500_archdata *)(smmu->archdata))
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004192
Patrick Daly1f8a2882016-09-12 17:32:05 -07004193struct qsmmuv500_tbu_device {
4194 struct list_head list;
4195 struct device *dev;
4196 struct arm_smmu_device *smmu;
4197 void __iomem *base;
4198 void __iomem *status_reg;
4199
4200 struct arm_smmu_power_resources *pwr;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004201 u32 sid_start;
4202 u32 num_sids;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004203
4204 /* Protects halt count */
4205 spinlock_t halt_lock;
4206 u32 halt_count;
4207};
4208
4209static int qsmmuv500_tbu_power_on_all(struct arm_smmu_device *smmu)
4210{
4211 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004212 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004213 int ret = 0;
4214
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004215 list_for_each_entry(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004216 ret = arm_smmu_power_on(tbu->pwr);
4217 if (ret)
4218 break;
4219 }
4220 if (!ret)
4221 return 0;
4222
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004223 list_for_each_entry_continue_reverse(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004224 arm_smmu_power_off(tbu->pwr);
4225 }
4226 return ret;
4227}
4228
4229static void qsmmuv500_tbu_power_off_all(struct arm_smmu_device *smmu)
4230{
4231 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004232 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004233
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004234 list_for_each_entry_reverse(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004235 arm_smmu_power_off(tbu->pwr);
4236 }
4237}
4238
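/*
 * Halt handshake: set the HALT request bit and poll for the ACK bit so no
 * new transactions flow through this TBU. Halt/resume nest via halt_count.
 */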
4239static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu)
4240{
4241 unsigned long flags;
4242 u32 val;
4243 void __iomem *base;
4244
4245 spin_lock_irqsave(&tbu->halt_lock, flags);
4246 if (tbu->halt_count) {
4247 tbu->halt_count++;
4248 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4249 return 0;
4250 }
4251
4252 base = tbu->base;
4253 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
4254 val |= DEBUG_SID_HALT_VAL;
4255 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
4256
4257 if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG,
4258 val, (val & DEBUG_SR_HALT_ACK_VAL),
4259 0, TBU_DBG_TIMEOUT_US)) {
4260 dev_err(tbu->dev, "Couldn't halt TBU!\n");
4261 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4262 return -ETIMEDOUT;
4263 }
4264
4265 tbu->halt_count = 1;
4266 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4267 return 0;
4268}
4269
4270static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
4271{
4272 unsigned long flags;
4273 u32 val;
4274 void __iomem *base;
4275
4276 spin_lock_irqsave(&tbu->halt_lock, flags);
4277 if (!tbu->halt_count) {
4278 WARN(1, "%s: bad tbu->halt_count", dev_name(tbu->dev));
4279 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4280 return;
4281
4282 } else if (tbu->halt_count > 1) {
4283 tbu->halt_count--;
4284 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4285 return;
4286 }
4287
4288 base = tbu->base;
4289 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
4290 val &= ~DEBUG_SID_HALT_VAL;
4291 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
4292
4293 tbu->halt_count = 0;
4294 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4295}
4296
4297static int qsmmuv500_halt_all(struct arm_smmu_device *smmu)
4298{
4299 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004300 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004301 int ret = 0;
4302
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004303 list_for_each_entry(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004304 ret = qsmmuv500_tbu_halt(tbu);
4305 if (ret)
4306 break;
4307 }
4308
4309 if (!ret)
4310 return 0;
4311
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004312 list_for_each_entry_continue_reverse(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004313 qsmmuv500_tbu_resume(tbu);
4314 }
4315 return ret;
4316}
4317
4318static void qsmmuv500_resume_all(struct arm_smmu_device *smmu)
4319{
4320 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004321 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004322
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004323 list_for_each_entry(tbu, &data->tbus, list) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004324 qsmmuv500_tbu_resume(tbu);
4325 }
4326}
4327
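/* Map a stream ID to the owning TBU using its qcom,stream-id-range window */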
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004328static struct qsmmuv500_tbu_device *qsmmuv500_find_tbu(
4329 struct arm_smmu_device *smmu, u32 sid)
4330{
4331 struct qsmmuv500_tbu_device *tbu = NULL;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004332 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004333
4334 list_for_each_entry(tbu, &data->tbus, list) {
4335 if (tbu->sid_start <= sid &&
4336 sid < tbu->sid_start + tbu->num_sids)
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07004337 return tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004338 }
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07004339 return NULL;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004340}
4341
Patrick Daly1f8a2882016-09-12 17:32:05 -07004342static void qsmmuv500_device_reset(struct arm_smmu_device *smmu)
4343{
4344 int i, ret;
4345 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
4346
4347 ret = qsmmuv500_tbu_power_on_all(smmu);
4348 if (ret)
4349 return;
4350
4351 /* Program implementation defined registers */
4352 qsmmuv500_halt_all(smmu);
4353 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
4354 writel_relaxed(regs[i].value,
4355 ARM_SMMU_GR0(smmu) + regs[i].offset);
4356 qsmmuv500_resume_all(smmu);
4357 qsmmuv500_tbu_power_off_all(smmu);
4358}
4359
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004360static int qsmmuv500_ecats_lock(struct arm_smmu_domain *smmu_domain,
4361 struct qsmmuv500_tbu_device *tbu,
4362 unsigned long *flags)
4363{
4364 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004365 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004366 u32 val;
4367
4368 spin_lock_irqsave(&smmu->atos_lock, *flags);
4369 /* The status register is not accessible on version 1.0 */
4370 if (data->version == 0x01000000)
4371 return 0;
4372
4373 if (readl_poll_timeout_atomic(tbu->status_reg,
4374 val, (val == 0x1), 0,
4375 TBU_DBG_TIMEOUT_US)) {
4376 dev_err(tbu->dev, "ECATS hw busy!\n");
4377 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4378 return -ETIMEDOUT;
4379 }
4380
4381 return 0;
4382}
4383
4384static void qsmmuv500_ecats_unlock(struct arm_smmu_domain *smmu_domain,
4385 struct qsmmuv500_tbu_device *tbu,
4386 unsigned long *flags)
4387{
4388 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004389 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004390
4391 /* The status register is not accessible on version 1.0 */
4392 if (data->version != 0x01000000)
4393 writel_relaxed(0, tbu->status_reg);
4394 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4395}
4396
4397/*
4398 * Zero means failure.
4399 */
4400static phys_addr_t qsmmuv500_iova_to_phys(
4401 struct iommu_domain *domain, dma_addr_t iova, u32 sid)
4402{
4403 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
4404 struct arm_smmu_device *smmu = smmu_domain->smmu;
4405 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
4406 struct qsmmuv500_tbu_device *tbu;
4407 int ret;
4408 phys_addr_t phys = 0;
4409 u64 val, fsr;
4410 unsigned long flags;
4411 void __iomem *cb_base;
4412 u32 sctlr_orig, sctlr;
4413 int needs_redo = 0;
4414
4415 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
4416 tbu = qsmmuv500_find_tbu(smmu, sid);
4417 if (!tbu)
4418 return 0;
4419
4420 ret = arm_smmu_power_on(tbu->pwr);
4421 if (ret)
4422 return 0;
4423
4424 /*
4425 * Disable client transactions & wait for existing operations to
4426 * complete.
4427 */
4428 ret = qsmmuv500_tbu_halt(tbu);
4429 if (ret)
4430 goto out_power_off;
4431
4432 /* Only one concurrent atos operation */
4433 ret = qsmmuv500_ecats_lock(smmu_domain, tbu, &flags);
4434 if (ret)
4435 goto out_resume;
4436
4437 /*
4438 * We can be called from an interrupt handler with FSR already set
 4439	 * so terminate the faulting transaction prior to starting ECATS.
 4440	 * No new racing faults can occur since we are in the halted state.
4441 * ECATS can trigger the fault interrupt, so disable it temporarily
4442 * and check for an interrupt manually.
4443 */
4444 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4445 if (fsr & FSR_FAULT) {
4446 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
4447 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4448 }
4449 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
4450 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
4451 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
4452
4453redo:
4454 /* Set address and stream-id */
4455 val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
4456 val |= sid & DEBUG_SID_HALT_SID_MASK;
4457 writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
4458 writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
4459
4460 /*
4461 * Write-back Read and Write-Allocate
 4462	 * Privileged, non-secure, data transaction
4463 * Read operation.
4464 */
4465 val = 0xF << DEBUG_TXN_AXCACHE_SHIFT;
4466 val |= 0x3 << DEBUG_TXN_AXPROT_SHIFT;
4467 val |= DEBUG_TXN_TRIGGER;
4468 writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);
4469
4470 ret = 0;
4471 if (readl_poll_timeout_atomic(tbu->base + DEBUG_SR_HALT_ACK_REG,
4472 val, !(val & DEBUG_SR_ECATS_RUNNING_VAL),
4473 0, TBU_DBG_TIMEOUT_US)) {
4474 dev_err(tbu->dev, "ECATS translation timed out!\n");
4475 }
4476
4477 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4478 if (fsr & FSR_FAULT) {
 4479		dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx\n",
 4480			fsr);
 4481		ret = -EINVAL;
 4482
 4483		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
4484 /*
4485 * Clear pending interrupts
4486 * Barrier required to ensure that the FSR is cleared
4487 * before resuming SMMU operation
4488 */
4489 wmb();
4490 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4491 }
4492
4493 val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
4494 if (val & DEBUG_PAR_FAULT_VAL) {
4495 dev_err(tbu->dev, "ECATS translation failed! PAR = %llx\n",
4496 val);
4497 ret = -EINVAL;
4498 }
4499
4500 phys = (val >> DEBUG_PAR_PA_SHIFT) & DEBUG_PAR_PA_MASK;
4501 if (ret < 0)
4502 phys = 0;
4503
4504 /* Reset hardware */
4505 writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
4506 writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
4507
4508 /*
4509 * After a failed translation, the next successful translation will
4510 * incorrectly be reported as a failure.
4511 */
4512 if (!phys && needs_redo++ < 2)
4513 goto redo;
4514
4515 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
4516 qsmmuv500_ecats_unlock(smmu_domain, tbu, &flags);
4517
4518out_resume:
4519 qsmmuv500_tbu_resume(tbu);
4520
4521out_power_off:
4522 arm_smmu_power_off(tbu->pwr);
4523
4524 return phys;
4525}
4526
4527static phys_addr_t qsmmuv500_iova_to_phys_hard(
4528 struct iommu_domain *domain, dma_addr_t iova)
4529{
4530 u16 sid;
4531 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
4532 struct iommu_fwspec *fwspec;
4533
4534 /* Select a sid */
4535 fwspec = smmu_domain->dev->iommu_fwspec;
4536 sid = (u16)fwspec->ids[0];
4537
4538 return qsmmuv500_iova_to_phys(domain, iova, sid);
4539}
4540
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004541static int qsmmuv500_tbu_register(struct device *dev, void *cookie)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004542{
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004543 struct arm_smmu_device *smmu = cookie;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004544 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004545 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004546
4547 if (!dev->driver) {
4548 dev_err(dev, "TBU failed probe, QSMMUV500 cannot continue!\n");
4549 return -EINVAL;
4550 }
4551
4552 tbu = dev_get_drvdata(dev);
4553
4554 INIT_LIST_HEAD(&tbu->list);
4555 tbu->smmu = smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004556 list_add(&tbu->list, &data->tbus);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004557 return 0;
4558}
4559
4560static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
4561{
Patrick Dalya0fddb62017-03-27 19:26:59 -07004562 struct resource *res;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004563 struct device *dev = smmu->dev;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004564 struct qsmmuv500_archdata *data;
Patrick Dalya0fddb62017-03-27 19:26:59 -07004565 struct platform_device *pdev;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004566 int ret;
4567
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004568 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
4569 if (!data)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004570 return -ENOMEM;
4571
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004572 INIT_LIST_HEAD(&data->tbus);
Patrick Dalya0fddb62017-03-27 19:26:59 -07004573
4574 pdev = container_of(dev, struct platform_device, dev);
4575 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcu-base");
4576 data->tcu_base = devm_ioremap_resource(dev, res);
4577 if (IS_ERR(data->tcu_base))
4578 return PTR_ERR(data->tcu_base);
4579
4580 data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004581 smmu->archdata = data;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004582
4583 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
4584 if (ret)
4585 return ret;
4586
4587 /* Attempt to register child devices */
4588 ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
4589 if (ret)
Patrick Daly6ce54262017-04-12 21:24:06 -07004590 return -EPROBE_DEFER;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004591
4592 return 0;
4593}
4594
4595struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
4596 .init = qsmmuv500_arch_init,
4597 .device_reset = qsmmuv500_device_reset,
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004598 .iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
Patrick Daly1f8a2882016-09-12 17:32:05 -07004599};
4600
4601static const struct of_device_id qsmmuv500_tbu_of_match[] = {
4602 {.compatible = "qcom,qsmmuv500-tbu"},
4603 {}
4604};
4605
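/*
 * Illustrative TBU child node for the probe below (a sketch, not taken from
 * any real devicetree): only the reg-names and the two qcom,stream-id-range
 * cells reflect what the code reads; addresses and values are placeholders.
 *
 *	tbu@15185000 {
 *		compatible = "qcom,qsmmuv500-tbu";
 *		reg = <0x15185000 0x1000>, <0x15180300 0x8>;
 *		reg-names = "base", "status-reg";
 *		qcom,stream-id-range = <0x800 0x400>;
 *	};
 */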
4606static int qsmmuv500_tbu_probe(struct platform_device *pdev)
4607{
4608 struct resource *res;
4609 struct device *dev = &pdev->dev;
4610 struct qsmmuv500_tbu_device *tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004611 const __be32 *cell;
4612 int len;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004613
4614 tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
4615 if (!tbu)
4616 return -ENOMEM;
4617
4618 INIT_LIST_HEAD(&tbu->list);
4619 tbu->dev = dev;
4620 spin_lock_init(&tbu->halt_lock);
4621
4622 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
4623 tbu->base = devm_ioremap_resource(dev, res);
4624 if (IS_ERR(tbu->base))
4625 return PTR_ERR(tbu->base);
4626
4627 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "status-reg");
4628 tbu->status_reg = devm_ioremap_resource(dev, res);
4629 if (IS_ERR(tbu->status_reg))
4630 return PTR_ERR(tbu->status_reg);
4631
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004632 cell = of_get_property(dev->of_node, "qcom,stream-id-range", &len);
4633 if (!cell || len < 8)
4634 return -EINVAL;
4635
4636 tbu->sid_start = of_read_number(cell, 1);
4637 tbu->num_sids = of_read_number(cell + 1, 1);
4638
Patrick Daly1f8a2882016-09-12 17:32:05 -07004639 tbu->pwr = arm_smmu_init_power_resources(pdev);
4640 if (IS_ERR(tbu->pwr))
4641 return PTR_ERR(tbu->pwr);
4642
4643 dev_set_drvdata(dev, tbu);
4644 return 0;
4645}
4646
4647static struct platform_driver qsmmuv500_tbu_driver = {
4648 .driver = {
4649 .name = "qsmmuv500-tbu",
4650 .of_match_table = of_match_ptr(qsmmuv500_tbu_of_match),
4651 },
4652 .probe = qsmmuv500_tbu_probe,
4653};
4654
Will Deacon45ae7cf2013-06-24 18:31:25 +01004655MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
4656MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
4657MODULE_LICENSE("GPL v2");