Will Deacon45ae7cf2013-06-24 18:31:25 +01001/*
2 * IOMMU API for ARM architected SMMU implementations.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
16 *
17 * Copyright (C) 2013 ARM Limited
18 *
19 * Author: Will Deacon <will.deacon@arm.com>
20 *
21 * This driver currently supports:
22 * - SMMUv1 and v2 implementations
23 * - Stream-matching and stream-indexing
24 * - v7/v8 long-descriptor format
25 * - Non-secure access to the SMMU
Will Deacon45ae7cf2013-06-24 18:31:25 +010026 * - Context fault reporting
27 */
28
29#define pr_fmt(fmt) "arm-smmu: " fmt
30
Robin Murphy468f4942016-09-12 17:13:49 +010031#include <linux/atomic.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010032#include <linux/delay.h>
Robin Murphy9adb9592016-01-26 18:06:36 +000033#include <linux/dma-iommu.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010034#include <linux/dma-mapping.h>
35#include <linux/err.h>
36#include <linux/interrupt.h>
37#include <linux/io.h>
Robin Murphyf9a05f02016-04-13 18:13:01 +010038#include <linux/io-64-nonatomic-hi-lo.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010039#include <linux/iommu.h>
Mitchel Humpherys859a7322014-10-29 21:13:40 +000040#include <linux/iopoll.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010041#include <linux/module.h>
42#include <linux/of.h>
Robin Murphybae2c2d2015-07-29 19:46:05 +010043#include <linux/of_address.h>
Robin Murphyfe52d4f2016-09-12 17:13:52 +010044#include <linux/of_device.h>
Robin Murphy06e393e2016-09-12 17:13:55 +010045#include <linux/of_iommu.h>
Will Deacona9a1b0b2014-05-01 18:05:08 +010046#include <linux/pci.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010047#include <linux/platform_device.h>
48#include <linux/slab.h>
49#include <linux/spinlock.h>
Patrick Dalyc11d1082016-09-01 15:52:44 -070050#include <soc/qcom/secure_buffer.h>
Patrick Daly1f8a2882016-09-12 17:32:05 -070051#include <linux/of_platform.h>
Patrick Daly2764f952016-09-06 19:22:44 -070052#include <linux/msm-bus.h>
53#include <dt-bindings/msm/msm-bus-ids.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010054
55#include <linux/amba/bus.h>
56
Will Deacon518f7132014-11-14 17:17:54 +000057#include "io-pgtable.h"
Will Deacon45ae7cf2013-06-24 18:31:25 +010058
Will Deacon45ae7cf2013-06-24 18:31:25 +010059/* Maximum number of context banks per SMMU */
60#define ARM_SMMU_MAX_CBS 128
61
Will Deacon45ae7cf2013-06-24 18:31:25 +010062/* SMMU global address space */
63#define ARM_SMMU_GR0(smmu) ((smmu)->base)
Will Deaconc757e852014-07-30 11:33:25 +010064#define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))
Will Deacon45ae7cf2013-06-24 18:31:25 +010065
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +000066/*
67 * SMMU global address space with conditional offset to access secure
68 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
69 * nsGFSYNR0: 0x450)
70 */
71#define ARM_SMMU_GR0_NS(smmu) \
72 ((smmu)->base + \
73 ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
74 ? 0x400 : 0))
75
Robin Murphyf9a05f02016-04-13 18:13:01 +010076/*
77 * Some 64-bit registers only make sense to write atomically, but in such
78 * cases all the data relevant to AArch32 formats lies within the lower word,
79 * therefore this actually makes more sense than it might first appear.
80 */
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010081#ifdef CONFIG_64BIT
Robin Murphyf9a05f02016-04-13 18:13:01 +010082#define smmu_write_atomic_lq writeq_relaxed
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010083#else
Robin Murphyf9a05f02016-04-13 18:13:01 +010084#define smmu_write_atomic_lq writel_relaxed
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010085#endif
86
Will Deacon45ae7cf2013-06-24 18:31:25 +010087/* Configuration registers */
88#define ARM_SMMU_GR0_sCR0 0x0
89#define sCR0_CLIENTPD (1 << 0)
90#define sCR0_GFRE (1 << 1)
91#define sCR0_GFIE (1 << 2)
92#define sCR0_GCFGFRE (1 << 4)
93#define sCR0_GCFGFIE (1 << 5)
94#define sCR0_USFCFG (1 << 10)
95#define sCR0_VMIDPNE (1 << 11)
96#define sCR0_PTM (1 << 12)
97#define sCR0_FB (1 << 13)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -080098#define sCR0_VMID16EN (1 << 31)
Will Deacon45ae7cf2013-06-24 18:31:25 +010099#define sCR0_BSU_SHIFT 14
100#define sCR0_BSU_MASK 0x3
101
Peng Fan3ca37122016-05-03 21:50:30 +0800102/* Auxiliary Configuration register */
103#define ARM_SMMU_GR0_sACR 0x10
104
Will Deacon45ae7cf2013-06-24 18:31:25 +0100105/* Identification registers */
106#define ARM_SMMU_GR0_ID0 0x20
107#define ARM_SMMU_GR0_ID1 0x24
108#define ARM_SMMU_GR0_ID2 0x28
109#define ARM_SMMU_GR0_ID3 0x2c
110#define ARM_SMMU_GR0_ID4 0x30
111#define ARM_SMMU_GR0_ID5 0x34
112#define ARM_SMMU_GR0_ID6 0x38
113#define ARM_SMMU_GR0_ID7 0x3c
114#define ARM_SMMU_GR0_sGFSR 0x48
115#define ARM_SMMU_GR0_sGFSYNR0 0x50
116#define ARM_SMMU_GR0_sGFSYNR1 0x54
117#define ARM_SMMU_GR0_sGFSYNR2 0x58
Will Deacon45ae7cf2013-06-24 18:31:25 +0100118
119#define ID0_S1TS (1 << 30)
120#define ID0_S2TS (1 << 29)
121#define ID0_NTS (1 << 28)
122#define ID0_SMS (1 << 27)
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000123#define ID0_ATOSNS (1 << 26)
Robin Murphy7602b872016-04-28 17:12:09 +0100124#define ID0_PTFS_NO_AARCH32 (1 << 25)
125#define ID0_PTFS_NO_AARCH32S (1 << 24)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100126#define ID0_CTTW (1 << 14)
127#define ID0_NUMIRPT_SHIFT 16
128#define ID0_NUMIRPT_MASK 0xff
Olav Haugan3c8766d2014-08-22 17:12:32 -0700129#define ID0_NUMSIDB_SHIFT 9
130#define ID0_NUMSIDB_MASK 0xf
Will Deacon45ae7cf2013-06-24 18:31:25 +0100131#define ID0_NUMSMRG_SHIFT 0
132#define ID0_NUMSMRG_MASK 0xff
133
134#define ID1_PAGESIZE (1 << 31)
135#define ID1_NUMPAGENDXB_SHIFT 28
136#define ID1_NUMPAGENDXB_MASK 7
137#define ID1_NUMS2CB_SHIFT 16
138#define ID1_NUMS2CB_MASK 0xff
139#define ID1_NUMCB_SHIFT 0
140#define ID1_NUMCB_MASK 0xff
141
142#define ID2_OAS_SHIFT 4
143#define ID2_OAS_MASK 0xf
144#define ID2_IAS_SHIFT 0
145#define ID2_IAS_MASK 0xf
146#define ID2_UBS_SHIFT 8
147#define ID2_UBS_MASK 0xf
148#define ID2_PTFS_4K (1 << 12)
149#define ID2_PTFS_16K (1 << 13)
150#define ID2_PTFS_64K (1 << 14)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800151#define ID2_VMID16 (1 << 15)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100152
Peng Fan3ca37122016-05-03 21:50:30 +0800153#define ID7_MAJOR_SHIFT 4
154#define ID7_MAJOR_MASK 0xf
Will Deacon45ae7cf2013-06-24 18:31:25 +0100155
Will Deacon45ae7cf2013-06-24 18:31:25 +0100156/* Global TLB invalidation */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100157#define ARM_SMMU_GR0_TLBIVMID 0x64
158#define ARM_SMMU_GR0_TLBIALLNSNH 0x68
159#define ARM_SMMU_GR0_TLBIALLH 0x6c
160#define ARM_SMMU_GR0_sTLBGSYNC 0x70
161#define ARM_SMMU_GR0_sTLBGSTATUS 0x74
162#define sTLBGSTATUS_GSACTIVE (1 << 0)
Mitchel Humpherys849aa502015-11-09 11:50:58 -0800163#define TLB_LOOP_TIMEOUT 500000 /* 500ms */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100164
165/* Stream mapping registers */
166#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
167#define SMR_VALID (1 << 31)
168#define SMR_MASK_SHIFT 16
Patrick Dalyda688822017-05-17 20:12:48 -0700169#define SMR_MASK_MASK 0x7FFF
Will Deacon45ae7cf2013-06-24 18:31:25 +0100170#define SMR_ID_SHIFT 0
Will Deacon45ae7cf2013-06-24 18:31:25 +0100171
172#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
173#define S2CR_CBNDX_SHIFT 0
174#define S2CR_CBNDX_MASK 0xff
175#define S2CR_TYPE_SHIFT 16
176#define S2CR_TYPE_MASK 0x3
Robin Murphya754fd12016-09-12 17:13:50 +0100177enum arm_smmu_s2cr_type {
178 S2CR_TYPE_TRANS,
179 S2CR_TYPE_BYPASS,
180 S2CR_TYPE_FAULT,
181};
182
183#define S2CR_PRIVCFG_SHIFT 24
184#define S2CR_PRIVCFG_MASK 0x3
185enum arm_smmu_s2cr_privcfg {
186 S2CR_PRIVCFG_DEFAULT,
187 S2CR_PRIVCFG_DIPAN,
188 S2CR_PRIVCFG_UNPRIV,
189 S2CR_PRIVCFG_PRIV,
190};
Will Deacon45ae7cf2013-06-24 18:31:25 +0100191
192/* Context bank attribute registers */
193#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
194#define CBAR_VMID_SHIFT 0
195#define CBAR_VMID_MASK 0xff
Will Deacon57ca90f2014-02-06 14:59:05 +0000196#define CBAR_S1_BPSHCFG_SHIFT 8
197#define CBAR_S1_BPSHCFG_MASK 3
198#define CBAR_S1_BPSHCFG_NSH 3
Will Deacon45ae7cf2013-06-24 18:31:25 +0100199#define CBAR_S1_MEMATTR_SHIFT 12
200#define CBAR_S1_MEMATTR_MASK 0xf
201#define CBAR_S1_MEMATTR_WB 0xf
202#define CBAR_TYPE_SHIFT 16
203#define CBAR_TYPE_MASK 0x3
204#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
205#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
206#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
207#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
208#define CBAR_IRPTNDX_SHIFT 24
209#define CBAR_IRPTNDX_MASK 0xff
210
Shalaj Jain04059c52015-03-03 13:34:59 -0800211#define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2))
212#define CBFRSYNRA_SID_MASK (0xffff)
213
Will Deacon45ae7cf2013-06-24 18:31:25 +0100214#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
215#define CBA2R_RW64_32BIT (0 << 0)
216#define CBA2R_RW64_64BIT (1 << 0)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800217#define CBA2R_VMID_SHIFT 16
218#define CBA2R_VMID_MASK 0xffff
Will Deacon45ae7cf2013-06-24 18:31:25 +0100219
220/* Translation context bank */
221#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
Will Deaconc757e852014-07-30 11:33:25 +0100222#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))
Will Deacon45ae7cf2013-06-24 18:31:25 +0100223
224#define ARM_SMMU_CB_SCTLR 0x0
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100225#define ARM_SMMU_CB_ACTLR 0x4
Will Deacon45ae7cf2013-06-24 18:31:25 +0100226#define ARM_SMMU_CB_RESUME 0x8
227#define ARM_SMMU_CB_TTBCR2 0x10
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +0100228#define ARM_SMMU_CB_TTBR0 0x20
229#define ARM_SMMU_CB_TTBR1 0x28
Will Deacon45ae7cf2013-06-24 18:31:25 +0100230#define ARM_SMMU_CB_TTBCR 0x30
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600231#define ARM_SMMU_CB_CONTEXTIDR 0x34
Will Deacon45ae7cf2013-06-24 18:31:25 +0100232#define ARM_SMMU_CB_S1_MAIR0 0x38
Will Deacon518f7132014-11-14 17:17:54 +0000233#define ARM_SMMU_CB_S1_MAIR1 0x3c
Robin Murphyf9a05f02016-04-13 18:13:01 +0100234#define ARM_SMMU_CB_PAR 0x50
Will Deacon45ae7cf2013-06-24 18:31:25 +0100235#define ARM_SMMU_CB_FSR 0x58
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -0700236#define ARM_SMMU_CB_FSRRESTORE 0x5c
Robin Murphyf9a05f02016-04-13 18:13:01 +0100237#define ARM_SMMU_CB_FAR 0x60
Will Deacon45ae7cf2013-06-24 18:31:25 +0100238#define ARM_SMMU_CB_FSYNR0 0x68
Will Deacon518f7132014-11-14 17:17:54 +0000239#define ARM_SMMU_CB_S1_TLBIVA 0x600
Will Deacon1463fe42013-07-31 19:21:27 +0100240#define ARM_SMMU_CB_S1_TLBIASID 0x610
Patrick Dalye7069342017-07-11 12:35:55 -0700241#define ARM_SMMU_CB_S1_TLBIALL 0x618
Will Deacon518f7132014-11-14 17:17:54 +0000242#define ARM_SMMU_CB_S1_TLBIVAL 0x620
243#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
244#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
Mitchel Humpherysf3007992015-06-19 15:00:14 -0700245#define ARM_SMMU_CB_TLBSYNC 0x7f0
246#define ARM_SMMU_CB_TLBSTATUS 0x7f4
247#define TLBSTATUS_SACTIVE (1 << 0)
Robin Murphy661d9622015-05-27 17:09:34 +0100248#define ARM_SMMU_CB_ATS1PR 0x800
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000249#define ARM_SMMU_CB_ATSR 0x8f0
Will Deacon45ae7cf2013-06-24 18:31:25 +0100250
251#define SCTLR_S1_ASIDPNE (1 << 12)
252#define SCTLR_CFCFG (1 << 7)
Charan Teja Reddyc682e472017-04-20 19:11:20 +0530253#define SCTLR_HUPCF (1 << 8)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100254#define SCTLR_CFIE (1 << 6)
255#define SCTLR_CFRE (1 << 5)
256#define SCTLR_E (1 << 4)
257#define SCTLR_AFE (1 << 2)
258#define SCTLR_TRE (1 << 1)
259#define SCTLR_M (1 << 0)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100260
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100261#define ARM_MMU500_ACTLR_CPRE (1 << 1)
262
Peng Fan3ca37122016-05-03 21:50:30 +0800263#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
264
Mitchel Humpherys952f40a2015-08-19 12:13:28 -0700265#define ARM_SMMU_IMPL_DEF0(smmu) \
266 ((smmu)->base + (2 * (1 << (smmu)->pgshift)))
267#define ARM_SMMU_IMPL_DEF1(smmu) \
268 ((smmu)->base + (6 * (1 << (smmu)->pgshift)))
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000269#define CB_PAR_F (1 << 0)
270
271#define ATSR_ACTIVE (1 << 0)
272
Will Deacon45ae7cf2013-06-24 18:31:25 +0100273#define RESUME_RETRY (0 << 0)
274#define RESUME_TERMINATE (1 << 0)
275
Will Deacon45ae7cf2013-06-24 18:31:25 +0100276#define TTBCR2_SEP_SHIFT 15
Will Deacon5dc56162015-05-08 17:44:22 +0100277#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100278
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +0100279#define TTBRn_ASID_SHIFT 48
Will Deacon45ae7cf2013-06-24 18:31:25 +0100280
281#define FSR_MULTI (1 << 31)
282#define FSR_SS (1 << 30)
283#define FSR_UUT (1 << 8)
284#define FSR_ASF (1 << 7)
285#define FSR_TLBLKF (1 << 6)
286#define FSR_TLBMCF (1 << 5)
287#define FSR_EF (1 << 4)
288#define FSR_PF (1 << 3)
289#define FSR_AFF (1 << 2)
290#define FSR_TF (1 << 1)
291
Mitchel Humpherys29073202014-07-08 09:52:18 -0700292#define FSR_IGN (FSR_AFF | FSR_ASF | \
293 FSR_TLBMCF | FSR_TLBLKF)
294#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
Will Deaconadaba322013-07-31 19:21:26 +0100295 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100296
297#define FSYNR0_WNR (1 << 4)
298
Will Deacon4cf740b2014-07-14 19:47:39 +0100299static int force_stage;
Robin Murphy25a1c962016-02-10 14:25:33 +0000300module_param(force_stage, int, S_IRUGO);
Will Deacon4cf740b2014-07-14 19:47:39 +0100301MODULE_PARM_DESC(force_stage,
302 "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
Patrick Dalya728cfd2016-11-15 17:49:29 -0800303static bool disable_bypass;
Robin Murphy25a1c962016-02-10 14:25:33 +0000304module_param(disable_bypass, bool, S_IRUGO);
305MODULE_PARM_DESC(disable_bypass,
306 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
Will Deacon4cf740b2014-07-14 19:47:39 +0100307
Robin Murphy09360402014-08-28 17:51:59 +0100308enum arm_smmu_arch_version {
Robin Murphyb7862e32016-04-13 18:13:03 +0100309 ARM_SMMU_V1,
310 ARM_SMMU_V1_64K,
Robin Murphy09360402014-08-28 17:51:59 +0100311 ARM_SMMU_V2,
312};
313
Robin Murphy67b65a32016-04-13 18:12:57 +0100314enum arm_smmu_implementation {
315 GENERIC_SMMU,
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100316 ARM_MMU500,
Robin Murphye086d912016-04-13 18:12:58 +0100317 CAVIUM_SMMUV2,
Patrick Dalyf0d4e212016-06-20 15:50:14 -0700318 QCOM_SMMUV2,
Patrick Daly1f8a2882016-09-12 17:32:05 -0700319 QCOM_SMMUV500,
Robin Murphy67b65a32016-04-13 18:12:57 +0100320};
321
abickett8d352ff2017-09-01 10:29:23 -0700322struct arm_smmu_device;
323struct arm_smmu_arch_ops {
324 int (*init)(struct arm_smmu_device *smmu);
325 void (*device_reset)(struct arm_smmu_device *smmu);
326 phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
327 dma_addr_t iova);
328};
329
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -0700330struct arm_smmu_impl_def_reg {
331 u32 offset;
332 u32 value;
333};
334
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -0700335/*
336 * attach_count
337 * The SMR and S2CR registers are only programmed when the number of
338 * devices attached to the iommu using these registers is > 0. This
339 * is required for the "SID switch" use case for secure display.
340 * Protected by stream_map_mutex.
341 */
Robin Murphya754fd12016-09-12 17:13:50 +0100342struct arm_smmu_s2cr {
Robin Murphy6668f692016-09-12 17:13:54 +0100343 struct iommu_group *group;
344 int count;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -0700345 int attach_count;
Robin Murphya754fd12016-09-12 17:13:50 +0100346 enum arm_smmu_s2cr_type type;
347 enum arm_smmu_s2cr_privcfg privcfg;
348 u8 cbndx;
Patrick Dalyda688822017-05-17 20:12:48 -0700349 bool cb_handoff;
Robin Murphya754fd12016-09-12 17:13:50 +0100350};
351
352#define s2cr_init_val (struct arm_smmu_s2cr){ \
353 .type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \
Patrick Dalyda688822017-05-17 20:12:48 -0700354 .cb_handoff = false, \
Robin Murphya754fd12016-09-12 17:13:50 +0100355}
356
Will Deacon45ae7cf2013-06-24 18:31:25 +0100357struct arm_smmu_smr {
Will Deacon45ae7cf2013-06-24 18:31:25 +0100358 u16 mask;
359 u16 id;
Robin Murphy468f4942016-09-12 17:13:49 +0100360 bool valid;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100361};
362
Will Deacona9a1b0b2014-05-01 18:05:08 +0100363struct arm_smmu_master_cfg {
Robin Murphyd5b41782016-09-14 15:21:39 +0100364 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +0100365 s16 smendx[];
Will Deacon45ae7cf2013-06-24 18:31:25 +0100366};
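/*
 * Per-master bookkeeping lives in the device's iommu fwspec: __fwspec_cfg()
 * recovers the arm_smmu_master_cfg, fwspec_smmu() the owning SMMU, and
 * for_each_cfg_sme() walks the stream-map entry (SMR/S2CR) index for each
 * of the master's stream IDs; entries that have not yet been allocated
 * read as INVALID_SMENDX.
 */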
Robin Murphy468f4942016-09-12 17:13:49 +0100367#define INVALID_SMENDX -1
Robin Murphy06e393e2016-09-12 17:13:55 +0100368#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
369#define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu)
Robin Murphye7595e52016-11-07 18:25:09 +0000370#define fwspec_smendx(fw, i) \
371 (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
Robin Murphy06e393e2016-09-12 17:13:55 +0100372#define for_each_cfg_sme(fw, i, idx) \
Robin Murphye7595e52016-11-07 18:25:09 +0000373 for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100374
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700375/*
376 * Describes resources required for on/off power operation.
377 * Separate reference count is provided for atomic/nonatomic
378 * operations.
379 */
380struct arm_smmu_power_resources {
381 struct platform_device *pdev;
382 struct device *dev;
383
384 struct clk **clocks;
385 int num_clocks;
386
387 struct regulator_bulk_data *gdscs;
388 int num_gdscs;
389
390 uint32_t bus_client;
391 struct msm_bus_scale_pdata *bus_dt_data;
392
393 /* Protects power_count */
394 struct mutex power_lock;
395 int power_count;
396
397 /* Protects clock_refs_count */
398 spinlock_t clock_refs_lock;
399 int clock_refs_count;
Prakash Guptafad87ca2017-05-16 12:13:02 +0530400 int regulator_defer;
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700401};
402
Will Deacon45ae7cf2013-06-24 18:31:25 +0100403struct arm_smmu_device {
404 struct device *dev;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100405
406 void __iomem *base;
407 unsigned long size;
Will Deaconc757e852014-07-30 11:33:25 +0100408 unsigned long pgshift;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100409
410#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
411#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1)
412#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
413#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
414#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000415#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800416#define ARM_SMMU_FEAT_VMID16 (1 << 6)
Robin Murphy7602b872016-04-28 17:12:09 +0100417#define ARM_SMMU_FEAT_FMT_AARCH64_4K (1 << 7)
418#define ARM_SMMU_FEAT_FMT_AARCH64_16K (1 << 8)
419#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
420#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
421#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100422 u32 features;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000423
424#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -0800425#define ARM_SMMU_OPT_FATAL_ASF (1 << 1)
Patrick Daly59b6d202017-06-12 13:12:15 -0700426#define ARM_SMMU_OPT_SKIP_INIT (1 << 2)
Patrick Dalyc190d932016-08-30 17:23:28 -0700427#define ARM_SMMU_OPT_DYNAMIC (1 << 3)
Patrick Daly4423d3e2017-05-04 18:17:51 -0700428#define ARM_SMMU_OPT_3LVL_TABLES (1 << 4)
Patrick Dalye7069342017-07-11 12:35:55 -0700429#define ARM_SMMU_OPT_NO_ASID_RETENTION (1 << 5)
Patrick Daly62ba1922017-08-30 16:47:18 -0700430#define ARM_SMMU_OPT_DISABLE_ATOS (1 << 6)
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000431 u32 options;
Robin Murphy09360402014-08-28 17:51:59 +0100432 enum arm_smmu_arch_version version;
Robin Murphy67b65a32016-04-13 18:12:57 +0100433 enum arm_smmu_implementation model;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100434
435 u32 num_context_banks;
436 u32 num_s2_context_banks;
437 DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
438 atomic_t irptndx;
439
440 u32 num_mapping_groups;
Robin Murphy53867802016-09-12 17:13:48 +0100441 u16 streamid_mask;
442 u16 smr_mask_mask;
Robin Murphy468f4942016-09-12 17:13:49 +0100443 struct arm_smmu_smr *smrs;
Robin Murphya754fd12016-09-12 17:13:50 +0100444 struct arm_smmu_s2cr *s2crs;
Robin Murphy6668f692016-09-12 17:13:54 +0100445 struct mutex stream_map_mutex;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100446
Will Deacon518f7132014-11-14 17:17:54 +0000447 unsigned long va_size;
448 unsigned long ipa_size;
449 unsigned long pa_size;
Robin Murphyd5466352016-05-09 17:20:09 +0100450 unsigned long pgsize_bitmap;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100451
452 u32 num_global_irqs;
453 u32 num_context_irqs;
454 unsigned int *irqs;
455
Patrick Daly8e3371a2017-02-13 22:14:53 -0800456 struct list_head list;
457
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800458 u32 cavium_id_base; /* Specific to Cavium */
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -0700459 /* Specific to QCOM */
460 struct arm_smmu_impl_def_reg *impl_def_attach_registers;
461 unsigned int num_impl_def_attach_registers;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -0800462
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700463 struct arm_smmu_power_resources *pwr;
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700464
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -0800465 spinlock_t atos_lock;
Patrick Dalyc190d932016-08-30 17:23:28 -0700466
467 /* protects idr */
468 struct mutex idr_mutex;
469 struct idr asid_idr;
Patrick Dalyd7476202016-09-08 18:23:28 -0700470
471 struct arm_smmu_arch_ops *arch_ops;
472 void *archdata;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100473};
474
Robin Murphy7602b872016-04-28 17:12:09 +0100475enum arm_smmu_context_fmt {
476 ARM_SMMU_CTX_FMT_NONE,
477 ARM_SMMU_CTX_FMT_AARCH64,
478 ARM_SMMU_CTX_FMT_AARCH32_L,
479 ARM_SMMU_CTX_FMT_AARCH32_S,
Will Deacon45ae7cf2013-06-24 18:31:25 +0100480};
481
482struct arm_smmu_cfg {
Will Deacon45ae7cf2013-06-24 18:31:25 +0100483 u8 cbndx;
484 u8 irptndx;
485 u32 cbar;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600486 u32 procid;
487 u16 asid;
Robin Murphy7602b872016-04-28 17:12:09 +0100488 enum arm_smmu_context_fmt fmt;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100489};
Dan Carpenterfaea13b72013-08-21 09:33:30 +0100490#define INVALID_IRPTNDX 0xff
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600491#define INVALID_CBNDX 0xff
492#define INVALID_ASID 0xffff
Patrick Dalyc190d932016-08-30 17:23:28 -0700493/*
494 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
495 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
496 */
497#define MAX_ASID 0xff
Will Deacon45ae7cf2013-06-24 18:31:25 +0100498
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600499#define ARM_SMMU_CB_ASID(smmu, cfg) ((cfg)->asid)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800500#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
Will Deaconecfadb62013-07-31 19:21:28 +0100501
Will Deaconc752ce42014-06-25 22:46:31 +0100502enum arm_smmu_domain_stage {
503 ARM_SMMU_DOMAIN_S1 = 0,
504 ARM_SMMU_DOMAIN_S2,
505 ARM_SMMU_DOMAIN_NESTED,
506};
507
Patrick Dalyc11d1082016-09-01 15:52:44 -0700508struct arm_smmu_pte_info {
509 void *virt_addr;
510 size_t size;
511 struct list_head entry;
512};
513
Will Deacon45ae7cf2013-06-24 18:31:25 +0100514struct arm_smmu_domain {
Will Deacon44680ee2014-06-25 11:29:12 +0100515 struct arm_smmu_device *smmu;
Patrick Dalyea63baa2017-02-13 17:11:33 -0800516 struct device *dev;
Will Deacon518f7132014-11-14 17:17:54 +0000517 struct io_pgtable_ops *pgtbl_ops;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -0700518 struct io_pgtable_cfg pgtbl_cfg;
Will Deacon518f7132014-11-14 17:17:54 +0000519 spinlock_t pgtbl_lock;
Will Deacon44680ee2014-06-25 11:29:12 +0100520 struct arm_smmu_cfg cfg;
Will Deaconc752ce42014-06-25 22:46:31 +0100521 enum arm_smmu_domain_stage stage;
Will Deacon518f7132014-11-14 17:17:54 +0000522 struct mutex init_mutex; /* Protects smmu pointer */
Patrick Dalyc190d932016-08-30 17:23:28 -0700523 u32 attributes;
Patrick Dalyc11d1082016-09-01 15:52:44 -0700524 u32 secure_vmid;
525 struct list_head pte_info_list;
526 struct list_head unassign_list;
Patrick Dalye271f212016-10-04 13:24:49 -0700527 struct mutex assign_lock;
Patrick Dalyb7dfda72016-10-04 14:42:58 -0700528 struct list_head secure_pool_list;
Joerg Roedel1d672632015-03-26 13:43:10 +0100529 struct iommu_domain domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100530};
531
Patrick Daly8e3371a2017-02-13 22:14:53 -0800532static DEFINE_SPINLOCK(arm_smmu_devices_lock);
533static LIST_HEAD(arm_smmu_devices);
534
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000535struct arm_smmu_option_prop {
536 u32 opt;
537 const char *prop;
538};
539
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800540static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
541
Robin Murphy7e96c742016-09-14 15:26:46 +0100542static bool using_legacy_binding, using_generic_binding;
543
Mitchel Humpherys29073202014-07-08 09:52:18 -0700544static struct arm_smmu_option_prop arm_smmu_options[] = {
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000545 { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -0800546 { ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
Patrick Daly59b6d202017-06-12 13:12:15 -0700547 { ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
Patrick Dalyc190d932016-08-30 17:23:28 -0700548 { ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
Patrick Daly4423d3e2017-05-04 18:17:51 -0700549 { ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
Patrick Dalye7069342017-07-11 12:35:55 -0700550 { ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
Patrick Daly62ba1922017-08-30 16:47:18 -0700551 { ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" },
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000552 { 0, NULL},
553};
554
Mitchel Humpherysb8be4132015-02-06 14:25:10 -0800555static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
556 dma_addr_t iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -0700557static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
558 dma_addr_t iova);
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -0600559static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -0800560
Patrick Dalyc11d1082016-09-01 15:52:44 -0700561static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
562static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
Patrick Dalye271f212016-10-04 13:24:49 -0700563static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -0700564static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);
565
abickett8d352ff2017-09-01 10:29:23 -0700566static int arm_smmu_arch_init(struct arm_smmu_device *smmu);
567static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu);
568
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -0700569static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
570 dma_addr_t iova);
571
Patrick Dalyef6c1dc2016-11-16 14:35:23 -0800572static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);
573
Patrick Dalyda688822017-05-17 20:12:48 -0700574static int arm_smmu_alloc_cb(struct iommu_domain *domain,
575 struct arm_smmu_device *smmu,
576 struct device *dev);
577
Joerg Roedel1d672632015-03-26 13:43:10 +0100578static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
579{
580 return container_of(dom, struct arm_smmu_domain, domain);
581}
582
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000583static void parse_driver_options(struct arm_smmu_device *smmu)
584{
585 int i = 0;
Mitchel Humpherys29073202014-07-08 09:52:18 -0700586
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000587 do {
588 if (of_property_read_bool(smmu->dev->of_node,
589 arm_smmu_options[i].prop)) {
590 smmu->options |= arm_smmu_options[i].opt;
Mitchel Humpherysba822582015-10-20 11:37:41 -0700591 dev_dbg(smmu->dev, "option %s\n",
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000592 arm_smmu_options[i].prop);
593 }
594 } while (arm_smmu_options[++i].opt);
595}
596
Patrick Dalyc190d932016-08-30 17:23:28 -0700597static bool is_dynamic_domain(struct iommu_domain *domain)
598{
599 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
600
601 return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
602}
603
Liam Mark53cf2342016-12-20 11:36:07 -0800604static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
605{
606 if (smmu_domain->attributes &
607 (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT))
608 return true;
609 else if (smmu_domain->smmu && smmu_domain->smmu->dev)
610 return smmu_domain->smmu->dev->archdata.dma_coherent;
611 else
612 return false;
613}
614
Patrick Dalye271f212016-10-04 13:24:49 -0700615static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
616{
617 return (smmu_domain->secure_vmid != VMID_INVAL);
618}
619
620static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
621{
622 if (arm_smmu_is_domain_secure(smmu_domain))
623 mutex_lock(&smmu_domain->assign_lock);
624}
625
626static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
627{
628 if (arm_smmu_is_domain_secure(smmu_domain))
629 mutex_unlock(&smmu_domain->assign_lock);
630}
631
Will Deacon8f68f8e2014-07-15 11:27:08 +0100632static struct device_node *dev_get_dev_node(struct device *dev)
Will Deacona9a1b0b2014-05-01 18:05:08 +0100633{
634 if (dev_is_pci(dev)) {
635 struct pci_bus *bus = to_pci_dev(dev)->bus;
Mitchel Humpherys29073202014-07-08 09:52:18 -0700636
Will Deacona9a1b0b2014-05-01 18:05:08 +0100637 while (!pci_is_root_bus(bus))
638 bus = bus->parent;
Robin Murphyd5b41782016-09-14 15:21:39 +0100639 return of_node_get(bus->bridge->parent->of_node);
Will Deacona9a1b0b2014-05-01 18:05:08 +0100640 }
641
Robin Murphyd5b41782016-09-14 15:21:39 +0100642 return of_node_get(dev->of_node);
Will Deacona9a1b0b2014-05-01 18:05:08 +0100643}
644
Robin Murphyd5b41782016-09-14 15:21:39 +0100645static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100646{
Robin Murphyd5b41782016-09-14 15:21:39 +0100647 *((__be32 *)data) = cpu_to_be32(alias);
648 return 0; /* Continue walking */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100649}
650
Robin Murphyd5b41782016-09-14 15:21:39 +0100651static int __find_legacy_master_phandle(struct device *dev, void *data)
Will Deacona9a1b0b2014-05-01 18:05:08 +0100652{
Robin Murphyd5b41782016-09-14 15:21:39 +0100653 struct of_phandle_iterator *it = *(void **)data;
654 struct device_node *np = it->node;
655 int err;
Will Deacona9a1b0b2014-05-01 18:05:08 +0100656
Robin Murphyd5b41782016-09-14 15:21:39 +0100657 of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
658 "#stream-id-cells", 0)
659 if (it->node == np) {
660 *(void **)data = dev;
661 return 1;
Olav Haugan3c8766d2014-08-22 17:12:32 -0700662 }
Robin Murphyd5b41782016-09-14 15:21:39 +0100663 it->node = np;
664 return err == -ENOENT ? 0 : err;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100665}
666
Robin Murphyfe52d4f2016-09-12 17:13:52 +0100667static struct platform_driver arm_smmu_driver;
Robin Murphy06e393e2016-09-12 17:13:55 +0100668static struct iommu_ops arm_smmu_ops;
Robin Murphyfe52d4f2016-09-12 17:13:52 +0100669
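/*
 * Handle the legacy "mmu-masters"/"#stream-id-cells" devicetree binding:
 * walk the registered SMMU instances to find the one whose node lists this
 * master, initialise the device's iommu fwspec against it, and record the
 * master's stream IDs (for PCI devices the Requester ID is used as the
 * Stream ID).
 */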
Robin Murphy06e393e2016-09-12 17:13:55 +0100670static int arm_smmu_register_legacy_master(struct device *dev,
671 struct arm_smmu_device **smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100672{
Robin Murphy06e393e2016-09-12 17:13:55 +0100673 struct device *smmu_dev;
Robin Murphyd5b41782016-09-14 15:21:39 +0100674 struct device_node *np;
675 struct of_phandle_iterator it;
676 void *data = &it;
Robin Murphy06e393e2016-09-12 17:13:55 +0100677 u32 *sids;
Robin Murphyd5b41782016-09-14 15:21:39 +0100678 __be32 pci_sid;
679 int err = 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100680
Stephen Boydfecdeef2017-03-01 16:53:19 -0800681 memset(&it, 0, sizeof(it));
Robin Murphyd5b41782016-09-14 15:21:39 +0100682 np = dev_get_dev_node(dev);
683 if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
684 of_node_put(np);
685 return -ENODEV;
686 }
687
688 it.node = np;
Robin Murphyfe52d4f2016-09-12 17:13:52 +0100689 err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
690 __find_legacy_master_phandle);
Robin Murphy06e393e2016-09-12 17:13:55 +0100691 smmu_dev = data;
Robin Murphyd5b41782016-09-14 15:21:39 +0100692 of_node_put(np);
693 if (err == 0)
694 return -ENODEV;
695 if (err < 0)
696 return err;
Will Deacon44680ee2014-06-25 11:29:12 +0100697
Robin Murphyd5b41782016-09-14 15:21:39 +0100698 if (dev_is_pci(dev)) {
699 /* "mmu-masters" assumes Stream ID == Requester ID */
700 pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
701 &pci_sid);
702 it.cur = &pci_sid;
703 it.cur_count = 1;
704 }
705
Robin Murphy06e393e2016-09-12 17:13:55 +0100706 err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
707 &arm_smmu_ops);
708 if (err)
709 return err;
710
711 sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
712 if (!sids)
Robin Murphyd5b41782016-09-14 15:21:39 +0100713 return -ENOMEM;
714
Robin Murphy06e393e2016-09-12 17:13:55 +0100715 *smmu = dev_get_drvdata(smmu_dev);
716 of_phandle_iterator_args(&it, sids, it.cur_count);
717 err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
718 kfree(sids);
719 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100720}
721
722static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
723{
724 int idx;
725
726 do {
727 idx = find_next_zero_bit(map, end, start);
728 if (idx == end)
729 return -ENOSPC;
730 } while (test_and_set_bit(idx, map));
731
732 return idx;
733}
734
735static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
736{
737 clear_bit(idx, map);
738}
739
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700740static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700741{
742 int i, ret = 0;
743
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700744 for (i = 0; i < pwr->num_clocks; ++i) {
745 ret = clk_prepare(pwr->clocks[i]);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700746 if (ret) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700747 dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700748 while (i--)
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700749 clk_unprepare(pwr->clocks[i]);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700750 break;
751 }
752 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700753 return ret;
754}
755
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700756static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700757{
758 int i;
759
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700760 for (i = pwr->num_clocks; i; --i)
761 clk_unprepare(pwr->clocks[i - 1]);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700762}
763
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700764static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -0700765{
766 int i, ret = 0;
Patrick Daly8befb662016-08-17 20:03:28 -0700767
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700768 for (i = 0; i < pwr->num_clocks; ++i) {
769 ret = clk_enable(pwr->clocks[i]);
Patrick Daly8befb662016-08-17 20:03:28 -0700770 if (ret) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700771 dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
Patrick Daly8befb662016-08-17 20:03:28 -0700772 while (i--)
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700773 clk_disable(pwr->clocks[i]);
Patrick Daly8befb662016-08-17 20:03:28 -0700774 break;
775 }
776 }
777
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700778 return ret;
779}
Patrick Daly8befb662016-08-17 20:03:28 -0700780
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700781static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
782{
783 int i;
784
785 for (i = pwr->num_clocks; i; --i)
786 clk_disable(pwr->clocks[i - 1]);
787}
788
789static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
790{
791 if (!pwr->bus_client)
792 return 0;
793 return msm_bus_scale_client_update_request(pwr->bus_client, 1);
794}
795
796static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
797{
798 if (!pwr->bus_client)
799 return;
800 WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
801}
802
Patrick Dalyb26f97c2017-08-11 15:24:20 -0700803static int arm_smmu_enable_regulators(struct arm_smmu_power_resources *pwr)
804{
805 struct regulator_bulk_data *consumers;
806 int num_consumers, ret;
807 int i;
808
809 num_consumers = pwr->num_gdscs;
810 consumers = pwr->gdscs;
811 for (i = 0; i < num_consumers; i++) {
812 ret = regulator_enable(consumers[i].consumer);
813 if (ret)
814 goto out;
815 }
816 return 0;
817
818out:
819 i -= 1;
820 for (; i >= 0; i--)
821 regulator_disable(consumers[i].consumer);
822 return ret;
823}
824
Prakash Guptafad87ca2017-05-16 12:13:02 +0530825static int arm_smmu_disable_regulators(struct arm_smmu_power_resources *pwr)
826{
827 struct regulator_bulk_data *consumers;
828 int i;
829 int num_consumers, ret, r;
830
831 num_consumers = pwr->num_gdscs;
832 consumers = pwr->gdscs;
833 for (i = num_consumers - 1; i >= 0; --i) {
834 ret = regulator_disable_deferred(consumers[i].consumer,
835 pwr->regulator_defer);
836 if (ret != 0)
837 goto err;
838 }
839
840 return 0;
841
842err:
843 pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret);
844 for (++i; i < num_consumers; ++i) {
845 r = regulator_enable(consumers[i].consumer);
846 if (r != 0)
 847 pr_err("Failed to re-enable %s: %d\n",
848 consumers[i].supply, r);
849 }
850
851 return ret;
852}
853
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700854/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
855static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
856{
857 int ret = 0;
858 unsigned long flags;
859
860 spin_lock_irqsave(&pwr->clock_refs_lock, flags);
861 if (pwr->clock_refs_count > 0) {
862 pwr->clock_refs_count++;
863 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
864 return 0;
865 }
866
867 ret = arm_smmu_enable_clocks(pwr);
868 if (!ret)
869 pwr->clock_refs_count = 1;
870
871 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
Patrick Daly8befb662016-08-17 20:03:28 -0700872 return ret;
873}
874
875/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700876static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -0700877{
Patrick Daly8befb662016-08-17 20:03:28 -0700878 unsigned long flags;
879
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700880 spin_lock_irqsave(&pwr->clock_refs_lock, flags);
881 if (pwr->clock_refs_count == 0) {
882 WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
883 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
884 return;
885
886 } else if (pwr->clock_refs_count > 1) {
887 pwr->clock_refs_count--;
888 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
Patrick Daly8befb662016-08-17 20:03:28 -0700889 return;
890 }
891
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700892 arm_smmu_disable_clocks(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700893
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700894 pwr->clock_refs_count = 0;
895 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
Patrick Daly8befb662016-08-17 20:03:28 -0700896}
897
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700898static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700899{
900 int ret;
901
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700902 mutex_lock(&pwr->power_lock);
903 if (pwr->power_count > 0) {
904 pwr->power_count += 1;
905 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700906 return 0;
907 }
908
Patrick Daly8e2aa1a2017-04-13 17:09:43 -0700909 ret = arm_smmu_request_bus(pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -0700910 if (ret)
911 goto out_unlock;
912
Patrick Dalyb26f97c2017-08-11 15:24:20 -0700913 ret = arm_smmu_enable_regulators(pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -0700914 if (ret)
Patrick Daly8e2aa1a2017-04-13 17:09:43 -0700915 goto out_disable_bus;
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700916
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700917 ret = arm_smmu_prepare_clocks(pwr);
Patrick Daly2764f952016-09-06 19:22:44 -0700918 if (ret)
Patrick Daly8e2aa1a2017-04-13 17:09:43 -0700919 goto out_disable_regulators;
Patrick Daly2764f952016-09-06 19:22:44 -0700920
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700921 pwr->power_count = 1;
922 mutex_unlock(&pwr->power_lock);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -0700923 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700924
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -0700925out_disable_regulators:
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700926 regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
Patrick Daly8e2aa1a2017-04-13 17:09:43 -0700927out_disable_bus:
928 arm_smmu_unrequest_bus(pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -0700929out_unlock:
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700930 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700931 return ret;
932}
933
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700934static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700935{
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700936 mutex_lock(&pwr->power_lock);
937 if (pwr->power_count == 0) {
938 WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
939 mutex_unlock(&pwr->power_lock);
940 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700941
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700942 } else if (pwr->power_count > 1) {
943 pwr->power_count--;
944 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700945 return;
946 }
947
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700948 arm_smmu_unprepare_clocks(pwr);
Prakash Guptafad87ca2017-05-16 12:13:02 +0530949 arm_smmu_disable_regulators(pwr);
Patrick Daly8e2aa1a2017-04-13 17:09:43 -0700950 arm_smmu_unrequest_bus(pwr);
Patrick Daly2e3471e2017-04-13 16:24:33 -0700951 pwr->power_count = 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700952 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700953}
954
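/*
 * Full power-on: grab the sleepable resources first (bus vote, regulators
 * and clock prepare under power_lock), then do the atomic-safe clock
 * enables under clock_refs_lock. arm_smmu_power_off() releases the two
 * halves in the reverse order.
 */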
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700955static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -0700956{
957 int ret;
958
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700959 ret = arm_smmu_power_on_slow(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700960 if (ret)
961 return ret;
962
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700963 ret = arm_smmu_power_on_atomic(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700964 if (ret)
965 goto out_disable;
966
967 return 0;
968
969out_disable:
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700970 arm_smmu_power_off_slow(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700971 return ret;
972}
973
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700974static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -0700975{
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700976 arm_smmu_power_off_atomic(pwr);
977 arm_smmu_power_off_slow(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700978}
979
980/*
981 * Must be used instead of arm_smmu_power_on if it may be called from
982 * atomic context
983 */
984static int arm_smmu_domain_power_on(struct iommu_domain *domain,
985 struct arm_smmu_device *smmu)
986{
987 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
988 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
989
990 if (atomic_domain)
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700991 return arm_smmu_power_on_atomic(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700992
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700993 return arm_smmu_power_on(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700994}
995
996/*
 997 * Must be used instead of arm_smmu_power_off if it may be called from
998 * atomic context
999 */
1000static void arm_smmu_domain_power_off(struct iommu_domain *domain,
1001 struct arm_smmu_device *smmu)
1002{
1003 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1004 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
1005
1006 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001007 arm_smmu_power_off_atomic(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001008 return;
1009 }
1010
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001011 arm_smmu_power_off(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001012}
1013
Will Deacon45ae7cf2013-06-24 18:31:25 +01001014/* Wait for any pending TLB invalidations to complete */
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001015static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
1016 int cbndx)
1017{
1018 void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
1019 u32 val;
1020
1021 writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
1022 if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
1023 !(val & TLBSTATUS_SACTIVE),
Mitchel Humpherys9b1b8942015-06-25 18:17:15 -07001024 0, TLB_LOOP_TIMEOUT))
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001025 dev_err(smmu->dev, "TLBSYNC timeout!\n");
1026}
1027
Will Deacon518f7132014-11-14 17:17:54 +00001028static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001029{
1030 int count = 0;
1031 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1032
1033 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
1034 while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
1035 & sTLBGSTATUS_GSACTIVE) {
1036 cpu_relax();
1037 if (++count == TLB_LOOP_TIMEOUT) {
1038 dev_err_ratelimited(smmu->dev,
1039 "TLB sync timed out -- SMMU may be deadlocked\n");
1040 return;
1041 }
1042 udelay(1);
1043 }
1044}
1045
Will Deacon518f7132014-11-14 17:17:54 +00001046static void arm_smmu_tlb_sync(void *cookie)
Will Deacon1463fe42013-07-31 19:21:27 +01001047{
Will Deacon518f7132014-11-14 17:17:54 +00001048 struct arm_smmu_domain *smmu_domain = cookie;
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001049 arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
Will Deacon518f7132014-11-14 17:17:54 +00001050}
1051
Patrick Daly8befb662016-08-17 20:03:28 -07001052/* Must be called with clocks/regulators enabled */
Will Deacon518f7132014-11-14 17:17:54 +00001053static void arm_smmu_tlb_inv_context(void *cookie)
1054{
1055 struct arm_smmu_domain *smmu_domain = cookie;
Will Deacon44680ee2014-06-25 11:29:12 +01001056 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1057 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon1463fe42013-07-31 19:21:27 +01001058 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
Will Deacon518f7132014-11-14 17:17:54 +00001059 void __iomem *base;
Patrick Dalye7069342017-07-11 12:35:55 -07001060 bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
Will Deacon1463fe42013-07-31 19:21:27 +01001061
Patrick Dalye7069342017-07-11 12:35:55 -07001062 if (stage1 && !use_tlbiall) {
Will Deacon1463fe42013-07-31 19:21:27 +01001063 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001064 writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +01001065 base + ARM_SMMU_CB_S1_TLBIASID);
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001066 arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
Patrick Dalye7069342017-07-11 12:35:55 -07001067 } else if (stage1 && use_tlbiall) {
1068 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1069 writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
1070 arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001071 } else {
1072 base = ARM_SMMU_GR0(smmu);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001073 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +01001074 base + ARM_SMMU_GR0_TLBIVMID);
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001075 __arm_smmu_tlb_sync(smmu);
Will Deacon1463fe42013-07-31 19:21:27 +01001076 }
Will Deacon1463fe42013-07-31 19:21:27 +01001077}
1078
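/*
 * Invalidate a range of IOVAs one translation granule at a time. Stage 1
 * contexts use TLBIVA/TLBIVAL by ASID (or a blunt TLBIALL when ASID
 * retention is disabled); stage 2 on SMMUv2 uses TLBIIPAS2(L), and older
 * implementations fall back to invalidating the whole VMID.
 */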
Will Deacon518f7132014-11-14 17:17:54 +00001079static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
Robin Murphy06c610e2015-12-07 18:18:53 +00001080 size_t granule, bool leaf, void *cookie)
Will Deacon518f7132014-11-14 17:17:54 +00001081{
1082 struct arm_smmu_domain *smmu_domain = cookie;
1083 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1084 struct arm_smmu_device *smmu = smmu_domain->smmu;
1085 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1086 void __iomem *reg;
Patrick Dalye7069342017-07-11 12:35:55 -07001087 bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
Will Deacon518f7132014-11-14 17:17:54 +00001088
Patrick Dalye7069342017-07-11 12:35:55 -07001089 if (stage1 && !use_tlbiall) {
Will Deacon518f7132014-11-14 17:17:54 +00001090 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1091 reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
1092
Robin Murphy7602b872016-04-28 17:12:09 +01001093 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001094 iova &= ~12UL;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001095 iova |= ARM_SMMU_CB_ASID(smmu, cfg);
Robin Murphy75df1382015-12-07 18:18:52 +00001096 do {
1097 writel_relaxed(iova, reg);
1098 iova += granule;
1099 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001100 } else {
1101 iova >>= 12;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001102 iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
Robin Murphy75df1382015-12-07 18:18:52 +00001103 do {
1104 writeq_relaxed(iova, reg);
1105 iova += granule >> 12;
1106 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001107 }
Patrick Dalye7069342017-07-11 12:35:55 -07001108 } else if (stage1 && use_tlbiall) {
1109 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1110 reg += ARM_SMMU_CB_S1_TLBIALL;
1111 writel_relaxed(0, reg);
Will Deacon518f7132014-11-14 17:17:54 +00001112 } else if (smmu->version == ARM_SMMU_V2) {
1113 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1114 reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
1115 ARM_SMMU_CB_S2_TLBIIPAS2;
Robin Murphy75df1382015-12-07 18:18:52 +00001116 iova >>= 12;
1117 do {
Robin Murphyf9a05f02016-04-13 18:13:01 +01001118 smmu_write_atomic_lq(iova, reg);
Robin Murphy75df1382015-12-07 18:18:52 +00001119 iova += granule >> 12;
1120 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001121 } else {
1122 reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001123 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
Will Deacon518f7132014-11-14 17:17:54 +00001124 }
1125}
1126
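/*
 * Page-table memory for secure domains is assigned to the secure VM (see
 * arm_smmu_prepare_pgtable()/arm_smmu_unprepare_pgtable()). Freed table
 * pages are parked on a per-domain secure pool so that a chunk of the same
 * size can be reused without another assign/unassign cycle; the pool is
 * drained by arm_smmu_secure_pool_destroy().
 */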
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001127struct arm_smmu_secure_pool_chunk {
1128 void *addr;
1129 size_t size;
1130 struct list_head list;
1131};
1132
1133static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
1134 size_t size)
1135{
1136 struct arm_smmu_secure_pool_chunk *it;
1137
1138 list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
1139 if (it->size == size) {
1140 void *addr = it->addr;
1141
1142 list_del(&it->list);
1143 kfree(it);
1144 return addr;
1145 }
1146 }
1147
1148 return NULL;
1149}
1150
1151static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
1152 void *addr, size_t size)
1153{
1154 struct arm_smmu_secure_pool_chunk *chunk;
1155
1156 chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
1157 if (!chunk)
1158 return -ENOMEM;
1159
1160 chunk->addr = addr;
1161 chunk->size = size;
1162 memset(addr, 0, size);
1163 list_add(&chunk->list, &smmu_domain->secure_pool_list);
1164
1165 return 0;
1166}
1167
1168static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
1169{
1170 struct arm_smmu_secure_pool_chunk *it, *i;
1171
1172 list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
1173 arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
1174 /* pages will be freed later (after being unassigned) */
1175 kfree(it);
1176 }
1177}
1178
Patrick Dalyc11d1082016-09-01 15:52:44 -07001179static void *arm_smmu_alloc_pages_exact(void *cookie,
1180 size_t size, gfp_t gfp_mask)
1181{
1182 int ret;
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001183 void *page;
1184 struct arm_smmu_domain *smmu_domain = cookie;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001185
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001186 if (!arm_smmu_is_domain_secure(smmu_domain))
1187 return alloc_pages_exact(size, gfp_mask);
1188
1189 page = arm_smmu_secure_pool_remove(smmu_domain, size);
1190 if (page)
1191 return page;
1192
1193 page = alloc_pages_exact(size, gfp_mask);
1194 if (page) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07001195 ret = arm_smmu_prepare_pgtable(page, cookie);
1196 if (ret) {
1197 free_pages_exact(page, size);
1198 return NULL;
1199 }
1200 }
1201
1202 return page;
1203}
1204
1205static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
1206{
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001207 struct arm_smmu_domain *smmu_domain = cookie;
1208
1209 if (!arm_smmu_is_domain_secure(smmu_domain)) {
1210 free_pages_exact(virt, size);
1211 return;
1212 }
1213
1214 if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
1215 arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001216}
1217
Will Deacon518f7132014-11-14 17:17:54 +00001218static struct iommu_gather_ops arm_smmu_gather_ops = {
1219 .tlb_flush_all = arm_smmu_tlb_inv_context,
1220 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
1221 .tlb_sync = arm_smmu_tlb_sync,
Patrick Dalyc11d1082016-09-01 15:52:44 -07001222 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
1223 .free_pages_exact = arm_smmu_free_pages_exact,
Will Deacon518f7132014-11-14 17:17:54 +00001224};
1225
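/*
 * Cross-check the hardware ATOS translation of a faulting IOVA before and
 * after a full TLB invalidate: if the two results differ, the fault was
 * most likely caused by a stale TLB entry rather than a missing mapping.
 */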
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001226static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
1227 dma_addr_t iova, u32 fsr)
1228{
1229 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001230 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001231 phys_addr_t phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07001232 phys_addr_t phys_post_tlbiall;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001233
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001234 phys = arm_smmu_iova_to_phys_hard(domain, iova);
1235 arm_smmu_tlb_inv_context(smmu_domain);
1236 phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001237
Patrick Dalyad441dd2016-09-15 15:50:46 -07001238 if (phys != phys_post_tlbiall) {
1239 dev_err(smmu->dev,
1240 "ATOS results differed across TLBIALL...\n"
1241 "Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
1242 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001243
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001244 return (phys == 0 ? phys_post_tlbiall : phys);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001245}
1246
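/*
 * Context-bank fault handler: power the SMMU on, decode FSR/FSYNR0/FAR and
 * the faulting SID, give the client's registered handler first refusal via
 * report_iommu_fault(), and fall back to rate-limited error logging when
 * the client does not handle the fault.
 */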
Will Deacon45ae7cf2013-06-24 18:31:25 +01001247static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
1248{
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001249 int flags, ret, tmp;
Patrick Daly5ba28112016-08-30 19:18:52 -07001250 u32 fsr, fsynr, resume;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001251 unsigned long iova;
1252 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +01001253 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001254 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1255 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001256 void __iomem *cb_base;
Shalaj Jain04059c52015-03-03 13:34:59 -08001257 void __iomem *gr1_base;
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001258 bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001259 phys_addr_t phys_soft;
Shalaj Jain04059c52015-03-03 13:34:59 -08001260 u32 frsynra;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001261 bool non_fatal_fault = !!(smmu_domain->attributes &
Sudarshan Rajagopalanf4464e02017-08-10 14:30:39 -07001262 (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001263
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001264 static DEFINE_RATELIMIT_STATE(_rs,
1265 DEFAULT_RATELIMIT_INTERVAL,
1266 DEFAULT_RATELIMIT_BURST);
1267
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001268 ret = arm_smmu_power_on(smmu->pwr);
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001269 if (ret)
1270 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001271
Shalaj Jain04059c52015-03-03 13:34:59 -08001272 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001273 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001274 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
1275
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001276 if (!(fsr & FSR_FAULT)) {
1277 ret = IRQ_NONE;
1278 goto out_power_off;
1279 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001280
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001281 if (fatal_asf && (fsr & FSR_ASF)) {
1282 dev_err(smmu->dev,
1283 "Took an address size fault. Refusing to recover.\n");
1284 BUG();
1285 }
1286
Will Deacon45ae7cf2013-06-24 18:31:25 +01001287 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
Patrick Daly5ba28112016-08-30 19:18:52 -07001288 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001289 if (fsr & FSR_TF)
1290 flags |= IOMMU_FAULT_TRANSLATION;
1291 if (fsr & FSR_PF)
1292 flags |= IOMMU_FAULT_PERMISSION;
Mitchel Humpherysdd75e332015-08-19 11:02:33 -07001293 if (fsr & FSR_EF)
1294 flags |= IOMMU_FAULT_EXTERNAL;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001295 if (fsr & FSR_SS)
1296 flags |= IOMMU_FAULT_TRANSACTION_STALLED;
Patrick Daly5ba28112016-08-30 19:18:52 -07001297
Robin Murphyf9a05f02016-04-13 18:13:01 +01001298 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001299 phys_soft = arm_smmu_iova_to_phys(domain, iova);
Shalaj Jain04059c52015-03-03 13:34:59 -08001300 frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
1301 frsynra &= CBFRSYNRA_SID_MASK;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001302 tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
1303 if (!tmp || (tmp == -EBUSY)) {
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001304 dev_dbg(smmu->dev,
1305 "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1306 iova, fsr, fsynr, cfg->cbndx);
1307 dev_dbg(smmu->dev,
1308 "soft iova-to-phys=%pa\n", &phys_soft);
Patrick Daly5ba28112016-08-30 19:18:52 -07001309 ret = IRQ_HANDLED;
Shrenuj Bansald5083c02015-09-18 14:59:09 -07001310 resume = RESUME_TERMINATE;
Patrick Daly5ba28112016-08-30 19:18:52 -07001311 } else {
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001312 phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
1313 fsr);
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001314 if (__ratelimit(&_rs)) {
1315 dev_err(smmu->dev,
1316 "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1317 iova, fsr, fsynr, cfg->cbndx);
1318 dev_err(smmu->dev, "FAR = %016lx\n",
1319 (unsigned long)iova);
1320 dev_err(smmu->dev,
1321 "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
1322 fsr,
1323 (fsr & 0x02) ? "TF " : "",
1324 (fsr & 0x04) ? "AFF " : "",
1325 (fsr & 0x08) ? "PF " : "",
1326 (fsr & 0x10) ? "EF " : "",
1327 (fsr & 0x20) ? "TLBMCF " : "",
1328 (fsr & 0x40) ? "TLBLKF " : "",
1329 (fsr & 0x80) ? "MHF " : "",
1330 (fsr & 0x40000000) ? "SS " : "",
1331 (fsr & 0x80000000) ? "MULTI " : "");
1332 dev_err(smmu->dev,
1333 "soft iova-to-phys=%pa\n", &phys_soft);
Mitchel Humpherysd03b65d2015-11-05 11:50:29 -08001334 if (!phys_soft)
1335 dev_err(smmu->dev,
1336 "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
1337 dev_name(smmu->dev));
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001338 if (phys_atos)
1339 dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
1340 &phys_atos);
1341 else
1342 dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001343 dev_err(smmu->dev, "SID=0x%x\n", frsynra);
1344 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001345 ret = IRQ_NONE;
1346 resume = RESUME_TERMINATE;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001347 if (!non_fatal_fault) {
1348 dev_err(smmu->dev,
1349 "Unhandled arm-smmu context fault!\n");
1350 BUG();
1351 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001352 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001353
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001354 /*
1355 * If the client returns -EBUSY, do not clear FSR and do not RESUME
1356 * if stalled. This is required to keep the IOMMU client stalled on
1357 * the outstanding fault. This gives the client a chance to take any
1358 * debug action and then terminate the stalled transaction.
1359 * So, the sequence in case of stall on fault should be:
1360 * 1) Do not clear FSR or write to RESUME here
1361 * 2) Client takes any debug action
1362 * 3) Client terminates the stalled transaction and resumes the IOMMU
1363 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1364 * not before so that the fault remains outstanding. This ensures
1365 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1366 * need to be terminated.
1367 */
1368 if (tmp != -EBUSY) {
1369 /* Clear the faulting FSR */
1370 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001371
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001372 /*
1373 * Barrier required to ensure that the FSR is cleared
1374 * before resuming SMMU operation
1375 */
1376 wmb();
1377
1378 /* Retry or terminate any stalled transactions */
1379 if (fsr & FSR_SS)
1380 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1381 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001382
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001383out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001384 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001385
Patrick Daly5ba28112016-08-30 19:18:52 -07001386 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001387}
1388
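/*
 * Global fault handler: snapshot sGFSR/sGFSYNRn, report the fault (rate
 * limited) and acknowledge it by writing the value back to sGFSR.
 */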
1389static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1390{
1391 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1392 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001393 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001394
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001395 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001396 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001397
Will Deacon45ae7cf2013-06-24 18:31:25 +01001398 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1399 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1400 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1401 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1402
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001403 if (!gfsr) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001404 arm_smmu_power_off(smmu->pwr);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001405 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001406 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001407
Will Deacon45ae7cf2013-06-24 18:31:25 +01001408 dev_err_ratelimited(smmu->dev,
1409 "Unexpected global fault, this could be serious\n");
1410 dev_err_ratelimited(smmu->dev,
1411 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1412 gfsr, gfsynr0, gfsynr1, gfsynr2);
1413
1414 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001415 arm_smmu_power_off(smmu->pwr);
Will Deaconadaba322013-07-31 19:21:26 +01001416 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001417}
1418
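/*
 * Program a context bank from the finalised io-pgtable configuration:
 * CBA2R/CBAR, TTBRs, TTBCR, MAIRs and finally SCTLR, covering the AArch32
 * short-descriptor, LPAE and AArch64 register layouts.
 */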
Will Deacon518f7132014-11-14 17:17:54 +00001419static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1420 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001421{
Robin Murphyb94df6f2016-08-11 17:44:06 +01001422 u32 reg, reg2;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001423 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001424 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +01001425 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1426 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001427 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001428
Will Deacon45ae7cf2013-06-24 18:31:25 +01001429 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001430 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1431 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001432
Will Deacon4a1c93c2015-03-04 12:21:03 +00001433 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001434 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1435 reg = CBA2R_RW64_64BIT;
1436 else
1437 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001438 /* 16-bit VMIDs live in CBA2R */
1439 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001440 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001441
Will Deacon4a1c93c2015-03-04 12:21:03 +00001442 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
1443 }
1444
Will Deacon45ae7cf2013-06-24 18:31:25 +01001445 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001446 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001447 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001448 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001449
Will Deacon57ca90f2014-02-06 14:59:05 +00001450 /*
1451 * Use the weakest shareability/memory types, so they are
1452 * overridden by the ttbcr/pte.
1453 */
1454 if (stage1) {
1455 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1456 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001457 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1458 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001459 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001460 }
Will Deacon44680ee2014-06-25 11:29:12 +01001461 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001462
Will Deacon518f7132014-11-14 17:17:54 +00001463 /* TTBRs */
1464 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001465 u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001466
Robin Murphyb94df6f2016-08-11 17:44:06 +01001467 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1468 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
1469 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
1470 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
1471 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
1472 writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
1473 } else {
1474 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1475 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1476 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
1477 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
1478 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1479 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
1480 }
Will Deacon518f7132014-11-14 17:17:54 +00001481 } else {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001482 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001483 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +00001484 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001485
Will Deacon518f7132014-11-14 17:17:54 +00001486 /* TTBCR */
1487 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001488 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1489 reg = pgtbl_cfg->arm_v7s_cfg.tcr;
1490 reg2 = 0;
1491 } else {
1492 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1493 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
1494 reg2 |= TTBCR2_SEP_UPSTREAM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001495 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001496 if (smmu->version > ARM_SMMU_V1)
1497 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001498 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001499 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001500 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001501 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001502
Will Deacon518f7132014-11-14 17:17:54 +00001503 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001504 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001505 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1506 reg = pgtbl_cfg->arm_v7s_cfg.prrr;
1507 reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
1508 } else {
1509 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1510 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1511 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001512 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001513 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001514 }
1515
Will Deacon45ae7cf2013-06-24 18:31:25 +01001516 /* SCTLR */
Robin Murphyb94df6f2016-08-11 17:44:06 +01001517 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001518
Charan Teja Reddyc682e472017-04-20 19:11:20 +05301519 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_CB_STALL_DISABLE)) {
1520 reg &= ~SCTLR_CFCFG;
1521 reg |= SCTLR_HUPCF;
1522 }
1523
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001524 if ((!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
1525 !(smmu_domain->attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) ||
1526 !stage1)
Patrick Dalye62d3362016-03-15 18:58:28 -07001527 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001528 if (stage1)
1529 reg |= SCTLR_S1_ASIDPNE;
1530#ifdef __BIG_ENDIAN
1531 reg |= SCTLR_E;
1532#endif
Will Deacon25724842013-08-21 13:49:53 +01001533 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001534}
1535
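/*
 * ASID allocation: regular domains derive their ASID directly from the
 * context bank index (cbndx + 1), while dynamic domains draw cyclically from
 * an IDR above the static range so the two can never collide.
 */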
Patrick Dalyc190d932016-08-30 17:23:28 -07001536static int arm_smmu_init_asid(struct iommu_domain *domain,
1537 struct arm_smmu_device *smmu)
1538{
1539 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1540 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1541 bool dynamic = is_dynamic_domain(domain);
1542 int ret;
1543
1544 if (!dynamic) {
1545 cfg->asid = cfg->cbndx + 1;
1546 } else {
1547 mutex_lock(&smmu->idr_mutex);
1548 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1549 smmu->num_context_banks + 2,
1550 MAX_ASID + 1, GFP_KERNEL);
1551
1552 mutex_unlock(&smmu->idr_mutex);
1553 if (ret < 0) {
1554 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1555 ret);
1556 return ret;
1557 }
1558 cfg->asid = ret;
1559 }
1560 return 0;
1561}
1562
1563static void arm_smmu_free_asid(struct iommu_domain *domain)
1564{
1565 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1566 struct arm_smmu_device *smmu = smmu_domain->smmu;
1567 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1568 bool dynamic = is_dynamic_domain(domain);
1569
1570 if (cfg->asid == INVALID_ASID || !dynamic)
1571 return;
1572
1573 mutex_lock(&smmu->idr_mutex);
1574 idr_remove(&smmu->asid_idr, cfg->asid);
1575 mutex_unlock(&smmu->idr_mutex);
1576}
1577
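/*
 * Finalise a domain on a particular SMMU: pick the translation stage and
 * context format, allocate a context bank and ASID, build the io-pgtable,
 * and (for non-dynamic domains) program the context bank and request its
 * fault IRQ. Failures unwind through arm_smmu_destroy_domain_context().
 */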
Will Deacon45ae7cf2013-06-24 18:31:25 +01001578static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Patrick Dalyea63baa2017-02-13 17:11:33 -08001579 struct arm_smmu_device *smmu,
1580 struct device *dev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001581{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001582 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001583 unsigned long ias, oas;
1584 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001585 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001586 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001587 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001588 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyce6786f2016-11-09 14:19:23 -08001589 unsigned long quirks = 0;
Patrick Dalyc190d932016-08-30 17:23:28 -07001590 bool dynamic;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001591
Will Deacon518f7132014-11-14 17:17:54 +00001592 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001593 if (smmu_domain->smmu)
1594 goto out_unlock;
1595
Patrick Dalyc190d932016-08-30 17:23:28 -07001596 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1597 smmu_domain->cfg.asid = INVALID_ASID;
1598
Patrick Dalyc190d932016-08-30 17:23:28 -07001599 dynamic = is_dynamic_domain(domain);
1600 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1601 dev_err(smmu->dev, "dynamic domains not supported\n");
1602 ret = -EPERM;
1603 goto out_unlock;
1604 }
1605
Will Deaconc752ce42014-06-25 22:46:31 +01001606 /*
1607 * Mapping the requested stage onto what we support is surprisingly
1608 * complicated, mainly because the spec allows S1+S2 SMMUs without
1609 * support for nested translation. That means we end up with the
1610 * following table:
1611 *
1612 * Requested Supported Actual
1613 * S1 N S1
1614 * S1 S1+S2 S1
1615 * S1 S2 S2
1616 * S1 S1 S1
1617 * N N N
1618 * N S1+S2 S2
1619 * N S2 S2
1620 * N S1 S1
1621 *
1622 * Note that you can't actually request stage-2 mappings.
1623 */
1624 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1625 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1626 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1627 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1628
Robin Murphy7602b872016-04-28 17:12:09 +01001629 /*
1630 * Choosing a suitable context format is even more fiddly. Until we
1631 * grow some way for the caller to express a preference, and/or move
1632 * the decision into the io-pgtable code where it arguably belongs,
1633 * just aim for the closest thing to the rest of the system, and hope
1634 * that the hardware isn't esoteric enough that we can't assume AArch64
1635 * support to be a superset of AArch32 support...
1636 */
1637 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1638 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001639 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
1640 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
1641 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
1642 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
1643 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +01001644 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1645 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1646 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1647 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1648 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1649
1650 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1651 ret = -EINVAL;
1652 goto out_unlock;
1653 }
1654
Will Deaconc752ce42014-06-25 22:46:31 +01001655 switch (smmu_domain->stage) {
1656 case ARM_SMMU_DOMAIN_S1:
1657 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1658 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001659 ias = smmu->va_size;
1660 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001661 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001662 fmt = ARM_64_LPAE_S1;
Patrick Daly4423d3e2017-05-04 18:17:51 -07001663 if (smmu->options & ARM_SMMU_OPT_3LVL_TABLES)
1664 ias = min(ias, 39UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001665 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +00001666 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001667 ias = min(ias, 32UL);
1668 oas = min(oas, 40UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001669 } else {
1670 fmt = ARM_V7S;
1671 ias = min(ias, 32UL);
1672 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +01001673 }
Will Deaconc752ce42014-06-25 22:46:31 +01001674 break;
1675 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001676 /*
1677 * We will likely want to change this if/when KVM gets
1678 * involved.
1679 */
Will Deaconc752ce42014-06-25 22:46:31 +01001680 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001681 cfg->cbar = CBAR_TYPE_S2_TRANS;
1682 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001683 ias = smmu->ipa_size;
1684 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001685 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001686 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001687 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001688 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001689 ias = min(ias, 40UL);
1690 oas = min(oas, 40UL);
1691 }
Will Deaconc752ce42014-06-25 22:46:31 +01001692 break;
1693 default:
1694 ret = -EINVAL;
1695 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001696 }
1697
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001698 if (is_fast)
1699 fmt = ARM_V8L_FAST;
1700
Patrick Dalyce6786f2016-11-09 14:19:23 -08001701 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
1702 quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
Liam Mark53cf2342016-12-20 11:36:07 -08001703 if (is_iommu_pt_coherent(smmu_domain))
1704 quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001705
Patrick Dalyda688822017-05-17 20:12:48 -07001706 ret = arm_smmu_alloc_cb(domain, smmu, dev);
1707 if (ret < 0)
1708 goto out_unlock;
1709 cfg->cbndx = ret;
1710
Robin Murphyb7862e32016-04-13 18:13:03 +01001711 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001712 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1713 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001714 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001715 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001716 }
1717
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001718 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
Patrick Dalyce6786f2016-11-09 14:19:23 -08001719 .quirks = quirks,
Robin Murphyd5466352016-05-09 17:20:09 +01001720 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +00001721 .ias = ias,
1722 .oas = oas,
1723 .tlb = &arm_smmu_gather_ops,
Robin Murphy2df7a252015-07-29 19:46:06 +01001724 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +00001725 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001726
Will Deacon518f7132014-11-14 17:17:54 +00001727 smmu_domain->smmu = smmu;
Patrick Dalyea63baa2017-02-13 17:11:33 -08001728 smmu_domain->dev = dev;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001729 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1730 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001731 if (!pgtbl_ops) {
1732 ret = -ENOMEM;
1733 goto out_clear_smmu;
1734 }
1735
Patrick Dalyc11d1082016-09-01 15:52:44 -07001736 /*
1737 * assign any page table memory that might have been allocated
1738 * during alloc_io_pgtable_ops
1739 */
Patrick Dalye271f212016-10-04 13:24:49 -07001740 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001741 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001742 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001743
Robin Murphyd5466352016-05-09 17:20:09 +01001744 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001745 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Robin Murphyd7a8d042016-09-12 17:13:58 +01001746 domain->geometry.aperture_end = (1UL << ias) - 1;
1747 domain->geometry.force_aperture = true;
Will Deacon518f7132014-11-14 17:17:54 +00001748
Patrick Dalyc190d932016-08-30 17:23:28 -07001749 /* Assign an asid */
1750 ret = arm_smmu_init_asid(domain, smmu);
1751 if (ret)
1752 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001753
Patrick Dalyc190d932016-08-30 17:23:28 -07001754 if (!dynamic) {
1755 /* Initialise the context bank with our page table cfg */
1756 arm_smmu_init_context_bank(smmu_domain,
1757 &smmu_domain->pgtbl_cfg);
1758
1759 /*
1760 * Request context fault interrupt. Do this last to avoid the
1761 * handler seeing a half-initialised domain state.
1762 */
1763 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1764 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001765 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1766 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001767 if (ret < 0) {
1768 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1769 cfg->irptndx, irq);
1770 cfg->irptndx = INVALID_IRPTNDX;
1771 goto out_clear_smmu;
1772 }
1773 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001774 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001775 }
Will Deacon518f7132014-11-14 17:17:54 +00001776 mutex_unlock(&smmu_domain->init_mutex);
1777
1778 /* Publish page table ops for map/unmap */
1779 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001780 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001781
Will Deacon518f7132014-11-14 17:17:54 +00001782out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001783 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001784 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001785out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001786 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001787 return ret;
1788}
1789
Patrick Daly77db4f92016-10-14 15:34:10 -07001790static void arm_smmu_domain_reinit(struct arm_smmu_domain *smmu_domain)
1791{
1792 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1793 smmu_domain->cfg.cbndx = INVALID_CBNDX;
1794 smmu_domain->secure_vmid = VMID_INVAL;
1795}
1796
Will Deacon45ae7cf2013-06-24 18:31:25 +01001797static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1798{
Joerg Roedel1d672632015-03-26 13:43:10 +01001799 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001800 struct arm_smmu_device *smmu = smmu_domain->smmu;
1801 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01001802 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001803 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07001804 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001805 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001806
Robin Murphy7e96c742016-09-14 15:26:46 +01001807 if (!smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001808 return;
1809
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001810 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001811 if (ret) {
 1812		WARN_ONCE(ret, "Whoops, powering on smmu %p failed. Leaking context bank\n",
1813 smmu);
1814 return;
1815 }
1816
Patrick Dalyc190d932016-08-30 17:23:28 -07001817 dynamic = is_dynamic_domain(domain);
1818 if (dynamic) {
1819 arm_smmu_free_asid(domain);
1820 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001821 arm_smmu_power_off(smmu->pwr);
Patrick Dalye271f212016-10-04 13:24:49 -07001822 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001823 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001824 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001825 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly77db4f92016-10-14 15:34:10 -07001826 arm_smmu_domain_reinit(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001827 return;
1828 }
1829
Will Deacon518f7132014-11-14 17:17:54 +00001830 /*
1831 * Disable the context bank and free the page tables before freeing
1832 * it.
1833 */
Will Deacon44680ee2014-06-25 11:29:12 +01001834 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001835 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01001836
Will Deacon44680ee2014-06-25 11:29:12 +01001837 if (cfg->irptndx != INVALID_IRPTNDX) {
1838 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08001839 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001840 }
1841
Markus Elfring44830b02015-11-06 18:32:41 +01001842 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07001843 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001844 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001845 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001846 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001847 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001848
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001849 arm_smmu_power_off(smmu->pwr);
Patrick Daly77db4f92016-10-14 15:34:10 -07001850 arm_smmu_domain_reinit(smmu_domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001851}
1852
Joerg Roedel1d672632015-03-26 13:43:10 +01001853static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001854{
1855 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001856
Patrick Daly09801312016-08-29 17:02:52 -07001857	/* Only IOMMU_DOMAIN_UNMANAGED domains are supported for now */
1858 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01001859 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001860 /*
1861 * Allocate the domain and initialise some of its data structures.
1862 * We can't really do anything meaningful until we've added a
1863 * master.
1864 */
1865 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1866 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001867 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001868
Robin Murphy7e96c742016-09-14 15:26:46 +01001869 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
1870 iommu_get_dma_cookie(&smmu_domain->domain))) {
Robin Murphy9adb9592016-01-26 18:06:36 +00001871 kfree(smmu_domain);
1872 return NULL;
1873 }
1874
Will Deacon518f7132014-11-14 17:17:54 +00001875 mutex_init(&smmu_domain->init_mutex);
1876 spin_lock_init(&smmu_domain->pgtbl_lock);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001877 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
1878 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07001879 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001880 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Patrick Daly77db4f92016-10-14 15:34:10 -07001881 arm_smmu_domain_reinit(smmu_domain);
Joerg Roedel1d672632015-03-26 13:43:10 +01001882
1883 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001884}
1885
Joerg Roedel1d672632015-03-26 13:43:10 +01001886static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001887{
Joerg Roedel1d672632015-03-26 13:43:10 +01001888 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01001889
1890 /*
1891 * Free the domain resources. We assume that all devices have
1892 * already been detached.
1893 */
Robin Murphy9adb9592016-01-26 18:06:36 +00001894 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001895 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001896 kfree(smmu_domain);
1897}
1898
Robin Murphy468f4942016-09-12 17:13:49 +01001899static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
1900{
1901 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphyd5b41782016-09-14 15:21:39 +01001902 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
Robin Murphy468f4942016-09-12 17:13:49 +01001903
1904 if (smr->valid)
1905 reg |= SMR_VALID;
1906 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
1907}
1908
Robin Murphya754fd12016-09-12 17:13:50 +01001909static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
1910{
1911 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
1912 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
1913 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
1914 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
1915
1916 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
1917}
1918
1919static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
1920{
1921 arm_smmu_write_s2cr(smmu, idx);
1922 if (smmu->smrs)
1923 arm_smmu_write_smr(smmu, idx);
1924}
1925
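/*
 * Find a stream map entry for the given ID/mask. With stream indexing (no
 * SMRs) the stream ID is its own index; otherwise return an entirely
 * matching or free index, -EINVAL if the new entry would partially overlap
 * an existing SMR, or -ENOSPC when the table is full.
 */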
Robin Murphy6668f692016-09-12 17:13:54 +01001926static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy468f4942016-09-12 17:13:49 +01001927{
1928 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy6668f692016-09-12 17:13:54 +01001929 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001930
Robin Murphy6668f692016-09-12 17:13:54 +01001931 /* Stream indexing is blissfully easy */
1932 if (!smrs)
1933 return id;
Robin Murphy468f4942016-09-12 17:13:49 +01001934
Robin Murphy6668f692016-09-12 17:13:54 +01001935 /* Validating SMRs is... less so */
1936 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1937 if (!smrs[i].valid) {
1938 /*
1939 * Note the first free entry we come across, which
1940 * we'll claim in the end if nothing else matches.
1941 */
1942 if (free_idx < 0)
1943 free_idx = i;
Robin Murphy468f4942016-09-12 17:13:49 +01001944 continue;
1945 }
Robin Murphy6668f692016-09-12 17:13:54 +01001946 /*
1947 * If the new entry is _entirely_ matched by an existing entry,
1948 * then reuse that, with the guarantee that there also cannot
1949 * be any subsequent conflicting entries. In normal use we'd
1950 * expect simply identical entries for this case, but there's
1951 * no harm in accommodating the generalisation.
1952 */
1953 if ((mask & smrs[i].mask) == mask &&
1954 !((id ^ smrs[i].id) & ~smrs[i].mask))
1955 return i;
1956 /*
1957 * If the new entry has any other overlap with an existing one,
1958 * though, then there always exists at least one stream ID
1959 * which would cause a conflict, and we can't allow that risk.
1960 */
1961 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
1962 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001963 }
1964
Robin Murphy6668f692016-09-12 17:13:54 +01001965 return free_idx;
1966}
1967
1968static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1969{
1970 if (--smmu->s2crs[idx].count)
1971 return false;
1972
1973 smmu->s2crs[idx] = s2cr_init_val;
1974 if (smmu->smrs)
1975 smmu->smrs[idx].valid = false;
1976
1977 return true;
1978}
1979
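/*
 * Reserve stream map entries for every stream ID in the master's fwspec and
 * record the chosen indexes in cfg->smendx[]. The SMR/S2CR hardware is left
 * untouched here; it is only written once the master is actually attached
 * to a domain.
 */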
1980static int arm_smmu_master_alloc_smes(struct device *dev)
1981{
Robin Murphy06e393e2016-09-12 17:13:55 +01001982 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1983 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy6668f692016-09-12 17:13:54 +01001984 struct arm_smmu_device *smmu = cfg->smmu;
1985 struct arm_smmu_smr *smrs = smmu->smrs;
1986 struct iommu_group *group;
1987 int i, idx, ret;
1988
1989 mutex_lock(&smmu->stream_map_mutex);
1990 /* Figure out a viable stream map entry allocation */
Robin Murphy06e393e2016-09-12 17:13:55 +01001991 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy7e96c742016-09-14 15:26:46 +01001992 u16 sid = fwspec->ids[i];
1993 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1994
Robin Murphy6668f692016-09-12 17:13:54 +01001995 if (idx != INVALID_SMENDX) {
1996 ret = -EEXIST;
1997 goto out_err;
1998 }
1999
Robin Murphy7e96c742016-09-14 15:26:46 +01002000 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy6668f692016-09-12 17:13:54 +01002001 if (ret < 0)
2002 goto out_err;
2003
2004 idx = ret;
2005 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002006 smrs[idx].id = sid;
2007 smrs[idx].mask = mask;
Robin Murphy6668f692016-09-12 17:13:54 +01002008 smrs[idx].valid = true;
2009 }
2010 smmu->s2crs[idx].count++;
2011 cfg->smendx[i] = (s16)idx;
2012 }
2013
2014 group = iommu_group_get_for_dev(dev);
2015 if (!group)
2016 group = ERR_PTR(-ENOMEM);
2017 if (IS_ERR(group)) {
2018 ret = PTR_ERR(group);
2019 goto out_err;
2020 }
2021 iommu_group_put(group);
Robin Murphy468f4942016-09-12 17:13:49 +01002022
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002023 /* It worked! Don't poke the actual hardware until we've attached */
2024 for_each_cfg_sme(fwspec, i, idx)
Robin Murphy6668f692016-09-12 17:13:54 +01002025 smmu->s2crs[idx].group = group;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002026
Robin Murphy6668f692016-09-12 17:13:54 +01002027 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002028 return 0;
2029
Robin Murphy6668f692016-09-12 17:13:54 +01002030out_err:
Robin Murphy468f4942016-09-12 17:13:49 +01002031 while (i--) {
Robin Murphy6668f692016-09-12 17:13:54 +01002032 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy468f4942016-09-12 17:13:49 +01002033 cfg->smendx[i] = INVALID_SMENDX;
2034 }
Robin Murphy6668f692016-09-12 17:13:54 +01002035 mutex_unlock(&smmu->stream_map_mutex);
2036 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002037}
2038
Robin Murphy06e393e2016-09-12 17:13:55 +01002039static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002040{
Robin Murphy06e393e2016-09-12 17:13:55 +01002041 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
2042 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy1fb519a2016-09-12 17:13:53 +01002043 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01002044
Robin Murphy6668f692016-09-12 17:13:54 +01002045 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002046 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002047 if (arm_smmu_free_sme(smmu, idx))
2048 arm_smmu_write_sme(smmu, idx);
Robin Murphy468f4942016-09-12 17:13:49 +01002049 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002050 }
Robin Murphy6668f692016-09-12 17:13:54 +01002051 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002052}
2053
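/*
 * Drop this master's attach references. When the last attachment on a stream
 * map entry goes away its SMR and S2CR are cleared, and the context TLB is
 * invalidated so no stale mappings survive the detach.
 */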
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002054static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
2055 struct iommu_fwspec *fwspec)
2056{
2057 struct arm_smmu_device *smmu = smmu_domain->smmu;
2058 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2059 int i, idx;
2060 const struct iommu_gather_ops *tlb;
2061
2062 tlb = smmu_domain->pgtbl_cfg.tlb;
2063
2064 mutex_lock(&smmu->stream_map_mutex);
2065 for_each_cfg_sme(fwspec, i, idx) {
2066 WARN_ON(s2cr[idx].attach_count == 0);
2067 s2cr[idx].attach_count -= 1;
2068
2069 if (s2cr[idx].attach_count > 0)
2070 continue;
2071
2072 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
2073 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
2074 }
2075 mutex_unlock(&smmu->stream_map_mutex);
2076
2077 /* Ensure there are no stale mappings for this context bank */
2078 tlb->tlb_flush_all(smmu_domain);
2079}
2080
Will Deacon45ae7cf2013-06-24 18:31:25 +01002081static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphy06e393e2016-09-12 17:13:55 +01002082 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002083{
Will Deacon44680ee2014-06-25 11:29:12 +01002084 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002085 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2086 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
2087 u8 cbndx = smmu_domain->cfg.cbndx;
Robin Murphy6668f692016-09-12 17:13:54 +01002088 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002089
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002090 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002091 for_each_cfg_sme(fwspec, i, idx) {
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002092 if (s2cr[idx].attach_count++ > 0)
Robin Murphy6668f692016-09-12 17:13:54 +01002093 continue;
Robin Murphya754fd12016-09-12 17:13:50 +01002094
2095 s2cr[idx].type = type;
2096 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
2097 s2cr[idx].cbndx = cbndx;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002098 arm_smmu_write_sme(smmu, idx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002099 }
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002100 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002101
2102 return 0;
2103}
2104
Patrick Daly09801312016-08-29 17:02:52 -07002105static void arm_smmu_detach_dev(struct iommu_domain *domain,
2106 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002107{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002108 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07002109 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002110 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly09801312016-08-29 17:02:52 -07002111 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07002112 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07002113
2114 if (dynamic)
2115 return;
2116
Patrick Daly09801312016-08-29 17:02:52 -07002117 if (!smmu) {
2118 dev_err(dev, "Domain not attached; cannot detach!\n");
2119 return;
2120 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002121
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002122 arm_smmu_domain_remove_master(smmu_domain, fwspec);
2123
Patrick Daly8befb662016-08-17 20:03:28 -07002124 /* Remove additional vote for atomic power */
2125 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002126 WARN_ON(arm_smmu_power_on_atomic(smmu->pwr));
2127 arm_smmu_power_off(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07002128 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002129}
2130
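/*
 * Hand freshly allocated page-table pages over to the secure domain's VM:
 * every entry on pte_info_list is hyp-assigned so that HLOS keeps read/write
 * access while the secure VMID gets read-only access, then the list is
 * drained. A no-op for non-secure domains.
 */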
Patrick Dalye271f212016-10-04 13:24:49 -07002131static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07002132{
Patrick Dalye271f212016-10-04 13:24:49 -07002133 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002134 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2135 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
2136 int source_vmid = VMID_HLOS;
2137 struct arm_smmu_pte_info *pte_info, *temp;
2138
Patrick Dalye271f212016-10-04 13:24:49 -07002139 if (!arm_smmu_is_domain_secure(smmu_domain))
2140 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002141
Patrick Dalye271f212016-10-04 13:24:49 -07002142 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002143 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2144 PAGE_SIZE, &source_vmid, 1,
2145 dest_vmids, dest_perms, 2);
2146 if (WARN_ON(ret))
2147 break;
2148 }
2149
2150 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2151 entry) {
2152 list_del(&pte_info->entry);
2153 kfree(pte_info);
2154 }
Patrick Dalye271f212016-10-04 13:24:49 -07002155 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002156}
2157
2158static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2159{
2160 int ret;
2161 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002162 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002163 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2164 struct arm_smmu_pte_info *pte_info, *temp;
2165
Patrick Dalye271f212016-10-04 13:24:49 -07002166 if (!arm_smmu_is_domain_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002167 return;
2168
2169 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2170 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2171 PAGE_SIZE, source_vmlist, 2,
2172 &dest_vmids, &dest_perms, 1);
2173 if (WARN_ON(ret))
2174 break;
2175 free_pages_exact(pte_info->virt_addr, pte_info->size);
2176 }
2177
2178 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2179 entry) {
2180 list_del(&pte_info->entry);
2181 kfree(pte_info);
2182 }
2183}
2184
2185static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2186{
2187 struct arm_smmu_domain *smmu_domain = cookie;
2188 struct arm_smmu_pte_info *pte_info;
2189
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002190 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002191
2192 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2193 if (!pte_info)
2194 return;
2195
2196 pte_info->virt_addr = addr;
2197 pte_info->size = size;
2198 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2199}
2200
2201static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2202{
2203 struct arm_smmu_domain *smmu_domain = cookie;
2204 struct arm_smmu_pte_info *pte_info;
2205
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002206 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002207
2208 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2209 if (!pte_info)
2210 return -ENOMEM;
2211 pte_info->virt_addr = addr;
2212 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2213 return 0;
2214}
2215
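/*
 * Attach flow: power on the SMMU, finalise the domain context, and program
 * the master's stream map entries. Dynamic domains skip the SID programming
 * entirely, and atomic domains keep an extra non-atomic power vote until
 * they are detached.
 */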
Will Deacon45ae7cf2013-06-24 18:31:25 +01002216static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2217{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002218 int ret;
Robin Murphy06e393e2016-09-12 17:13:55 +01002219 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Will Deacon518f7132014-11-14 17:17:54 +00002220 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +01002221 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly8befb662016-08-17 20:03:28 -07002222 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002223
Robin Murphy06e393e2016-09-12 17:13:55 +01002224 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002225 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2226 return -ENXIO;
2227 }
Robin Murphy06e393e2016-09-12 17:13:55 +01002228
Robin Murphy4f79b142016-10-17 12:06:21 +01002229 /*
2230 * FIXME: The arch/arm DMA API code tries to attach devices to its own
2231 * domains between of_xlate() and add_device() - we have no way to cope
2232 * with that, so until ARM gets converted to rely on groups and default
2233 * domains, just say no (but more politely than by dereferencing NULL).
2234 * This should be at least a WARN_ON once that's sorted.
2235 */
2236 if (!fwspec->iommu_priv)
2237 return -ENODEV;
2238
Robin Murphy06e393e2016-09-12 17:13:55 +01002239 smmu = fwspec_smmu(fwspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002240
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002241 /* Enable Clocks and Power */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002242 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002243 if (ret)
2244 return ret;
2245
Will Deacon518f7132014-11-14 17:17:54 +00002246 /* Ensure that the domain is finalised */
Patrick Dalyea63baa2017-02-13 17:11:33 -08002247 ret = arm_smmu_init_domain_context(domain, smmu, dev);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002248 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002249 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002250
Patrick Dalyc190d932016-08-30 17:23:28 -07002251 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002252 if (is_dynamic_domain(domain)) {
2253 ret = 0;
2254 goto out_power_off;
2255 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002256
Will Deacon45ae7cf2013-06-24 18:31:25 +01002257 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002258 * Sanity check the domain. We don't support domains across
2259 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002260 */
Robin Murphy06e393e2016-09-12 17:13:55 +01002261 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002262 dev_err(dev,
2263 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002264 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002265 ret = -EINVAL;
2266 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002267 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002268
2269 /* Looks ok, so add the device to the domain */
Robin Murphy06e393e2016-09-12 17:13:55 +01002270 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002271
2272out_power_off:
Patrick Daly466237d2016-10-14 15:59:14 -07002273 /*
2274 * Keep an additional vote for non-atomic power until domain is
2275 * detached
2276 */
2277 if (!ret && atomic_domain) {
2278 WARN_ON(arm_smmu_power_on(smmu->pwr));
2279 arm_smmu_power_off_atomic(smmu->pwr);
2280 }
2281
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002282 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002283
Will Deacon45ae7cf2013-06-24 18:31:25 +01002284 return ret;
2285}
2286
Will Deacon45ae7cf2013-06-24 18:31:25 +01002287static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002288 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002289{
Will Deacon518f7132014-11-14 17:17:54 +00002290 int ret;
2291 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002292 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002293	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002294
Will Deacon518f7132014-11-14 17:17:54 +00002295 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002296 return -ENODEV;
2297
Patrick Dalye271f212016-10-04 13:24:49 -07002298 arm_smmu_secure_domain_lock(smmu_domain);
2299
Will Deacon518f7132014-11-14 17:17:54 +00002300 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2301 ret = ops->map(ops, iova, paddr, size, prot);
2302 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002303
2304 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002305 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002306
Will Deacon518f7132014-11-14 17:17:54 +00002307 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002308}
2309
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002310static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
2311 dma_addr_t iova)
2312{
2313 uint64_t ret;
2314 unsigned long flags;
2315 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2316 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2317
2318 if (!ops)
2319 return 0;
2320
2321 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2322 ret = ops->iova_to_pte(ops, iova);
2323 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2324 return ret;
2325}
2326
Will Deacon45ae7cf2013-06-24 18:31:25 +01002327static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2328 size_t size)
2329{
Will Deacon518f7132014-11-14 17:17:54 +00002330 size_t ret;
2331 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002332 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002333	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002334
Will Deacon518f7132014-11-14 17:17:54 +00002335 if (!ops)
2336 return 0;
2337
Patrick Daly8befb662016-08-17 20:03:28 -07002338 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002339 if (ret)
2340 return ret;
2341
Patrick Dalye271f212016-10-04 13:24:49 -07002342 arm_smmu_secure_domain_lock(smmu_domain);
2343
Will Deacon518f7132014-11-14 17:17:54 +00002344 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2345 ret = ops->unmap(ops, iova, size);
2346 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002347
Patrick Daly8befb662016-08-17 20:03:28 -07002348 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002349 /*
2350 * While splitting up block mappings, we might allocate page table
 2351	 * memory during unmap, so the VMIDs need to be assigned to the
2352 * memory here as well.
2353 */
2354 arm_smmu_assign_table(smmu_domain);
 2355	/* Also unassign any pages that were freed during unmap */
2356 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002357 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002358 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002359}
2360
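/*
 * map_sg processes the scatterlist in batches of at most
 * MAX_MAP_SG_BATCH_SIZE, taking the pgtable lock per batch rather than across
 * the whole list. Secure page tables are assigned once the loop finishes, and
 * any partially mapped range is unmapped again if a batch fails.
 */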
Patrick Daly88d321d2017-02-09 18:02:13 -08002361#define MAX_MAP_SG_BATCH_SIZE (SZ_4M)
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002362static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2363 struct scatterlist *sg, unsigned int nents, int prot)
2364{
2365 int ret;
Patrick Daly88d321d2017-02-09 18:02:13 -08002366 size_t size, batch_size, size_to_unmap = 0;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002367 unsigned long flags;
2368 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2369 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Patrick Daly88d321d2017-02-09 18:02:13 -08002370 unsigned int idx_start, idx_end;
2371 struct scatterlist *sg_start, *sg_end;
2372 unsigned long __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002373
2374 if (!ops)
2375 return -ENODEV;
2376
Patrick Daly8befb662016-08-17 20:03:28 -07002377 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002378 if (ret)
2379 return ret;
2380
Patrick Daly88d321d2017-02-09 18:02:13 -08002381 __saved_iova_start = iova;
2382 idx_start = idx_end = 0;
2383 sg_start = sg_end = sg;
2384 while (idx_end < nents) {
2385 batch_size = sg_end->length;
2386 sg_end = sg_next(sg_end);
2387 idx_end++;
2388 while ((idx_end < nents) &&
2389 (batch_size + sg_end->length < MAX_MAP_SG_BATCH_SIZE)) {
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002390
Patrick Daly88d321d2017-02-09 18:02:13 -08002391 batch_size += sg_end->length;
2392 sg_end = sg_next(sg_end);
2393 idx_end++;
2394 }
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002395
Patrick Daly88d321d2017-02-09 18:02:13 -08002396 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2397 ret = ops->map_sg(ops, iova, sg_start, idx_end - idx_start,
2398 prot, &size);
2399 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2400 /* Returns 0 on error */
2401 if (!ret) {
2402 size_to_unmap = iova + size - __saved_iova_start;
2403 goto out;
2404 }
2405
2406 iova += batch_size;
2407 idx_start = idx_end;
2408 sg_start = sg_end;
2409 }
2410
2411out:
Patrick Dalyc11d1082016-09-01 15:52:44 -07002412 arm_smmu_assign_table(smmu_domain);
2413
Patrick Daly88d321d2017-02-09 18:02:13 -08002414 if (size_to_unmap) {
2415 arm_smmu_unmap(domain, __saved_iova_start, size_to_unmap);
2416 iova = __saved_iova_start;
2417 }
2418 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
2419 return iova - __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002420}
2421
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002422static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyad441dd2016-09-15 15:50:46 -07002423 dma_addr_t iova)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002424{
Joerg Roedel1d672632015-03-26 13:43:10 +01002425 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002426 struct arm_smmu_device *smmu = smmu_domain->smmu;
2427 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2428	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2429 struct device *dev = smmu->dev;
2430 void __iomem *cb_base;
2431 u32 tmp;
2432 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002433 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002434
2435 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2436
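	/*
	 * Perform a hardware address translation (ATOS): program the
	 * page-aligned VA into ATS1PR, poll ATSR for completion, then read
	 * the resulting PA from the PAR register.
	 */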
Robin Murphy661d9622015-05-27 17:09:34 +01002437 /* ATS1 registers can only be written atomically */
2438 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002439 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002440 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2441 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002442 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002443
2444 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2445 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002446 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002447 dev_err(dev,
2448 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2449 &iova, &phys);
2450 phys = 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002451 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002452 }
2453
Robin Murphyf9a05f02016-04-13 18:13:01 +01002454 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002455 if (phys & CB_PAR_F) {
2456 dev_err(dev, "translation fault!\n");
2457 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002458 phys = 0;
2459 } else {
2460 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002461 }
Patrick Dalyad441dd2016-09-15 15:50:46 -07002462
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002463 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002464}
2465
Will Deacon45ae7cf2013-06-24 18:31:25 +01002466static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002467 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002468{
Will Deacon518f7132014-11-14 17:17:54 +00002469 phys_addr_t ret;
2470 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002471 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002472	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002473
Will Deacon518f7132014-11-14 17:17:54 +00002474 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002475 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002476
Will Deacon518f7132014-11-14 17:17:54 +00002477 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002478 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002479 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002480
Will Deacon518f7132014-11-14 17:17:54 +00002481 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002482}
2483
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002484/*
2485 * This function can sleep, so it must not be called from atomic context. It
2486 * will power on the register block if required. This restriction does not
2487 * apply to the original iova_to_phys() op.
2488 */
2489static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2490 dma_addr_t iova)
2491{
2492 phys_addr_t ret = 0;
2493 unsigned long flags;
2494 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly62ba1922017-08-30 16:47:18 -07002495 struct arm_smmu_device *smmu = smmu_domain->smmu;
2496
2497 if (smmu->options & ARM_SMMU_OPT_DISABLE_ATOS)
2498 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002499
Patrick Dalyad441dd2016-09-15 15:50:46 -07002500 if (smmu_domain->smmu->arch_ops &&
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002501 smmu_domain->smmu->arch_ops->iova_to_phys_hard) {
2502 ret = smmu_domain->smmu->arch_ops->iova_to_phys_hard(
Patrick Dalyad441dd2016-09-15 15:50:46 -07002503 domain, iova);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002504 return ret;
2505 }
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002506
2507 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2508 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2509 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyad441dd2016-09-15 15:50:46 -07002510 ret = __arm_smmu_iova_to_phys_hard(domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002511
2512 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2513
2514 return ret;
2515}
2516
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002517static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002518{
Will Deacond0948942014-06-24 17:30:10 +01002519 switch (cap) {
2520 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002521 /*
2522 * Return true here as the SMMU can always send out coherent
2523 * requests.
2524 */
2525 return true;
Will Deacond0948942014-06-24 17:30:10 +01002526 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002527 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002528 case IOMMU_CAP_NOEXEC:
2529 return true;
Will Deacond0948942014-06-24 17:30:10 +01002530 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002531 return false;
Will Deacond0948942014-06-24 17:30:10 +01002532 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002533}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002534
Patrick Daly8e3371a2017-02-13 22:14:53 -08002535static struct arm_smmu_device *arm_smmu_get_by_list(struct device_node *np)
2536{
2537 struct arm_smmu_device *smmu;
2538 unsigned long flags;
2539
2540 spin_lock_irqsave(&arm_smmu_devices_lock, flags);
2541 list_for_each_entry(smmu, &arm_smmu_devices, list) {
2542 if (smmu->dev->of_node == np) {
2543 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2544 return smmu;
2545 }
2546 }
2547 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2548 return NULL;
2549}
2550
Robin Murphy7e96c742016-09-14 15:26:46 +01002551static int arm_smmu_match_node(struct device *dev, void *data)
2552{
2553 return dev->of_node == data;
2554}
2555
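/*
 * Look up an SMMU instance by device_node: first via the driver core, then
 * via the driver's own arm_smmu_devices list as a fallback.
 */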
2556static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
2557{
2558 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
2559 np, arm_smmu_match_node);
2560 put_device(dev);
Patrick Daly8e3371a2017-02-13 22:14:53 -08002561 return dev ? dev_get_drvdata(dev) : arm_smmu_get_by_list(np);
Robin Murphy7e96c742016-09-14 15:26:46 +01002562}
2563
Will Deacon03edb222015-01-19 14:27:33 +00002564static int arm_smmu_add_device(struct device *dev)
2565{
Robin Murphy06e393e2016-09-12 17:13:55 +01002566 struct arm_smmu_device *smmu;
Robin Murphyd5b41782016-09-14 15:21:39 +01002567 struct arm_smmu_master_cfg *cfg;
Robin Murphy7e96c742016-09-14 15:26:46 +01002568 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyd5b41782016-09-14 15:21:39 +01002569 int i, ret;
2570
Robin Murphy7e96c742016-09-14 15:26:46 +01002571 if (using_legacy_binding) {
2572 ret = arm_smmu_register_legacy_master(dev, &smmu);
2573 fwspec = dev->iommu_fwspec;
2574 if (ret)
2575 goto out_free;
Robin Murphy22e6f6c2016-11-02 17:31:32 +00002576 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002577 smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
2578 if (!smmu)
2579 return -ENODEV;
2580 } else {
2581 return -ENODEV;
2582 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002583
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002584 ret = arm_smmu_power_on(smmu->pwr);
2585 if (ret)
2586 goto out_free;
2587
Robin Murphyd5b41782016-09-14 15:21:39 +01002588 ret = -EINVAL;
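	/*
	 * Reject the device if any of its stream IDs or SMR masks cannot be
	 * represented by this SMMU.
	 */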
Robin Murphy06e393e2016-09-12 17:13:55 +01002589 for (i = 0; i < fwspec->num_ids; i++) {
2590 u16 sid = fwspec->ids[i];
Robin Murphy7e96c742016-09-14 15:26:46 +01002591 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyd5b41782016-09-14 15:21:39 +01002592
Robin Murphy06e393e2016-09-12 17:13:55 +01002593 if (sid & ~smmu->streamid_mask) {
Robin Murphyd5b41782016-09-14 15:21:39 +01002594 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002595 sid, smmu->streamid_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002596 goto out_pwr_off;
Robin Murphyd5b41782016-09-14 15:21:39 +01002597 }
Robin Murphy7e96c742016-09-14 15:26:46 +01002598 if (mask & ~smmu->smr_mask_mask) {
2599 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
2600				mask, smmu->smr_mask_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002601 goto out_pwr_off;
Robin Murphy7e96c742016-09-14 15:26:46 +01002602 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002603 }
Will Deacon03edb222015-01-19 14:27:33 +00002604
Robin Murphy06e393e2016-09-12 17:13:55 +01002605 ret = -ENOMEM;
2606 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
2607 GFP_KERNEL);
2608 if (!cfg)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002609 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002610
2611 cfg->smmu = smmu;
2612 fwspec->iommu_priv = cfg;
2613 while (i--)
2614 cfg->smendx[i] = INVALID_SMENDX;
2615
Robin Murphy6668f692016-09-12 17:13:54 +01002616 ret = arm_smmu_master_alloc_smes(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002617 if (ret)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002618 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002619
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002620 arm_smmu_power_off(smmu->pwr);
Robin Murphy06e393e2016-09-12 17:13:55 +01002621 return 0;
Robin Murphyd5b41782016-09-14 15:21:39 +01002622
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002623out_pwr_off:
2624 arm_smmu_power_off(smmu->pwr);
Robin Murphyd5b41782016-09-14 15:21:39 +01002625out_free:
Robin Murphy06e393e2016-09-12 17:13:55 +01002626 if (fwspec)
2627 kfree(fwspec->iommu_priv);
2628 iommu_fwspec_free(dev);
Robin Murphyd5b41782016-09-14 15:21:39 +01002629 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00002630}
2631
Will Deacon45ae7cf2013-06-24 18:31:25 +01002632static void arm_smmu_remove_device(struct device *dev)
2633{
Robin Murphy06e393e2016-09-12 17:13:55 +01002634 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002635 struct arm_smmu_device *smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002636
Robin Murphy06e393e2016-09-12 17:13:55 +01002637 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyd5b41782016-09-14 15:21:39 +01002638 return;
Robin Murphya754fd12016-09-12 17:13:50 +01002639
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002640 smmu = fwspec_smmu(fwspec);
2641 if (arm_smmu_power_on(smmu->pwr)) {
2642 WARN_ON(1);
2643 return;
2644 }
2645
Robin Murphy06e393e2016-09-12 17:13:55 +01002646 arm_smmu_master_free_smes(fwspec);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002647 iommu_group_remove_device(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002648 kfree(fwspec->iommu_priv);
2649 iommu_fwspec_free(dev);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002650 arm_smmu_power_off(smmu->pwr);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002651}
2652
Joerg Roedelaf659932015-10-21 23:51:41 +02002653static struct iommu_group *arm_smmu_device_group(struct device *dev)
2654{
Robin Murphy06e393e2016-09-12 17:13:55 +01002655 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2656 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy6668f692016-09-12 17:13:54 +01002657 struct iommu_group *group = NULL;
2658 int i, idx;
2659
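	/* Every stream map entry used by this device must share one group. */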
Robin Murphy06e393e2016-09-12 17:13:55 +01002660 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002661 if (group && smmu->s2crs[idx].group &&
2662 group != smmu->s2crs[idx].group)
2663 return ERR_PTR(-EINVAL);
2664
2665 group = smmu->s2crs[idx].group;
2666 }
2667
abickett8d352ff2017-09-01 10:29:23 -07002668 if (group)
2669 return group;
Joerg Roedelaf659932015-10-21 23:51:41 +02002670
abickett8d352ff2017-09-01 10:29:23 -07002671 if (dev_is_pci(dev))
2672 group = pci_device_group(dev);
2673 else
2674 group = generic_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02002675
Joerg Roedelaf659932015-10-21 23:51:41 +02002676 return group;
2677}
2678
Will Deaconc752ce42014-06-25 22:46:31 +01002679static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
2680 enum iommu_attr attr, void *data)
2681{
Joerg Roedel1d672632015-03-26 13:43:10 +01002682 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002683 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01002684
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002685 mutex_lock(&smmu_domain->init_mutex);
Will Deaconc752ce42014-06-25 22:46:31 +01002686 switch (attr) {
2687 case DOMAIN_ATTR_NESTING:
2688 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002689 ret = 0;
2690 break;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08002691 case DOMAIN_ATTR_PT_BASE_ADDR:
2692 *((phys_addr_t *)data) =
2693 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002694 ret = 0;
2695 break;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002696 case DOMAIN_ATTR_CONTEXT_BANK:
2697 /* context bank index isn't valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002698 if (smmu_domain->smmu == NULL) {
2699 ret = -ENODEV;
2700 break;
2701 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002702 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
2703 ret = 0;
2704 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002705 case DOMAIN_ATTR_TTBR0: {
2706 u64 val;
2707 struct arm_smmu_device *smmu = smmu_domain->smmu;
2708 /* not valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002709 if (smmu == NULL) {
2710 ret = -ENODEV;
2711 break;
2712 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002713 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2714 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
2715 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
2716 << (TTBRn_ASID_SHIFT);
2717 *((u64 *)data) = val;
2718 ret = 0;
2719 break;
2720 }
2721 case DOMAIN_ATTR_CONTEXTIDR:
2722 /* not valid until attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002723 if (smmu_domain->smmu == NULL) {
2724 ret = -ENODEV;
2725 break;
2726 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002727 *((u32 *)data) = smmu_domain->cfg.procid;
2728 ret = 0;
2729 break;
2730 case DOMAIN_ATTR_PROCID:
2731 *((u32 *)data) = smmu_domain->cfg.procid;
2732 ret = 0;
2733 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002734 case DOMAIN_ATTR_DYNAMIC:
2735 *((int *)data) = !!(smmu_domain->attributes
2736 & (1 << DOMAIN_ATTR_DYNAMIC));
2737 ret = 0;
2738 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002739 case DOMAIN_ATTR_NON_FATAL_FAULTS:
2740 *((int *)data) = !!(smmu_domain->attributes
2741 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
2742 ret = 0;
2743 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07002744 case DOMAIN_ATTR_S1_BYPASS:
2745 *((int *)data) = !!(smmu_domain->attributes
2746 & (1 << DOMAIN_ATTR_S1_BYPASS));
2747 ret = 0;
2748 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002749 case DOMAIN_ATTR_SECURE_VMID:
2750 *((int *)data) = smmu_domain->secure_vmid;
2751 ret = 0;
2752 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08002753 case DOMAIN_ATTR_PGTBL_INFO: {
2754 struct iommu_pgtbl_info *info = data;
2755
2756 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
2757 ret = -ENODEV;
2758 break;
2759 }
2760 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
2761 ret = 0;
2762 break;
2763 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002764 case DOMAIN_ATTR_FAST:
2765 *((int *)data) = !!(smmu_domain->attributes
2766 & (1 << DOMAIN_ATTR_FAST));
2767 ret = 0;
2768 break;
Patrick Daly1e279922017-09-06 15:57:45 -07002769 case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
2770 *((int *)data) = !!(smmu_domain->attributes
2771 & (1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR));
2772 ret = 0;
2773 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002774 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2775 *((int *)data) = !!(smmu_domain->attributes &
2776 (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
2777 ret = 0;
2778 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08002779 case DOMAIN_ATTR_EARLY_MAP:
2780 *((int *)data) = !!(smmu_domain->attributes
2781 & (1 << DOMAIN_ATTR_EARLY_MAP));
2782 ret = 0;
2783 break;
Mitchel Humpherys05314f32016-06-07 16:04:40 -07002784 case DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002785 if (!smmu_domain->smmu) {
2786 ret = -ENODEV;
2787 break;
2788 }
Liam Mark53cf2342016-12-20 11:36:07 -08002789 *((int *)data) = is_iommu_pt_coherent(smmu_domain);
2790 ret = 0;
2791 break;
2792 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
2793 *((int *)data) = !!(smmu_domain->attributes
2794 & (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
Mitchel Humpherys05314f32016-06-07 16:04:40 -07002795 ret = 0;
2796 break;
Charan Teja Reddyc682e472017-04-20 19:11:20 +05302797 case DOMAIN_ATTR_CB_STALL_DISABLE:
2798 *((int *)data) = !!(smmu_domain->attributes
2799 & (1 << DOMAIN_ATTR_CB_STALL_DISABLE));
2800 ret = 0;
2801 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002802 default:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002803 ret = -ENODEV;
2804 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002805 }
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002806 mutex_unlock(&smmu_domain->init_mutex);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002807 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002808}
2809
2810static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
2811 enum iommu_attr attr, void *data)
2812{
Will Deacon518f7132014-11-14 17:17:54 +00002813 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01002814 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01002815
Will Deacon518f7132014-11-14 17:17:54 +00002816 mutex_lock(&smmu_domain->init_mutex);
2817
Will Deaconc752ce42014-06-25 22:46:31 +01002818 switch (attr) {
2819 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00002820 if (smmu_domain->smmu) {
2821 ret = -EPERM;
2822 goto out_unlock;
2823 }
2824
Will Deaconc752ce42014-06-25 22:46:31 +01002825 if (*(int *)data)
2826 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2827 else
2828 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2829
Will Deacon518f7132014-11-14 17:17:54 +00002830 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002831 case DOMAIN_ATTR_PROCID:
2832 if (smmu_domain->smmu != NULL) {
2833 dev_err(smmu_domain->smmu->dev,
2834 "cannot change procid attribute while attached\n");
2835 ret = -EBUSY;
2836 break;
2837 }
2838 smmu_domain->cfg.procid = *((u32 *)data);
2839 ret = 0;
2840 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002841 case DOMAIN_ATTR_DYNAMIC: {
2842 int dynamic = *((int *)data);
2843
2844 if (smmu_domain->smmu != NULL) {
2845 dev_err(smmu_domain->smmu->dev,
2846 "cannot change dynamic attribute while attached\n");
2847 ret = -EBUSY;
2848 break;
2849 }
2850
2851 if (dynamic)
2852 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
2853 else
2854 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
2855 ret = 0;
2856 break;
2857 }
2858 case DOMAIN_ATTR_CONTEXT_BANK:
2859 /* context bank can't be set while attached */
2860 if (smmu_domain->smmu != NULL) {
2861 ret = -EBUSY;
2862 break;
2863 }
2864 /* ... and it can only be set for dynamic contexts. */
2865 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
2866 ret = -EINVAL;
2867 break;
2868 }
2869
2870 /* this will be validated during attach */
2871 smmu_domain->cfg.cbndx = *((unsigned int *)data);
2872 ret = 0;
2873 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002874 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
2875 u32 non_fatal_faults = *((int *)data);
2876
2877 if (non_fatal_faults)
2878 smmu_domain->attributes |=
2879 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
2880 else
2881 smmu_domain->attributes &=
2882 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
2883 ret = 0;
2884 break;
2885 }
Patrick Dalye62d3362016-03-15 18:58:28 -07002886 case DOMAIN_ATTR_S1_BYPASS: {
2887 int bypass = *((int *)data);
2888
2889 /* bypass can't be changed while attached */
2890 if (smmu_domain->smmu != NULL) {
2891 ret = -EBUSY;
2892 break;
2893 }
2894 if (bypass)
2895 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
2896 else
2897 smmu_domain->attributes &=
2898 ~(1 << DOMAIN_ATTR_S1_BYPASS);
2899
2900 ret = 0;
2901 break;
2902 }
Patrick Daly8befb662016-08-17 20:03:28 -07002903 case DOMAIN_ATTR_ATOMIC:
2904 {
2905 int atomic_ctx = *((int *)data);
2906
2907 /* can't be changed while attached */
2908 if (smmu_domain->smmu != NULL) {
2909 ret = -EBUSY;
2910 break;
2911 }
2912 if (atomic_ctx)
2913 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
2914 else
2915 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
2916 break;
2917 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002918 case DOMAIN_ATTR_SECURE_VMID:
2919 if (smmu_domain->secure_vmid != VMID_INVAL) {
2920 ret = -ENODEV;
2921 WARN(1, "secure vmid already set!");
2922 break;
2923 }
2924 smmu_domain->secure_vmid = *((int *)data);
2925 break;
Patrick Daly1e279922017-09-06 15:57:45 -07002926 case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
2927 if (*((int *)data))
2928 smmu_domain->attributes |=
2929 1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR;
2930 ret = 0;
2931 break;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002932 case DOMAIN_ATTR_FAST:
2933 if (*((int *)data))
2934 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
2935 ret = 0;
2936 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002937 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2938 /* can't be changed while attached */
2939 if (smmu_domain->smmu != NULL) {
2940 ret = -EBUSY;
2941 break;
2942 }
2943 if (*((int *)data))
2944 smmu_domain->attributes |=
2945 1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
2946 ret = 0;
2947 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08002948 case DOMAIN_ATTR_EARLY_MAP: {
2949 int early_map = *((int *)data);
2950
2951 ret = 0;
2952 if (early_map) {
2953 smmu_domain->attributes |=
2954 1 << DOMAIN_ATTR_EARLY_MAP;
2955 } else {
2956 if (smmu_domain->smmu)
2957 ret = arm_smmu_enable_s1_translations(
2958 smmu_domain);
2959
2960 if (!ret)
2961 smmu_domain->attributes &=
2962 ~(1 << DOMAIN_ATTR_EARLY_MAP);
2963 }
2964 break;
2965 }
Liam Mark53cf2342016-12-20 11:36:07 -08002966 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT: {
2967 int force_coherent = *((int *)data);
2968
2969 if (smmu_domain->smmu != NULL) {
2970 dev_err(smmu_domain->smmu->dev,
2971 "cannot change force coherent attribute while attached\n");
2972 ret = -EBUSY;
2973 break;
2974 }
2975
2976 if (force_coherent)
2977 smmu_domain->attributes |=
2978 1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
2979 else
2980 smmu_domain->attributes &=
2981 ~(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT);
2982
2983 ret = 0;
2984 break;
2985 }
2986
Charan Teja Reddyc682e472017-04-20 19:11:20 +05302987 case DOMAIN_ATTR_CB_STALL_DISABLE:
2988 if (*((int *)data))
2989 smmu_domain->attributes |=
2990 1 << DOMAIN_ATTR_CB_STALL_DISABLE;
2991 ret = 0;
2992 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002993 default:
Will Deacon518f7132014-11-14 17:17:54 +00002994 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01002995 }
Will Deacon518f7132014-11-14 17:17:54 +00002996
2997out_unlock:
2998 mutex_unlock(&smmu_domain->init_mutex);
2999 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01003000}
3001
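/*
 * Example of how a client might configure attributes on a domain before
 * attaching it (a minimal sketch; "dev" and the attach sequence are
 * illustrative, not taken from this file):
 *
 *	int dynamic = 1;
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (iommu_domain_set_attr(domain, DOMAIN_ATTR_DYNAMIC, &dynamic))
 *		return -EINVAL;
 *	if (iommu_attach_device(domain, dev))
 *		return -ENODEV;
 *
 * Attributes such as DOMAIN_ATTR_DYNAMIC, DOMAIN_ATTR_S1_BYPASS and
 * DOMAIN_ATTR_USE_UPSTREAM_HINT must be set before attach; the -EBUSY
 * checks above enforce this.
 */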
Robin Murphy7e96c742016-09-14 15:26:46 +01003002static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
3003{
3004 u32 fwid = 0;
3005
3006 if (args->args_count > 0)
3007 fwid |= (u16)args->args[0];
3008
3009 if (args->args_count > 1)
3010 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
3011
3012 return iommu_fwspec_add_ids(dev, &fwid, 1);
3013}
3014
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003015static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain)
3016{
3017 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3018 struct arm_smmu_device *smmu = smmu_domain->smmu;
3019 void __iomem *cb_base;
3020 u32 reg;
3021 int ret;
3022
3023 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3024 ret = arm_smmu_power_on(smmu->pwr);
3025 if (ret)
3026 return ret;
3027
3028 reg = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3029 reg |= SCTLR_M;
3030
3031 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
3032 arm_smmu_power_off(smmu->pwr);
3033 return ret;
3034}
3035
Liam Mark3ba41cf2016-12-09 14:39:04 -08003036static bool arm_smmu_is_iova_coherent(struct iommu_domain *domain,
3037 dma_addr_t iova)
3038{
3039 bool ret;
3040 unsigned long flags;
3041 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3042 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
3043
3044 if (!ops)
3045 return false;
3046
3047 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
3048 ret = ops->is_iova_coherent(ops, iova);
3049 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
3050 return ret;
3051}
3052
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003053static void arm_smmu_trigger_fault(struct iommu_domain *domain,
3054 unsigned long flags)
3055{
3056 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3057 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3058 struct arm_smmu_device *smmu;
3059 void __iomem *cb_base;
3060
3061 if (!smmu_domain->smmu) {
3062 pr_err("Can't trigger faults on non-attached domains\n");
3063 return;
3064 }
3065
3066 smmu = smmu_domain->smmu;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003067 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003068 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003069
3070 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3071 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
3072 flags, cfg->cbndx);
3073 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07003074 /* give the interrupt time to fire... */
3075 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003076
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003077 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003078}
3079
Mitchel Humpherysfd557002015-08-21 14:07:59 -07003080static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
3081 unsigned long offset)
3082{
3083 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3084 struct arm_smmu_device *smmu;
3085 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3086 void __iomem *cb_base;
3087 unsigned long val;
3088
3089 if (offset >= SZ_4K) {
3090 pr_err("Invalid offset: 0x%lx\n", offset);
3091 return 0;
3092 }
3093
3094 smmu = smmu_domain->smmu;
3095 if (!smmu) {
3096 WARN(1, "Can't read registers of a detached domain\n");
3097 val = 0;
3098 return val;
3099 }
3100
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003101 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003102 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003103
Mitchel Humpherysfd557002015-08-21 14:07:59 -07003104 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3105 val = readl_relaxed(cb_base + offset);
3106
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003107 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07003108 return val;
3109}
3110
3111static void arm_smmu_reg_write(struct iommu_domain *domain,
3112 unsigned long offset, unsigned long val)
3113{
3114 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3115 struct arm_smmu_device *smmu;
3116 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3117 void __iomem *cb_base;
3118
3119 if (offset >= SZ_4K) {
3120 pr_err("Invalid offset: 0x%lx\n", offset);
3121 return;
3122 }
3123
3124 smmu = smmu_domain->smmu;
3125 if (!smmu) {
3126		WARN(1, "Can't write registers of a detached domain\n");
3127 return;
3128 }
3129
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003130 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003131 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003132
Mitchel Humpherysfd557002015-08-21 14:07:59 -07003133 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3134 writel_relaxed(val, cb_base + offset);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003135
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003136 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07003137}
3138
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003139static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
3140{
3141 arm_smmu_tlb_inv_context(to_smmu_domain(domain));
3142}
3143
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003144static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
3145{
3146 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3147
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003148 return arm_smmu_power_on(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003149}
3150
3151static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
3152{
3153 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3154
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003155 arm_smmu_power_off(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003156}
3157
Will Deacon518f7132014-11-14 17:17:54 +00003158static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01003159 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01003160 .domain_alloc = arm_smmu_domain_alloc,
3161 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01003162 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07003163 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01003164 .map = arm_smmu_map,
3165 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07003166 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01003167 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07003168 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01003169 .add_device = arm_smmu_add_device,
3170 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02003171 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01003172 .domain_get_attr = arm_smmu_domain_get_attr,
3173 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy7e96c742016-09-14 15:26:46 +01003174 .of_xlate = arm_smmu_of_xlate,
Will Deacon518f7132014-11-14 17:17:54 +00003175 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003176 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherysfd557002015-08-21 14:07:59 -07003177 .reg_read = arm_smmu_reg_read,
3178 .reg_write = arm_smmu_reg_write,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003179 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003180 .enable_config_clocks = arm_smmu_enable_config_clocks,
3181 .disable_config_clocks = arm_smmu_disable_config_clocks,
Liam Mark3ba41cf2016-12-09 14:39:04 -08003182 .is_iova_coherent = arm_smmu_is_iova_coherent,
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07003183 .iova_to_pte = arm_smmu_iova_to_pte,
Will Deacon45ae7cf2013-06-24 18:31:25 +01003184};
3185
Patrick Dalyad441dd2016-09-15 15:50:46 -07003186#define IMPL_DEF1_MICRO_MMU_CTRL 0
3187#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
3188#define MICRO_MMU_CTRL_IDLE (1 << 3)
3189
3190/* Definitions for implementation-defined registers */
3191#define ACTLR_QCOM_OSH_SHIFT 28
3192#define ACTLR_QCOM_OSH 1
3193
3194#define ACTLR_QCOM_ISH_SHIFT 29
3195#define ACTLR_QCOM_ISH 1
3196
3197#define ACTLR_QCOM_NSH_SHIFT 30
3198#define ACTLR_QCOM_NSH 1
3199
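/*
 * Halt/resume the SMMU translation engine through the implementation-defined
 * MICRO_MMU_CTRL register: set the local halt request and poll for IDLE.
 */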
3200static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003201{
3202 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003203 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003204
3205 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
3206 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
3207 0, 30000)) {
3208 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
3209 return -EBUSY;
3210 }
3211
3212 return 0;
3213}
3214
Patrick Dalyad441dd2016-09-15 15:50:46 -07003215static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003216{
3217 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3218 u32 reg;
3219
3220 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3221 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
3222 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3223
Patrick Dalyad441dd2016-09-15 15:50:46 -07003224 return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003225}
3226
Patrick Dalyad441dd2016-09-15 15:50:46 -07003227static int qsmmuv2_halt(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003228{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003229 return __qsmmuv2_halt(smmu, true);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003230}
3231
Patrick Dalyad441dd2016-09-15 15:50:46 -07003232static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003233{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003234 return __qsmmuv2_halt(smmu, false);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003235}
3236
Patrick Dalyad441dd2016-09-15 15:50:46 -07003237static void qsmmuv2_resume(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003238{
3239 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3240 u32 reg;
3241
3242 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3243 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
3244 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3245}
3246
Patrick Dalyad441dd2016-09-15 15:50:46 -07003247static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003248{
3249 int i;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003250 u32 val;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003251 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003252 void __iomem *cb_base;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003253
Patrick Dalyad441dd2016-09-15 15:50:46 -07003254 /*
3255 * SCTLR.M must be disabled here per ARM SMMUv2 spec
3256 * to prevent table walks with an inconsistent state.
3257 */
3258 for (i = 0; i < smmu->num_context_banks; ++i) {
3259 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3260 val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
3261 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
3262 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
3263 writel_relaxed(val, cb_base + ARM_SMMU_CB_ACTLR);
3264 }
3265
3266 /* Program implementation defined registers */
3267 qsmmuv2_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003268 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
3269 writel_relaxed(regs[i].value,
3270 ARM_SMMU_GR0(smmu) + regs[i].offset);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003271 qsmmuv2_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003272}
3273
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003274static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
3275 dma_addr_t iova)
Patrick Dalyad441dd2016-09-15 15:50:46 -07003276{
3277 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3278 struct arm_smmu_device *smmu = smmu_domain->smmu;
3279 int ret;
3280 phys_addr_t phys = 0;
3281 unsigned long flags;
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003282 u32 sctlr, sctlr_orig, fsr;
3283 void __iomem *cb_base;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003284
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003285 ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003286 if (ret)
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003287 return ret;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003288
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003289 spin_lock_irqsave(&smmu->atos_lock, flags);
3290 cb_base = ARM_SMMU_CB_BASE(smmu) +
3291 ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003292
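	/*
	 * Halt the SMMU and terminate any stalled transaction so the ATOS
	 * operation below runs against a quiescent context bank.
	 */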
3293 qsmmuv2_halt_nowait(smmu);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003294 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003295 qsmmuv2_wait_for_halt(smmu);
3296
3297 /* clear FSR to allow ATOS to log any faults */
3298 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
3299 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
3300
3301 /* disable stall mode momentarily */
3302 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3303 sctlr = sctlr_orig & ~SCTLR_CFCFG;
3304 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
3305
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003306 phys = __arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003307
3308 /* restore SCTLR */
3309 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
3310
3311 qsmmuv2_resume(smmu);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003312 spin_unlock_irqrestore(&smmu->atos_lock, flags);
3313
3314 arm_smmu_power_off(smmu_domain->smmu->pwr);
3315 return phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003316}
3317
3318struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
3319 .device_reset = qsmmuv2_device_reset,
3320 .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
Patrick Dalyad441dd2016-09-15 15:50:46 -07003321};
3322
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003323static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003324{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003325 int i;
3326 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003327 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003328 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003329
Peng Fan3ca37122016-05-03 21:50:30 +08003330 /*
3331 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
3332 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
3333 * bit is only present in MMU-500r2 onwards.
3334 */
3335 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
3336 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
3337 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
3338 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
3339 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
3340 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
3341 }
3342
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003343 /* Make sure all context banks are disabled and clear CB_FSR */
3344 for (i = 0; i < smmu->num_context_banks; ++i) {
3345 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3346 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
3347 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003348 /*
3349 * Disable MMU-500's not-particularly-beneficial next-page
3350 * prefetcher for the sake of errata #841119 and #826419.
3351 */
3352 if (smmu->model == ARM_MMU500) {
3353 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
3354 reg &= ~ARM_MMU500_ACTLR_CPRE;
3355 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
3356 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003357 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003358}
3359
3360static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
3361{
3362 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy468f4942016-09-12 17:13:49 +01003363 int i;
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003364 u32 reg;
3365
3366 /* clear global FSR */
3367 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3368 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3369
Robin Murphy468f4942016-09-12 17:13:49 +01003370 /*
3371 * Reset stream mapping groups: Initial values mark all SMRn as
3372 * invalid and all S2CRn as bypass unless overridden.
3373 */
Patrick Daly59b6d202017-06-12 13:12:15 -07003374 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
3375 for (i = 0; i < smmu->num_mapping_groups; ++i)
3376 arm_smmu_write_sme(smmu, i);
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003377
Patrick Daly59b6d202017-06-12 13:12:15 -07003378 arm_smmu_context_bank_reset(smmu);
3379 }
Will Deacon1463fe42013-07-31 19:21:27 +01003380
Will Deacon45ae7cf2013-06-24 18:31:25 +01003381 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003382 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
3383 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
3384
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003385 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003386
Will Deacon45ae7cf2013-06-24 18:31:25 +01003387 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003388 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003389
3390 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003391 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003392
Robin Murphy25a1c962016-02-10 14:25:33 +00003393 /* Enable client access, handling unmatched streams as appropriate */
3394 reg &= ~sCR0_CLIENTPD;
3395 if (disable_bypass)
3396 reg |= sCR0_USFCFG;
3397 else
3398 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003399
3400 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003401 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003402
3403 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003404 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003405
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003406 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3407 reg |= sCR0_VMID16EN;
3408
Will Deacon45ae7cf2013-06-24 18:31:25 +01003409 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003410 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003411 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Dalyd7476202016-09-08 18:23:28 -07003412
3413 /* Manage any implementation defined features */
3414 arm_smmu_arch_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003415}
3416
3417static int arm_smmu_id_size_to_bits(int size)
3418{
3419 switch (size) {
3420 case 0:
3421 return 32;
3422 case 1:
3423 return 36;
3424 case 2:
3425 return 40;
3426 case 3:
3427 return 42;
3428 case 4:
3429 return 44;
3430 case 5:
3431 default:
3432 return 48;
3433 }
3434}
3435
Patrick Dalyda688822017-05-17 20:12:48 -07003436
3437/*
3438 * Some context banks need to be transferred from the bootloader to HLOS in a way
3439 * that allows ongoing traffic. The current expectation is that these context
3440 * banks operate in bypass mode.
3441 * Additionally, there must be exactly one device in devicetree with stream-ids
3442 * overlapping those used by the bootloader.
3443 */
3444static int arm_smmu_alloc_cb(struct iommu_domain *domain,
3445 struct arm_smmu_device *smmu,
3446 struct device *dev)
3447{
3448 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Dalye72526b2017-07-18 16:21:44 -07003449 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Dalyda688822017-05-17 20:12:48 -07003450 u32 i, idx;
3451 int cb = -EINVAL;
3452 bool dynamic;
3453
Patrick Dalye72526b2017-07-18 16:21:44 -07003454 /*
3455 * Dynamic domains have already set cbndx through domain attribute.
3456 * Verify that they picked a valid value.
3457 */
Patrick Dalyda688822017-05-17 20:12:48 -07003458 dynamic = is_dynamic_domain(domain);
Patrick Dalye72526b2017-07-18 16:21:44 -07003459 if (dynamic) {
3460 cb = smmu_domain->cfg.cbndx;
3461 if (cb < smmu->num_context_banks)
3462 return cb;
3463 else
3464 return -EINVAL;
3465 }
Patrick Dalyda688822017-05-17 20:12:48 -07003466
3467 mutex_lock(&smmu->stream_map_mutex);
3468 for_each_cfg_sme(fwspec, i, idx) {
3469 if (smmu->s2crs[idx].cb_handoff)
3470 cb = smmu->s2crs[idx].cbndx;
3471 }
3472
3473 if (cb < 0) {
3474 mutex_unlock(&smmu->stream_map_mutex);
3475 return __arm_smmu_alloc_bitmap(smmu->context_map,
3476 smmu->num_s2_context_banks,
3477 smmu->num_context_banks);
3478 }
3479
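	/*
	 * Reclaiming the handed-off context bank: clear the handoff marking
	 * (and drop the bootloader's reference) on every stream that used it.
	 */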
3480 for (i = 0; i < smmu->num_mapping_groups; i++) {
Patrick Daly2eb31362017-06-14 18:29:36 -07003481 if (smmu->s2crs[i].cb_handoff && smmu->s2crs[i].cbndx == cb) {
Patrick Dalyda688822017-05-17 20:12:48 -07003482 smmu->s2crs[i].cb_handoff = false;
3483 smmu->s2crs[i].count -= 1;
3484 }
3485 }
3486 mutex_unlock(&smmu->stream_map_mutex);
3487
3488 return cb;
3489}
3490
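/*
 * Record the SMR/S2CR state programmed by the bootloader so that streams in
 * translation mode keep working until their context banks are reclaimed via
 * arm_smmu_alloc_cb().
 */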
3491static int arm_smmu_handoff_cbs(struct arm_smmu_device *smmu)
3492{
3493 u32 i, raw_smr, raw_s2cr;
3494 struct arm_smmu_smr smr;
3495 struct arm_smmu_s2cr s2cr;
3496
3497 for (i = 0; i < smmu->num_mapping_groups; i++) {
3498 raw_smr = readl_relaxed(ARM_SMMU_GR0(smmu) +
3499 ARM_SMMU_GR0_SMR(i));
3500 if (!(raw_smr & SMR_VALID))
3501 continue;
3502
3503 smr.mask = (raw_smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
3504 smr.id = (u16)raw_smr;
3505 smr.valid = true;
3506
3507 raw_s2cr = readl_relaxed(ARM_SMMU_GR0(smmu) +
3508 ARM_SMMU_GR0_S2CR(i));
Patrick Daly4ea4bdf2017-08-29 19:24:49 -07003509 memset(&s2cr, 0, sizeof(s2cr));
Patrick Dalyda688822017-05-17 20:12:48 -07003510 s2cr.group = NULL;
3511 s2cr.count = 1;
3512 s2cr.type = (raw_s2cr >> S2CR_TYPE_SHIFT) & S2CR_TYPE_MASK;
3513 s2cr.privcfg = (raw_s2cr >> S2CR_PRIVCFG_SHIFT) &
3514 S2CR_PRIVCFG_MASK;
3515 s2cr.cbndx = (u8)raw_s2cr;
3516 s2cr.cb_handoff = true;
3517
3518 if (s2cr.type != S2CR_TYPE_TRANS)
3519 continue;
3520
3521 smmu->smrs[i] = smr;
3522 smmu->s2crs[i] = s2cr;
3523 bitmap_set(smmu->context_map, s2cr.cbndx, 1);
3524 dev_dbg(smmu->dev, "Handoff smr: %x s2cr: %x cb: %d\n",
3525 raw_smr, raw_s2cr, s2cr.cbndx);
3526 }
3527
3528 return 0;
3529}
3530
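/*
 * "attach-impl-defs" is parsed as a flat list of <offset value> pairs that
 * are written to the SMMU's global register space when the
 * implementation-defined state is programmed.
 */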
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003531static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
3532{
3533 struct device *dev = smmu->dev;
3534 int i, ntuples, ret;
3535 u32 *tuples;
3536 struct arm_smmu_impl_def_reg *regs, *regit;
3537
3538 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
3539 return 0;
3540
3541 ntuples /= sizeof(u32);
3542 if (ntuples % 2) {
3543 dev_err(dev,
3544 "Invalid number of attach-impl-defs registers: %d\n",
3545 ntuples);
3546 return -EINVAL;
3547 }
3548
3549 regs = devm_kmalloc(
3550 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
3551 GFP_KERNEL);
3552 if (!regs)
3553 return -ENOMEM;
3554
3555 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
3556 if (!tuples)
3557 return -ENOMEM;
3558
3559 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
3560 tuples, ntuples);
3561 if (ret)
3562 return ret;
3563
3564 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
3565 regit->offset = tuples[i];
3566 regit->value = tuples[i + 1];
3567 }
3568
3569 devm_kfree(dev, tuples);
3570
3571 smmu->impl_def_attach_registers = regs;
3572 smmu->num_impl_def_attach_registers = ntuples / 2;
3573
3574 return 0;
3575}
3576
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003577
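/*
 * Example of the devicetree properties consumed by the power-resource
 * helpers below (values are illustrative only, not from a real board file):
 *
 *	clock-names = "iface_clk", "core_clk";
 *	qcom,regulator-names = "vdd";
 *	qcom,deferred-regulator-disable-delay = <80>;
 *	qcom,msm-bus,name = "smmu_bus_client";
 */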
3578static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003579{
3580 const char *cname;
3581 struct property *prop;
3582 int i;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003583 struct device *dev = pwr->dev;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003584
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003585 pwr->num_clocks =
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003586 of_property_count_strings(dev->of_node, "clock-names");
3587
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003588 if (pwr->num_clocks < 1) {
3589 pwr->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003590 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07003591 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003592
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003593 pwr->clocks = devm_kzalloc(
3594 dev, sizeof(*pwr->clocks) * pwr->num_clocks,
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003595 GFP_KERNEL);
3596
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003597 if (!pwr->clocks)
3598 return -ENOMEM;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003599
3600 i = 0;
3601 of_property_for_each_string(dev->of_node, "clock-names",
3602 prop, cname) {
3603 struct clk *c = devm_clk_get(dev, cname);
3604
3605 if (IS_ERR(c)) {
3606			dev_err(dev, "Couldn't get clock: %s\n",
3607 cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07003608 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003609 }
3610
3611 if (clk_get_rate(c) == 0) {
3612 long rate = clk_round_rate(c, 1000);
3613
3614 clk_set_rate(c, rate);
3615 }
3616
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003617 pwr->clocks[i] = c;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003618
3619 ++i;
3620 }
3621 return 0;
3622}
3623
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003624static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003625{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003626 const char *cname;
3627 struct property *prop;
3628 int i, ret = 0;
3629 struct device *dev = pwr->dev;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003630
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003631 pwr->num_gdscs =
3632 of_property_count_strings(dev->of_node, "qcom,regulator-names");
3633
3634 if (pwr->num_gdscs < 1) {
3635 pwr->num_gdscs = 0;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003636 return 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003637 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003638
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003639 pwr->gdscs = devm_kzalloc(
3640 dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
3641
3642 if (!pwr->gdscs)
3643 return -ENOMEM;
3644
Prakash Guptafad87ca2017-05-16 12:13:02 +05303645 if (!of_property_read_u32(dev->of_node,
3646 "qcom,deferred-regulator-disable-delay",
3647 &(pwr->regulator_defer)))
3648 dev_info(dev, "regulator defer delay %d\n",
3649 pwr->regulator_defer);
3650
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003651 i = 0;
3652 of_property_for_each_string(dev->of_node, "qcom,regulator-names",
3653 prop, cname)
Patrick Daly86396be2017-04-17 18:08:45 -07003654 pwr->gdscs[i++].supply = cname;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003655
3656 ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
3657 return ret;
3658}
3659
3660static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
3661{
3662 struct device *dev = pwr->dev;
3663
3664 /* We don't want the bus APIs to print an error message */
3665 if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
3666 dev_dbg(dev, "No bus scaling info\n");
3667 return 0;
3668 }
3669
3670 pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
3671 if (!pwr->bus_dt_data) {
3672 dev_err(dev, "Unable to read bus-scaling from devicetree\n");
3673 return -EINVAL;
3674 }
3675
3676 pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
3677 if (!pwr->bus_client) {
3678 dev_err(dev, "Bus client registration failed\n");
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003679 return -EINVAL;
3680 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003681
3682 return 0;
3683}
3684
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003685/*
3686 * Cleanup done by devm. Any non-devm resources must clean up themselves.
3687 */
3688static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
3689 struct platform_device *pdev)
Patrick Daly2764f952016-09-06 19:22:44 -07003690{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003691 struct arm_smmu_power_resources *pwr;
3692 int ret;
Patrick Daly2764f952016-09-06 19:22:44 -07003693
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003694 pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
3695 if (!pwr)
3696 return ERR_PTR(-ENOMEM);
Patrick Daly2764f952016-09-06 19:22:44 -07003697
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003698 pwr->dev = &pdev->dev;
3699 pwr->pdev = pdev;
3700 mutex_init(&pwr->power_lock);
3701 spin_lock_init(&pwr->clock_refs_lock);
Patrick Daly2764f952016-09-06 19:22:44 -07003702
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003703 ret = arm_smmu_init_clocks(pwr);
3704 if (ret)
3705 return ERR_PTR(ret);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003706
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003707 ret = arm_smmu_init_regulators(pwr);
3708 if (ret)
3709 return ERR_PTR(ret);
3710
3711 ret = arm_smmu_init_bus_scaling(pwr);
3712 if (ret)
3713 return ERR_PTR(ret);
3714
3715 return pwr;
Patrick Daly2764f952016-09-06 19:22:44 -07003716}
3717
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003718/*
Patrick Dalyabeee952017-04-13 18:14:59 -07003719 * Bus APIs are not devm-safe.
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003720 */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003721static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003722{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003723 msm_bus_scale_unregister_client(pwr->bus_client);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003724}
3725
Will Deacon45ae7cf2013-06-24 18:31:25 +01003726static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
3727{
3728 unsigned long size;
3729 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
3730 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003731 bool cttw_dt, cttw_reg;
Robin Murphya754fd12016-09-12 17:13:50 +01003732 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003733
Mitchel Humpherysba822582015-10-20 11:37:41 -07003734 dev_dbg(smmu->dev, "probing hardware configuration...\n");
3735 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01003736 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003737
3738 /* ID0 */
3739 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01003740
3741 /* Restrict available stages based on module parameter */
3742 if (force_stage == 1)
3743 id &= ~(ID0_S2TS | ID0_NTS);
3744 else if (force_stage == 2)
3745 id &= ~(ID0_S1TS | ID0_NTS);
3746
Will Deacon45ae7cf2013-06-24 18:31:25 +01003747 if (id & ID0_S1TS) {
3748 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003749 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003750 }
3751
3752 if (id & ID0_S2TS) {
3753 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003754 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003755 }
3756
3757 if (id & ID0_NTS) {
3758 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003759 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003760 }
3761
3762 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01003763 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003764 dev_err(smmu->dev, "\tno translation support!\n");
3765 return -ENODEV;
3766 }
3767
Robin Murphyb7862e32016-04-13 18:13:03 +01003768 if ((id & ID0_S1TS) &&
3769 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003770 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003771 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003772 }
3773
Robin Murphybae2c2d2015-07-29 19:46:05 +01003774 /*
3775 * In order for DMA API calls to work properly, we must defer to what
3776 * the DT says about coherency, regardless of what the hardware claims.
3777 * Fortunately, this also opens up a workaround for systems where the
3778 * ID register value has ended up configured incorrectly.
3779 */
3780 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
3781 cttw_reg = !!(id & ID0_CTTW);
3782 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003783 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003784 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003785 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01003786 cttw_dt ? "" : "non-");
3787 if (cttw_dt != cttw_reg)
3788 dev_notice(smmu->dev,
3789 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003790
Robin Murphy53867802016-09-12 17:13:48 +01003791 /* Max. number of entries we have for stream matching/indexing */
3792 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
3793 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003794 if (id & ID0_SMS) {
Robin Murphy53867802016-09-12 17:13:48 +01003795 u32 smr;
Patrick Daly937de532016-12-12 18:44:09 -08003796 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003797
3798 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy53867802016-09-12 17:13:48 +01003799 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
3800 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003801 dev_err(smmu->dev,
3802 "stream-matching supported, but no SMRs present!\n");
3803 return -ENODEV;
3804 }
3805
Robin Murphy53867802016-09-12 17:13:48 +01003806 /*
3807 * SMR.ID bits may not be preserved if the corresponding MASK
3808 * bits are set, so check each one separately. We can reject
3809 * masters later if they try to claim IDs outside these masks.
3810 */
Patrick Daly937de532016-12-12 18:44:09 -08003811 for (i = 0; i < size; i++) {
3812 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
3813 if (!(smr & SMR_VALID))
3814 break;
3815 }
3816 if (i == size) {
3817 dev_err(smmu->dev,
3818 "Unable to compute streamid_masks\n");
3819 return -ENODEV;
3820 }
3821
Robin Murphy53867802016-09-12 17:13:48 +01003822 smr = smmu->streamid_mask << SMR_ID_SHIFT;
Patrick Daly937de532016-12-12 18:44:09 -08003823 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
3824 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
Robin Murphy53867802016-09-12 17:13:48 +01003825 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003826
Robin Murphy53867802016-09-12 17:13:48 +01003827 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
Patrick Daly937de532016-12-12 18:44:09 -08003828 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
3829 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
Robin Murphy53867802016-09-12 17:13:48 +01003830 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
Dhaval Patel031d7462015-05-09 14:47:29 -07003831
Robin Murphy468f4942016-09-12 17:13:49 +01003832 /* Zero-initialised to mark as invalid */
3833 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
3834 GFP_KERNEL);
3835 if (!smmu->smrs)
3836 return -ENOMEM;
3837
Robin Murphy53867802016-09-12 17:13:48 +01003838 dev_notice(smmu->dev,
3839		   "\tstream matching with %lu register groups, mask 0x%x\n",
3840 size, smmu->smr_mask_mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003841 }
Robin Murphya754fd12016-09-12 17:13:50 +01003842 /* s2cr->type == 0 means translation, so initialise explicitly */
3843 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
3844 GFP_KERNEL);
3845 if (!smmu->s2crs)
3846 return -ENOMEM;
3847 for (i = 0; i < size; i++)
3848 smmu->s2crs[i] = s2cr_init_val;
3849
Robin Murphy53867802016-09-12 17:13:48 +01003850 smmu->num_mapping_groups = size;
Robin Murphy6668f692016-09-12 17:13:54 +01003851 mutex_init(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003852
Robin Murphy7602b872016-04-28 17:12:09 +01003853 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
3854 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
3855 if (!(id & ID0_PTFS_NO_AARCH32S))
3856 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
3857 }
3858
Will Deacon45ae7cf2013-06-24 18:31:25 +01003859 /* ID1 */
3860 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01003861 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003862
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003863 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00003864 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01003865 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003866 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07003867 dev_warn(smmu->dev,
3868 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
3869 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003870
Will Deacon518f7132014-11-14 17:17:54 +00003871 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003872 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
3873 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
3874 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
3875 return -ENODEV;
3876 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07003877 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01003878 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01003879 /*
3880 * Cavium CN88xx erratum #27704.
3881 * Ensure ASID and VMID allocation is unique across all SMMUs in
3882 * the system.
3883 */
3884 if (smmu->model == CAVIUM_SMMUV2) {
3885 smmu->cavium_id_base =
3886 atomic_add_return(smmu->num_context_banks,
3887 &cavium_smmu_context_count);
3888 smmu->cavium_id_base -= smmu->num_context_banks;
3889 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003890
3891 /* ID2 */
3892 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
3893 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003894 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003895
Will Deacon518f7132014-11-14 17:17:54 +00003896 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003897 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003898 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003899
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003900 if (id & ID2_VMID16)
3901 smmu->features |= ARM_SMMU_FEAT_VMID16;
3902
Robin Murphyf1d84542015-03-04 16:41:05 +00003903 /*
3904 * What the page table walker can address actually depends on which
3905 * descriptor format is in use, but since a) we don't know that yet,
3906 * and b) it can vary per context bank, this will have to do...
3907 */
3908 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
3909 dev_warn(smmu->dev,
3910 "failed to set DMA mask for table walker\n");
3911
Robin Murphyb7862e32016-04-13 18:13:03 +01003912 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00003913 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01003914 if (smmu->version == ARM_SMMU_V1_64K)
3915 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003916 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003917 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00003918 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00003919 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01003920 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00003921 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01003922 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00003923 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01003924 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003925 }
3926
Robin Murphy7602b872016-04-28 17:12:09 +01003927 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01003928 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01003929 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01003930 if (smmu->features &
3931 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01003932 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01003933 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01003934 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01003935 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01003936 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01003937
Robin Murphyd5466352016-05-09 17:20:09 +01003938 if (arm_smmu_ops.pgsize_bitmap == -1UL)
3939 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
3940 else
3941 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003942 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01003943 smmu->pgsize_bitmap);
3944
Will Deacon518f7132014-11-14 17:17:54 +00003945
Will Deacon28d60072014-09-01 16:24:48 +01003946 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003947 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
3948 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003949
3950 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003951 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
3952 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003953
Will Deacon45ae7cf2013-06-24 18:31:25 +01003954 return 0;
3955}
3956
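/*
 * Thin wrappers around the optional implementation-specific hooks in
 * smmu->arch_ops; both fall back to doing nothing when arch_ops or the
 * relevant callback is absent.
 */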
abickett8d352ff2017-09-01 10:29:23 -07003957static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
3958{
3959 if (!smmu->arch_ops)
3960 return 0;
3961 if (!smmu->arch_ops->init)
3962 return 0;
3963 return smmu->arch_ops->init(smmu);
3964}
3965
3966static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
3967{
3968 if (!smmu->arch_ops)
3969 return;
3970 if (!smmu->arch_ops->device_reset)
3971 return;
3972 return smmu->arch_ops->device_reset(smmu);
3973}
3974
Robin Murphy67b65a32016-04-13 18:12:57 +01003975struct arm_smmu_match_data {
3976 enum arm_smmu_arch_version version;
3977 enum arm_smmu_implementation model;
Patrick Dalyd7476202016-09-08 18:23:28 -07003978 struct arm_smmu_arch_ops *arch_ops;
Robin Murphy67b65a32016-04-13 18:12:57 +01003979};
3980
Patrick Dalyd7476202016-09-08 18:23:28 -07003981#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops) \
3982static struct arm_smmu_match_data name = { \
3983.version = ver, \
3984.model = imp, \
3985.arch_ops = ops, \
3986}
Robin Murphy67b65a32016-04-13 18:12:57 +01003987
Patrick Daly1f8a2882016-09-12 17:32:05 -07003988struct arm_smmu_arch_ops qsmmuv500_arch_ops;
3989
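/*
 * Per-implementation match data referenced from arm_smmu_of_match below.
 * For example, ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL)
 * expands to a static struct arm_smmu_match_data carrying those version,
 * model and arch_ops values.
 */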
Patrick Dalyd7476202016-09-08 18:23:28 -07003990ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
3991ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
3992ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
3993ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
3994ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003995ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
Patrick Daly1f8a2882016-09-12 17:32:05 -07003996ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
3997 &qsmmuv500_arch_ops);
Robin Murphy67b65a32016-04-13 18:12:57 +01003998
Joerg Roedel09b52692014-10-02 12:24:45 +02003999static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01004000 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
4001 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
4002 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01004003 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01004004 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01004005 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07004006 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Patrick Daly1f8a2882016-09-12 17:32:05 -07004007 { .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
Robin Murphy09360402014-08-28 17:51:59 +01004008 { },
4009};
4010MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
4011
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004012
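/*
 * Helpers run via bus_for_each_dev() from the probe path below: the first
 * attaches IOMMU configuration to platform devices that do not yet have an
 * iommu_fwspec, the second calls ops->add_device() on each platform device
 * when arm_smmu_ops is already registered on the bus.
 */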
4013static int arm_smmu_of_iommu_configure_fixup(struct device *dev, void *data)
4014{
4015 if (!dev->iommu_fwspec)
4016 of_iommu_configure(dev, dev->of_node);
4017 return 0;
4018}
4019
Patrick Daly000a2f22017-02-13 22:18:12 -08004020static int arm_smmu_add_device_fixup(struct device *dev, void *data)
4021{
4022 struct iommu_ops *ops = data;
4023
4024 ops->add_device(dev);
4025 return 0;
4026}
4027
Patrick Daly1f8a2882016-09-12 17:32:05 -07004028static int qsmmuv500_tbu_register(struct device *dev, void *data);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004029static int arm_smmu_device_dt_probe(struct platform_device *pdev)
4030{
Robin Murphy67b65a32016-04-13 18:12:57 +01004031 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004032 struct resource *res;
4033 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004034 struct device *dev = &pdev->dev;
Robin Murphyd5b41782016-09-14 15:21:39 +01004035 int num_irqs, i, err;
Robin Murphy7e96c742016-09-14 15:26:46 +01004036 bool legacy_binding;
4037
4038 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
4039 if (legacy_binding && !using_generic_binding) {
4040 if (!using_legacy_binding)
4041 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
4042 using_legacy_binding = true;
4043 } else if (!legacy_binding && !using_legacy_binding) {
4044 using_generic_binding = true;
4045 } else {
4046 dev_err(dev, "not probing due to mismatched DT properties\n");
4047 return -ENODEV;
4048 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004049
4050 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
4051 if (!smmu) {
4052 dev_err(dev, "failed to allocate arm_smmu_device\n");
4053 return -ENOMEM;
4054 }
4055 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08004056 spin_lock_init(&smmu->atos_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07004057 idr_init(&smmu->asid_idr);
4058 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004059
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004060 data = of_device_get_match_data(dev);
Robin Murphy67b65a32016-04-13 18:12:57 +01004061 smmu->version = data->version;
4062 smmu->model = data->model;
Patrick Dalyd7476202016-09-08 18:23:28 -07004063 smmu->arch_ops = data->arch_ops;
Robin Murphy09360402014-08-28 17:51:59 +01004064
Will Deacon45ae7cf2013-06-24 18:31:25 +01004065 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01004066 smmu->base = devm_ioremap_resource(dev, res);
4067 if (IS_ERR(smmu->base))
4068 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004069 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004070
4071 if (of_property_read_u32(dev->of_node, "#global-interrupts",
4072 &smmu->num_global_irqs)) {
4073 dev_err(dev, "missing #global-interrupts property\n");
4074 return -ENODEV;
4075 }
4076
4077 num_irqs = 0;
4078 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
4079 num_irqs++;
4080 if (num_irqs > smmu->num_global_irqs)
4081 smmu->num_context_irqs++;
4082 }
4083
Andreas Herrmann44a08de2013-10-01 13:39:07 +01004084 if (!smmu->num_context_irqs) {
4085 dev_err(dev, "found %d interrupts but expected at least %d\n",
4086 num_irqs, smmu->num_global_irqs + 1);
4087 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004088 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004089
4090 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
4091 GFP_KERNEL);
4092 if (!smmu->irqs) {
4093 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
4094 return -ENOMEM;
4095 }
4096
4097 for (i = 0; i < num_irqs; ++i) {
4098 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07004099
Will Deacon45ae7cf2013-06-24 18:31:25 +01004100 if (irq < 0) {
4101 dev_err(dev, "failed to get irq index %d\n", i);
4102 return -ENODEV;
4103 }
4104 smmu->irqs[i] = irq;
4105 }
4106
Dhaval Patel031d7462015-05-09 14:47:29 -07004107 parse_driver_options(smmu);
4108
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004109 smmu->pwr = arm_smmu_init_power_resources(pdev);
4110 if (IS_ERR(smmu->pwr))
4111 return PTR_ERR(smmu->pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004112
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004113 err = arm_smmu_power_on(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004114 if (err)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004115 goto out_exit_power_resources;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004116
4117 err = arm_smmu_device_cfg_probe(smmu);
4118 if (err)
4119 goto out_power_off;
4120
Patrick Dalyda688822017-05-17 20:12:48 -07004121 err = arm_smmu_handoff_cbs(smmu);
4122 if (err)
4123 goto out_power_off;
4124
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004125 err = arm_smmu_parse_impl_def_registers(smmu);
4126 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004127 goto out_power_off;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004128
Robin Murphyb7862e32016-04-13 18:13:03 +01004129 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01004130 smmu->num_context_banks != smmu->num_context_irqs) {
4131 dev_err(dev,
Mitchel Humpherys73230ce2014-12-11 17:18:02 -08004132 "found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
4133 smmu->num_context_irqs, smmu->num_context_banks,
4134 smmu->num_context_banks);
4135 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004136 }
4137
Will Deacon45ae7cf2013-06-24 18:31:25 +01004138 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08004139 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
4140 NULL, arm_smmu_global_fault,
4141 IRQF_ONESHOT | IRQF_SHARED,
4142 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004143 if (err) {
4144 dev_err(dev, "failed to request global IRQ %d (%u)\n",
4145 i, smmu->irqs[i]);
Robin Murphyd5b41782016-09-14 15:21:39 +01004146 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004147 }
4148 }
4149
Patrick Dalyd7476202016-09-08 18:23:28 -07004150 err = arm_smmu_arch_init(smmu);
4151 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004152 goto out_power_off;
Patrick Dalyd7476202016-09-08 18:23:28 -07004153
Robin Murphy06e393e2016-09-12 17:13:55 +01004154 of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004155 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01004156 arm_smmu_device_reset(smmu);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004157 arm_smmu_power_off(smmu->pwr);
Patrick Dalyd7476202016-09-08 18:23:28 -07004158
Patrick Daly8e3371a2017-02-13 22:14:53 -08004159 INIT_LIST_HEAD(&smmu->list);
4160 spin_lock(&arm_smmu_devices_lock);
4161 list_add(&smmu->list, &arm_smmu_devices);
4162 spin_unlock(&arm_smmu_devices_lock);
4163
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004164 /* bus_set_iommu depends on this. */
4165 bus_for_each_dev(&platform_bus_type, NULL, NULL,
4166 arm_smmu_of_iommu_configure_fixup);
4167
Robin Murphy7e96c742016-09-14 15:26:46 +01004168 /* Oh, for a proper bus abstraction */
4169 if (!iommu_present(&platform_bus_type))
4170 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
Patrick Daly000a2f22017-02-13 22:18:12 -08004171 else
4172 bus_for_each_dev(&platform_bus_type, NULL, &arm_smmu_ops,
4173 arm_smmu_add_device_fixup);
Robin Murphy7e96c742016-09-14 15:26:46 +01004174#ifdef CONFIG_ARM_AMBA
4175 if (!iommu_present(&amba_bustype))
4176 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
4177#endif
4178#ifdef CONFIG_PCI
4179 if (!iommu_present(&pci_bus_type)) {
4180 pci_request_acs();
4181 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
4182 }
4183#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01004184 return 0;
4185
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004186out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004187 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004188
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004189out_exit_power_resources:
4190 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004191
Will Deacon45ae7cf2013-06-24 18:31:25 +01004192 return err;
4193}
4194
4195static int arm_smmu_device_remove(struct platform_device *pdev)
4196{
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004197 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004198
4199 if (!smmu)
4200 return -ENODEV;
4201
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004202 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07004203 return -EINVAL;
4204
Will Deaconecfadb62013-07-31 19:21:28 +01004205 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004206 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004207
Patrick Dalyc190d932016-08-30 17:23:28 -07004208 idr_destroy(&smmu->asid_idr);
4209
Will Deacon45ae7cf2013-06-24 18:31:25 +01004210 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07004211 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004212 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004213
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004214 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004215
Will Deacon45ae7cf2013-06-24 18:31:25 +01004216 return 0;
4217}
4218
Will Deacon45ae7cf2013-06-24 18:31:25 +01004219static struct platform_driver arm_smmu_driver = {
4220 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004221 .name = "arm-smmu",
4222 .of_match_table = of_match_ptr(arm_smmu_of_match),
4223 },
4224 .probe = arm_smmu_device_dt_probe,
4225 .remove = arm_smmu_device_remove,
4226};
4227
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004228static struct platform_driver qsmmuv500_tbu_driver;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004229static int __init arm_smmu_init(void)
4230{
Robin Murphy7e96c742016-09-14 15:26:46 +01004231 static bool registered;
4232 int ret = 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004233
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004234 if (registered)
4235 return 0;
4236
4237 ret = platform_driver_register(&qsmmuv500_tbu_driver);
4238 if (ret)
4239 return ret;
4240
4241 ret = platform_driver_register(&arm_smmu_driver);
4242 registered = !ret;
Robin Murphy7e96c742016-09-14 15:26:46 +01004243 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004244}
4245
4246static void __exit arm_smmu_exit(void)
4247{
4248 return platform_driver_unregister(&arm_smmu_driver);
4249}
4250
Andreas Herrmannb1950b22013-10-01 13:39:05 +01004251subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004252module_exit(arm_smmu_exit);
4253
Robin Murphy7e96c742016-09-14 15:26:46 +01004254static int __init arm_smmu_of_init(struct device_node *np)
4255{
4256 int ret = arm_smmu_init();
4257
4258 if (ret)
4259 return ret;
4260
4261 if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
4262 return -ENODEV;
4263
4264 return 0;
4265}
4266IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
4267IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
4268IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
4269IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
4270IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
4271IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
Robin Murphy7e96c742016-09-14 15:26:46 +01004272
Patrick Dalya0fddb62017-03-27 19:26:59 -07004273#define TCU_HW_VERSION_HLOS1 (0x18)
4274
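/*
 * The DEBUG_* offsets and fields below are relative to each TBU's "base"
 * region and drive the ECATS debug translation interface;
 * TCU_HW_VERSION_HLOS1 above is relative to the TCU's "tcu-base" region.
 */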
Patrick Daly1f8a2882016-09-12 17:32:05 -07004275#define DEBUG_SID_HALT_REG 0x0
4276#define DEBUG_SID_HALT_VAL (0x1 << 16)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004277#define DEBUG_SID_HALT_SID_MASK 0x3ff
4278
4279#define DEBUG_VA_ADDR_REG 0x8
4280
4281#define DEBUG_TXN_TRIGG_REG 0x18
4282#define DEBUG_TXN_AXPROT_SHIFT 6
4283#define DEBUG_TXN_AXCACHE_SHIFT 2
4284#define DEBUG_TRX_WRITE (0x1 << 1)
4285#define DEBUG_TXN_READ (0x0 << 1)
4286#define DEBUG_TXN_TRIGGER 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004287
4288#define DEBUG_SR_HALT_ACK_REG 0x20
4289#define DEBUG_SR_HALT_ACK_VAL (0x1 << 1)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004290#define DEBUG_SR_ECATS_RUNNING_VAL (0x1 << 0)
4291
4292#define DEBUG_PAR_REG 0x28
4293#define DEBUG_PAR_PA_MASK ((0x1ULL << 36) - 1)
4294#define DEBUG_PAR_PA_SHIFT 12
4295#define DEBUG_PAR_FAULT_VAL 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004296
4297#define TBU_DBG_TIMEOUT_US 30000
4298
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004299struct qsmmuv500_archdata {
4300 struct list_head tbus;
Patrick Dalya0fddb62017-03-27 19:26:59 -07004301 void __iomem *tcu_base;
4302 u32 version;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004303};
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004304#define get_qsmmuv500_archdata(smmu) \
4305 ((struct qsmmuv500_archdata *)(smmu->archdata))
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004306
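/* Per-TBU state; one instance per "qcom,qsmmuv500-tbu" child device. */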
Patrick Daly1f8a2882016-09-12 17:32:05 -07004307struct qsmmuv500_tbu_device {
4308 struct list_head list;
4309 struct device *dev;
4310 struct arm_smmu_device *smmu;
4311 void __iomem *base;
4312 void __iomem *status_reg;
4313
4314 struct arm_smmu_power_resources *pwr;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004315 u32 sid_start;
4316 u32 num_sids;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004317
4318 /* Protects halt count */
4319 spinlock_t halt_lock;
4320 u32 halt_count;
4321};
4322
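/*
 * Halt the TBU: stop it accepting new transactions and wait for in-flight
 * ones to drain (halt-ack).  Halt requests nest via halt_count, so
 * qsmmuv500_tbu_resume() only releases the TBU once the last holder drops
 * its request.
 */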
Patrick Daly1f8a2882016-09-12 17:32:05 -07004323static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu)
4324{
4325 unsigned long flags;
4326 u32 val;
4327 void __iomem *base;
4328
4329 spin_lock_irqsave(&tbu->halt_lock, flags);
4330 if (tbu->halt_count) {
4331 tbu->halt_count++;
4332 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4333 return 0;
4334 }
4335
4336 base = tbu->base;
4337 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
4338 val |= DEBUG_SID_HALT_VAL;
4339 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
4340
4341 if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG,
4342 val, (val & DEBUG_SR_HALT_ACK_VAL),
4343 0, TBU_DBG_TIMEOUT_US)) {
4344 dev_err(tbu->dev, "Couldn't halt TBU!\n");
4345 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4346 return -ETIMEDOUT;
4347 }
4348
4349 tbu->halt_count = 1;
4350 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4351 return 0;
4352}
4353
4354static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
4355{
4356 unsigned long flags;
4357 u32 val;
4358 void __iomem *base;
4359
4360 spin_lock_irqsave(&tbu->halt_lock, flags);
4361 if (!tbu->halt_count) {
4362 WARN(1, "%s: bad tbu->halt_count", dev_name(tbu->dev));
4363 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4364 return;
4365
4366 } else if (tbu->halt_count > 1) {
4367 tbu->halt_count--;
4368 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4369 return;
4370 }
4371
4372 base = tbu->base;
4373 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
4374 val &= ~DEBUG_SID_HALT_VAL;
4375 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
4376
4377 tbu->halt_count = 0;
4378 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4379}
4380
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004381static struct qsmmuv500_tbu_device *qsmmuv500_find_tbu(
4382 struct arm_smmu_device *smmu, u32 sid)
4383{
4384 struct qsmmuv500_tbu_device *tbu = NULL;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004385 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004386
4387 list_for_each_entry(tbu, &data->tbus, list) {
4388 if (tbu->sid_start <= sid &&
4389 sid < tbu->sid_start + tbu->num_sids)
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07004390 return tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004391 }
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07004392 return NULL;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004393}
4394
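/*
 * Serialise ECATS requests across the SMMU: take atos_lock and, except on
 * version 1.0 hardware, wait for the TBU status register to indicate that
 * the ECATS interface is available.
 */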
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004395static int qsmmuv500_ecats_lock(struct arm_smmu_domain *smmu_domain,
4396 struct qsmmuv500_tbu_device *tbu,
4397 unsigned long *flags)
4398{
4399 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004400 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004401 u32 val;
4402
4403 spin_lock_irqsave(&smmu->atos_lock, *flags);
4404 /* The status register is not accessible on version 1.0 */
4405 if (data->version == 0x01000000)
4406 return 0;
4407
4408 if (readl_poll_timeout_atomic(tbu->status_reg,
4409 val, (val == 0x1), 0,
4410 TBU_DBG_TIMEOUT_US)) {
4411 dev_err(tbu->dev, "ECATS hw busy!\n");
4412 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4413 return -ETIMEDOUT;
4414 }
4415
4416 return 0;
4417}
4418
4419static void qsmmuv500_ecats_unlock(struct arm_smmu_domain *smmu_domain,
4420 struct qsmmuv500_tbu_device *tbu,
4421 unsigned long *flags)
4422{
4423 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004424 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004425
4426 /* The status register is not accessible on version 1.0 */
4427 if (data->version != 0x01000000)
4428 writel_relaxed(0, tbu->status_reg);
4429 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4430}
4431
4432/*
4433 * Translate 'iova' through the TBU's ECATS debug interface for the
 * given stream ID.  Zero means failure.
4434 */
4435static phys_addr_t qsmmuv500_iova_to_phys(
4436 struct iommu_domain *domain, dma_addr_t iova, u32 sid)
4437{
4438 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
4439 struct arm_smmu_device *smmu = smmu_domain->smmu;
4440 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
4441 struct qsmmuv500_tbu_device *tbu;
4442 int ret;
4443 phys_addr_t phys = 0;
4444 u64 val, fsr;
4445 unsigned long flags;
4446 void __iomem *cb_base;
4447 u32 sctlr_orig, sctlr;
4448 int needs_redo = 0;
4449
4450 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
4451 tbu = qsmmuv500_find_tbu(smmu, sid);
4452 if (!tbu)
4453 return 0;
4454
4455 ret = arm_smmu_power_on(tbu->pwr);
4456 if (ret)
4457 return 0;
4458
4459 /*
4460 * Disable client transactions & wait for existing operations to
4461 * complete.
4462 */
4463 ret = qsmmuv500_tbu_halt(tbu);
4464 if (ret)
4465 goto out_power_off;
4466
4467 /* Only one concurrent atos operation */
4468 ret = qsmmuv500_ecats_lock(smmu_domain, tbu, &flags);
4469 if (ret)
4470 goto out_resume;
4471
4472 /*
4473 * We can be called from an interrupt handler with FSR already set
4474 * so terminate the faulting transaction prior to starting ecats.
4475	 * No new racing faults can occur since we are in the halted state.
4476 * ECATS can trigger the fault interrupt, so disable it temporarily
4477 * and check for an interrupt manually.
4478 */
4479 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4480 if (fsr & FSR_FAULT) {
4481 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
4482 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4483 }
4484 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
4485 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
4486 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
4487
4488redo:
4489 /* Set address and stream-id */
4490 val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
4491 val |= sid & DEBUG_SID_HALT_SID_MASK;
4492 writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
4493 writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
4494
4495 /*
4496 * Write-back Read and Write-Allocate
4497	 * Privileged, non-secure, data transaction
4498 * Read operation.
4499 */
4500 val = 0xF << DEBUG_TXN_AXCACHE_SHIFT;
4501 val |= 0x3 << DEBUG_TXN_AXPROT_SHIFT;
4502 val |= DEBUG_TXN_TRIGGER;
4503 writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);
4504
4505 ret = 0;
4506 if (readl_poll_timeout_atomic(tbu->base + DEBUG_SR_HALT_ACK_REG,
4507 val, !(val & DEBUG_SR_ECATS_RUNNING_VAL),
4508 0, TBU_DBG_TIMEOUT_US)) {
4509 dev_err(tbu->dev, "ECATS translation timed out!\n");
4510 }
4511
4512 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4513 if (fsr & FSR_FAULT) {
4514 dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx\n",
4515			fsr);
4516 ret = -EINVAL;
4517
4518		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
4519 /*
4520 * Clear pending interrupts
4521 * Barrier required to ensure that the FSR is cleared
4522 * before resuming SMMU operation
4523 */
4524 wmb();
4525 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4526 }
4527
4528 val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
4529 if (val & DEBUG_PAR_FAULT_VAL) {
4530 dev_err(tbu->dev, "ECATS translation failed! PAR = %llx\n",
4531 val);
4532 ret = -EINVAL;
4533 }
4534
4535 phys = (val >> DEBUG_PAR_PA_SHIFT) & DEBUG_PAR_PA_MASK;
4536 if (ret < 0)
4537 phys = 0;
4538
4539 /* Reset hardware */
4540 writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
4541 writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
4542
4543 /*
4544 * After a failed translation, the next successful translation will
4545	 * incorrectly be reported as a failure, hence the bounded redo loop.
4546 */
4547 if (!phys && needs_redo++ < 2)
4548 goto redo;
4549
4550 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
4551 qsmmuv500_ecats_unlock(smmu_domain, tbu, &flags);
4552
4553out_resume:
4554 qsmmuv500_tbu_resume(tbu);
4555
4556out_power_off:
4557 arm_smmu_power_off(tbu->pwr);
4558
4559 return phys;
4560}
4561
4562static phys_addr_t qsmmuv500_iova_to_phys_hard(
4563 struct iommu_domain *domain, dma_addr_t iova)
4564{
4565 u16 sid;
4566 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
4567 struct iommu_fwspec *fwspec;
4568
4569 /* Select a sid */
4570 fwspec = smmu_domain->dev->iommu_fwspec;
4571 sid = (u16)fwspec->ids[0];
4572
4573 return qsmmuv500_iova_to_phys(domain, iova, sid);
4574}
4575
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004576static int qsmmuv500_tbu_register(struct device *dev, void *cookie)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004577{
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004578 struct arm_smmu_device *smmu = cookie;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004579 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004580 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004581
4582 if (!dev->driver) {
4583 dev_err(dev, "TBU failed probe, QSMMUV500 cannot continue!\n");
4584 return -EINVAL;
4585 }
4586
4587 tbu = dev_get_drvdata(dev);
4588
4589 INIT_LIST_HEAD(&tbu->list);
4590 tbu->smmu = smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004591 list_add(&tbu->list, &data->tbus);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004592 return 0;
4593}
4594
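/*
 * QSMMUV500 init hook: map the TCU register space, read the hardware
 * version, populate the child TBU platform devices from DT and register
 * each one against this SMMU instance.
 */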
4595static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
4596{
Patrick Dalya0fddb62017-03-27 19:26:59 -07004597 struct resource *res;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004598 struct device *dev = smmu->dev;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004599 struct qsmmuv500_archdata *data;
Patrick Dalya0fddb62017-03-27 19:26:59 -07004600 struct platform_device *pdev;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004601 int ret;
4602
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004603 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
4604 if (!data)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004605 return -ENOMEM;
4606
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004607 INIT_LIST_HEAD(&data->tbus);
Patrick Dalya0fddb62017-03-27 19:26:59 -07004608
4609 pdev = container_of(dev, struct platform_device, dev);
4610 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcu-base");
4611 data->tcu_base = devm_ioremap_resource(dev, res);
4612 if (IS_ERR(data->tcu_base))
4613 return PTR_ERR(data->tcu_base);
4614
4615 data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004616 smmu->archdata = data;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004617
4618 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
4619 if (ret)
4620 return ret;
4621
4622 /* Attempt to register child devices */
4623 ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
4624 if (ret)
Patrick Daly6ce54262017-04-12 21:24:06 -07004625 return -EPROBE_DEFER;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004626
4627 return 0;
4628}
4629
4630struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
4631 .init = qsmmuv500_arch_init,
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004632 .iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
Patrick Daly1f8a2882016-09-12 17:32:05 -07004633};
4634
4635static const struct of_device_id qsmmuv500_tbu_of_match[] = {
4636 {.compatible = "qcom,qsmmuv500-tbu"},
4637 {}
4638};
4639
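/*
 * The probe below expects "base" and "status-reg" memory regions plus a
 * two-cell "qcom,stream-id-range" property.  A hypothetical DT fragment
 * (node name, addresses and values are illustrative only) might look like:
 *
 *	tbu@150e1000 {
 *		compatible = "qcom,qsmmuv500-tbu";
 *		reg = <0x150e1000 0x1000>, <0x150f2200 0x8>;
 *		reg-names = "base", "status-reg";
 *		qcom,stream-id-range = <0x800 0x400>;
 *	};
 */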
4640static int qsmmuv500_tbu_probe(struct platform_device *pdev)
4641{
4642 struct resource *res;
4643 struct device *dev = &pdev->dev;
4644 struct qsmmuv500_tbu_device *tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004645 const __be32 *cell;
4646 int len;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004647
4648 tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
4649 if (!tbu)
4650 return -ENOMEM;
4651
4652 INIT_LIST_HEAD(&tbu->list);
4653 tbu->dev = dev;
4654 spin_lock_init(&tbu->halt_lock);
4655
4656 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
4657 tbu->base = devm_ioremap_resource(dev, res);
4658 if (IS_ERR(tbu->base))
4659 return PTR_ERR(tbu->base);
4660
4661 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "status-reg");
4662 tbu->status_reg = devm_ioremap_resource(dev, res);
4663 if (IS_ERR(tbu->status_reg))
4664 return PTR_ERR(tbu->status_reg);
4665
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004666 cell = of_get_property(dev->of_node, "qcom,stream-id-range", &len);
4667 if (!cell || len < 8)
4668 return -EINVAL;
4669
4670 tbu->sid_start = of_read_number(cell, 1);
4671 tbu->num_sids = of_read_number(cell + 1, 1);
4672
Patrick Daly1f8a2882016-09-12 17:32:05 -07004673 tbu->pwr = arm_smmu_init_power_resources(pdev);
4674 if (IS_ERR(tbu->pwr))
4675 return PTR_ERR(tbu->pwr);
4676
4677 dev_set_drvdata(dev, tbu);
4678 return 0;
4679}
4680
4681static struct platform_driver qsmmuv500_tbu_driver = {
4682 .driver = {
4683 .name = "qsmmuv500-tbu",
4684 .of_match_table = of_match_ptr(qsmmuv500_tbu_of_match),
4685 },
4686 .probe = qsmmuv500_tbu_probe,
4687};
4688
Will Deacon45ae7cf2013-06-24 18:31:25 +01004689MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
4690MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
4691MODULE_LICENSE("GPL v2");