/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/of_platform.h>
#include <linux/msm-bus.h>
#include <dt-bindings/msm/msm-bus-ids.h>
#include <linux/remote_spinlock.h>
#include <linux/ktime.h>
#include <trace/events/iommu.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3
#define sCR0_SHCFG_SHIFT		22
#define sCR0_SHCFG_MASK			0x3
#define sCR0_SHCFG_NSH			3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		500000	/* 500ms */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7FFF
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_SHCFG_SHIFT		8
#define S2CR_SHCFG_MASK			0x3
#define S2CR_SHCFG_NSH			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FSRRESTORE		0x5c
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIALL		0x618
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define TLBSTATUS_SACTIVE		(1 << 0)
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_SHCFG_SHIFT		22
#define SCTLR_SHCFG_MASK		0x3
#define SCTLR_SHCFG_NSH			0x3
#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_HUPCF			(1 << 8)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
	QCOM_SMMUV500,
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

/*
 * attach_count
 * The SMR and S2CR registers are only programmed when the number of
 * devices attached to the iommu using these registers is > 0. This
 * is required for the "SID switch" use case for secure display.
 * Protected by stream_map_mutex.
 */
struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	int				attach_count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
	bool				cb_handoff;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
	.cb_handoff = false,						\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

/*
 * Describes resources required for on/off power operation.
 * Separate reference count is provided for atomic/nonatomic
 * operations.
 */
struct arm_smmu_power_resources {
	struct platform_device		*pdev;
	struct device			*dev;

	struct clk			**clocks;
	int				num_clocks;

	struct regulator_bulk_data	*gdscs;
	int				num_gdscs;

	uint32_t			bus_client;
	struct msm_bus_scale_pdata	*bus_dt_data;

	/* Protects power_count */
	struct mutex			power_lock;
	int				power_count;

	/* Protects clock_refs_count */
	spinlock_t			clock_refs_lock;
	int				clock_refs_count;
	int				regulator_defer;
};

struct arm_smmu_arch_ops;
struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
#define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
#define ARM_SMMU_OPT_3LVL_TABLES	(1 << 4)
#define ARM_SMMU_OPT_NO_ASID_RETENTION	(1 << 5)
#define ARM_SMMU_OPT_DISABLE_ATOS	(1 << 6)
#define ARM_SMMU_OPT_QCOM_MMU500_ERRATA1 (1 << 7)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;

	u32				cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
	unsigned int			num_impl_def_attach_registers;

	struct arm_smmu_power_resources *pwr;

	spinlock_t			atos_lock;

	/* protects idr */
	struct mutex			idr_mutex;
	struct idr			asid_idr;

	struct arm_smmu_arch_ops	*arch_ops;
	void				*archdata;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	u32				procid;
	u16				asid;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff
#define INVALID_CBNDX			0xff
#define INVALID_ASID			0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg)	((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_pte_info {
	void *virt_addr;
	size_t size;
	struct list_head entry;
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct device			*dev;
	struct io_pgtable_ops		*pgtbl_ops;
	struct io_pgtable_cfg		pgtbl_cfg;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	u32				attributes;
	u32				secure_vmid;
	struct list_head		pte_info_list;
	struct list_head		unassign_list;
	struct mutex			assign_lock;
	struct list_head		secure_pool_list;
	struct iommu_domain		domain;

	bool				qsmmuv500_errata1_init;
	bool				qsmmuv500_errata1_client;
	bool				qsmmuv500_errata2_min_align;
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
	{ ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
	{ ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" },
	{ ARM_SMMU_OPT_QCOM_MMU500_ERRATA1, "qcom,mmu500-errata-1" },
	{ 0, NULL},
};

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);

static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
				    dma_addr_t iova);

static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);

static int arm_smmu_alloc_cb(struct iommu_domain *domain,
				struct arm_smmu_device *smmu,
				struct device *dev);
static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops;

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_dbg(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
{
	if (smmu_domain->attributes &
			(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT))
		return true;
	else if (smmu_domain->smmu && smmu_domain->smmu->dev)
		return smmu_domain->smmu->dev->archdata.dma_coherent;
	else
		return false;
}

static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
{
	return (smmu_domain->secure_vmid != VMID_INVAL);
}

static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_lock(&smmu_domain->assign_lock);
}

static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_unlock(&smmu_domain->assign_lock);
}

/*
 * init()
 * Hook for additional device tree parsing at probe time.
 *
 * device_reset()
 * Hook for one-time architecture-specific register settings.
 *
 * iova_to_phys_hard()
 * Provides debug information. May be called from the context fault irq handler.
 *
 * init_context_bank()
 * Hook for architecture-specific settings which require knowledge of the
 * dynamically allocated context bank number.
 *
 * device_group()
 * Hook for checking whether a device is compatible with a said group.
 */
struct arm_smmu_arch_ops {
	int (*init)(struct arm_smmu_device *smmu);
	void (*device_reset)(struct arm_smmu_device *smmu);
	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
					 dma_addr_t iova);
	void (*init_context_bank)(struct arm_smmu_domain *smmu_domain,
					struct device *dev);
	int (*device_group)(struct device *dev, struct iommu_group *group);
};

static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
{
	if (!smmu->arch_ops)
		return 0;
	if (!smmu->arch_ops->init)
		return 0;
	return smmu->arch_ops->init(smmu);
}

static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
{
	if (!smmu->arch_ops)
		return;
	if (!smmu->arch_ops->device_reset)
		return;
	return smmu->arch_ops->device_reset(smmu);
}

static void arm_smmu_arch_init_context_bank(
		struct arm_smmu_domain *smmu_domain, struct device *dev)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (!smmu->arch_ops)
		return;
	if (!smmu->arch_ops->init_context_bank)
		return;
	return smmu->arch_ops->init_context_bank(smmu_domain, dev);
}

static int arm_smmu_arch_device_group(struct device *dev,
					struct iommu_group *group)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);

	if (!smmu->arch_ops)
		return 0;
	if (!smmu->arch_ops->device_group)
		return 0;
	return smmu->arch_ops->device_group(dev, group);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					    struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err = 0;

	memset(&it, 0, sizeof(it));
	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

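/*
 * Claim the first free index in an allocation bitmap (context banks or
 * stream-mapping entries). Returns the index on success or -ENOSPC when
 * the bitmap is full; test_and_set_bit() keeps this safe against
 * concurrent allocators.
 */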
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_prepare(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
			while (i--)
				clk_unprepare(pwr->clocks[i]);
			break;
		}
	}
	return ret;
}

static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_unprepare(pwr->clocks[i - 1]);
}

static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_enable(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
			while (i--)
				clk_disable(pwr->clocks[i]);
			break;
		}
	}

	return ret;
}

static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_disable(pwr->clocks[i - 1]);
}

static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return 0;
	return msm_bus_scale_client_update_request(pwr->bus_client, 1);
}

static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return;
	WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
}

static int arm_smmu_enable_regulators(struct arm_smmu_power_resources *pwr)
{
	struct regulator_bulk_data *consumers;
	int num_consumers, ret;
	int i;

	num_consumers = pwr->num_gdscs;
	consumers = pwr->gdscs;
	for (i = 0; i < num_consumers; i++) {
		ret = regulator_enable(consumers[i].consumer);
		if (ret)
			goto out;
	}
	return 0;

out:
	i -= 1;
	for (; i >= 0; i--)
		regulator_disable(consumers[i].consumer);
	return ret;
}

static int arm_smmu_disable_regulators(struct arm_smmu_power_resources *pwr)
{
	struct regulator_bulk_data *consumers;
	int i;
	int num_consumers, ret, r;

	num_consumers = pwr->num_gdscs;
	consumers = pwr->gdscs;
	for (i = num_consumers - 1; i >= 0; --i) {
		ret = regulator_disable_deferred(consumers[i].consumer,
						 pwr->regulator_defer);
		if (ret != 0)
			goto err;
	}

	return 0;

err:
	pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret);
	for (++i; i < num_consumers; ++i) {
		r = regulator_enable(consumers[i].consumer);
		if (r != 0)
			pr_err("Failed to reenable %s: %d\n",
			       consumers[i].supply, r);
	}

	return ret;
}

/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count > 0) {
		pwr->clock_refs_count++;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return 0;
	}

	ret = arm_smmu_enable_clocks(pwr);
	if (!ret)
		pwr->clock_refs_count = 1;

	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
	return ret;
}

/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
{
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count == 0) {
		WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;

	} else if (pwr->clock_refs_count > 1) {
		pwr->clock_refs_count--;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;
	}

	arm_smmu_disable_clocks(pwr);

	pwr->clock_refs_count = 0;
	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
}

static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
{
	int ret;

	mutex_lock(&pwr->power_lock);
	if (pwr->power_count > 0) {
		pwr->power_count += 1;
		mutex_unlock(&pwr->power_lock);
		return 0;
	}

	ret = arm_smmu_request_bus(pwr);
	if (ret)
		goto out_unlock;

	ret = arm_smmu_enable_regulators(pwr);
	if (ret)
		goto out_disable_bus;

	ret = arm_smmu_prepare_clocks(pwr);
	if (ret)
		goto out_disable_regulators;

	pwr->power_count = 1;
	mutex_unlock(&pwr->power_lock);
	return 0;

out_disable_regulators:
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
out_disable_bus:
	arm_smmu_unrequest_bus(pwr);
out_unlock:
	mutex_unlock(&pwr->power_lock);
	return ret;
}

static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
{
	mutex_lock(&pwr->power_lock);
	if (pwr->power_count == 0) {
		WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
		mutex_unlock(&pwr->power_lock);
		return;

	} else if (pwr->power_count > 1) {
		pwr->power_count--;
		mutex_unlock(&pwr->power_lock);
		return;
	}

	arm_smmu_unprepare_clocks(pwr);
	arm_smmu_disable_regulators(pwr);
	arm_smmu_unrequest_bus(pwr);
	pwr->power_count = 0;
	mutex_unlock(&pwr->power_lock);
}

static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
{
	int ret;

	ret = arm_smmu_power_on_slow(pwr);
	if (ret)
		return ret;

	ret = arm_smmu_power_on_atomic(pwr);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	arm_smmu_power_off_slow(pwr);
	return ret;
}

static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
{
	arm_smmu_power_off_atomic(pwr);
	arm_smmu_power_off_slow(pwr);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static int arm_smmu_domain_power_on(struct iommu_domain *domain,
				struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain)
		return arm_smmu_power_on_atomic(smmu->pwr);

	return arm_smmu_power_on(smmu->pwr);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static void arm_smmu_domain_power_off(struct iommu_domain *domain,
				struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain) {
		arm_smmu_power_off_atomic(smmu->pwr);
		return;
	}

	arm_smmu_power_off(smmu->pwr);
}

/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
				int cbndx)
{
	void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
	u32 val;

	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
				      !(val & TLBSTATUS_SACTIVE),
				      0, TLB_LOOP_TIMEOUT))
		dev_err(smmu->dev, "TLBSYNC timeout!\n");
}

static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
}

/* Must be called with clocks/regulators enabled */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;
	bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;

	if (stage1 && !use_tlbiall) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else if (stage1 && use_tlbiall) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
		__arm_smmu_tlb_sync(smmu);
	}
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;
	bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;

	if (stage1 && !use_tlbiall) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (stage1 && use_tlbiall) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += ARM_SMMU_CB_S1_TLBIALL;
		writel_relaxed(0, reg);
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

struct arm_smmu_secure_pool_chunk {
	void *addr;
	size_t size;
	struct list_head list;
};

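/*
 * Reuse a page-table allocation of the requested size from the secure
 * pool, if one is available, so that it does not need to be assigned to
 * the secure VM again.
 */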
static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
					size_t size)
{
	struct arm_smmu_secure_pool_chunk *it;

	list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
		if (it->size == size) {
			void *addr = it->addr;

			list_del(&it->list);
			kfree(it);
			return addr;
		}
	}

	return NULL;
}

static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
				     void *addr, size_t size)
{
	struct arm_smmu_secure_pool_chunk *chunk;

	chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
	if (!chunk)
		return -ENOMEM;

	chunk->addr = addr;
	chunk->size = size;
	memset(addr, 0, size);
	list_add(&chunk->list, &smmu_domain->secure_pool_list);

	return 0;
}

static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_secure_pool_chunk *it, *i;

	list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
		arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
		/* pages will be freed later (after being unassigned) */
		list_del(&it->list);
		kfree(it);
	}
}

static void *arm_smmu_alloc_pages_exact(void *cookie,
					size_t size, gfp_t gfp_mask)
{
	int ret;
	void *page;
	struct arm_smmu_domain *smmu_domain = cookie;

	if (!arm_smmu_is_domain_secure(smmu_domain))
		return alloc_pages_exact(size, gfp_mask);

	page = arm_smmu_secure_pool_remove(smmu_domain, size);
	if (page)
		return page;

	page = alloc_pages_exact(size, gfp_mask);
	if (page) {
		ret = arm_smmu_prepare_pgtable(page, cookie);
		if (ret) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return page;
}

static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	if (!arm_smmu_is_domain_secure(smmu_domain)) {
		free_pages_exact(virt, size);
		return;
	}

	if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
		arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
	.alloc_pages_exact = arm_smmu_alloc_pages_exact,
	.free_pages_exact = arm_smmu_free_pages_exact,
};

Patrick Dalyd54eafd2016-08-23 17:01:43 -07001306static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
1307 dma_addr_t iova, u32 fsr)
1308{
1309 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001310 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyda765c62017-09-11 16:31:07 -07001311 const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001312 phys_addr_t phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07001313 phys_addr_t phys_post_tlbiall;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001314
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001315 phys = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyda765c62017-09-11 16:31:07 -07001316 tlb->tlb_flush_all(smmu_domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001317 phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001318
Patrick Dalyad441dd2016-09-15 15:50:46 -07001319 if (phys != phys_post_tlbiall) {
1320 dev_err(smmu->dev,
1321 "ATOS results differed across TLBIALL...\n"
1322 "Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
1323 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001324
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001325 return (phys == 0 ? phys_post_tlbiall : phys);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001326}
1327
Will Deacon45ae7cf2013-06-24 18:31:25 +01001328static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
1329{
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001330 int flags, ret, tmp;
Patrick Daly5ba28112016-08-30 19:18:52 -07001331 u32 fsr, fsynr, resume;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001332 unsigned long iova;
1333 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +01001334 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001335 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1336 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001337 void __iomem *cb_base;
Shalaj Jain04059c52015-03-03 13:34:59 -08001338 void __iomem *gr1_base;
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001339 bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001340 phys_addr_t phys_soft;
Shalaj Jain04059c52015-03-03 13:34:59 -08001341 u32 frsynra;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001342 bool non_fatal_fault = !!(smmu_domain->attributes &
Sudarshan Rajagopalanf4464e02017-08-10 14:30:39 -07001343 (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001344
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001345 static DEFINE_RATELIMIT_STATE(_rs,
1346 DEFAULT_RATELIMIT_INTERVAL,
1347 DEFAULT_RATELIMIT_BURST);
1348
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001349 ret = arm_smmu_power_on(smmu->pwr);
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001350 if (ret)
1351 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001352
Shalaj Jain04059c52015-03-03 13:34:59 -08001353 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001354 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001355 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
1356
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001357 if (!(fsr & FSR_FAULT)) {
1358 ret = IRQ_NONE;
1359 goto out_power_off;
1360 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001361
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001362 if (fatal_asf && (fsr & FSR_ASF)) {
1363 dev_err(smmu->dev,
1364 "Took an address size fault. Refusing to recover.\n");
1365 BUG();
1366 }
1367
Will Deacon45ae7cf2013-06-24 18:31:25 +01001368 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
Patrick Daly5ba28112016-08-30 19:18:52 -07001369 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001370 if (fsr & FSR_TF)
1371 flags |= IOMMU_FAULT_TRANSLATION;
1372 if (fsr & FSR_PF)
1373 flags |= IOMMU_FAULT_PERMISSION;
Mitchel Humpherysdd75e332015-08-19 11:02:33 -07001374 if (fsr & FSR_EF)
1375 flags |= IOMMU_FAULT_EXTERNAL;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001376 if (fsr & FSR_SS)
1377 flags |= IOMMU_FAULT_TRANSACTION_STALLED;
Patrick Daly5ba28112016-08-30 19:18:52 -07001378
Robin Murphyf9a05f02016-04-13 18:13:01 +01001379 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001380 phys_soft = arm_smmu_iova_to_phys(domain, iova);
Shalaj Jain04059c52015-03-03 13:34:59 -08001381 frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
1382 frsynra &= CBFRSYNRA_SID_MASK;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001383 tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
1384 if (!tmp || (tmp == -EBUSY)) {
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001385 dev_dbg(smmu->dev,
1386 "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1387 iova, fsr, fsynr, cfg->cbndx);
1388 dev_dbg(smmu->dev,
1389 "soft iova-to-phys=%pa\n", &phys_soft);
Patrick Daly5ba28112016-08-30 19:18:52 -07001390 ret = IRQ_HANDLED;
Shrenuj Bansald5083c02015-09-18 14:59:09 -07001391 resume = RESUME_TERMINATE;
Patrick Daly5ba28112016-08-30 19:18:52 -07001392 } else {
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001393 phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
1394 fsr);
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001395 if (__ratelimit(&_rs)) {
1396 dev_err(smmu->dev,
1397 "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1398 iova, fsr, fsynr, cfg->cbndx);
1399 dev_err(smmu->dev, "FAR = %016lx\n",
1400 (unsigned long)iova);
1401 dev_err(smmu->dev,
1402 "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
1403 fsr,
1404 (fsr & 0x02) ? "TF " : "",
1405 (fsr & 0x04) ? "AFF " : "",
1406 (fsr & 0x08) ? "PF " : "",
1407 (fsr & 0x10) ? "EF " : "",
1408 (fsr & 0x20) ? "TLBMCF " : "",
1409 (fsr & 0x40) ? "TLBLKF " : "",
1410 (fsr & 0x80) ? "MHF " : "",
1411 (fsr & 0x40000000) ? "SS " : "",
1412 (fsr & 0x80000000) ? "MULTI " : "");
1413 dev_err(smmu->dev,
1414 "soft iova-to-phys=%pa\n", &phys_soft);
Mitchel Humpherysd03b65d2015-11-05 11:50:29 -08001415 if (!phys_soft)
1416 dev_err(smmu->dev,
1417 "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
1418 dev_name(smmu->dev));
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001419 if (phys_atos)
1420 dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
1421 &phys_atos);
1422 else
1423 dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001424 dev_err(smmu->dev, "SID=0x%x\n", frsynra);
1425 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001426 ret = IRQ_NONE;
1427 resume = RESUME_TERMINATE;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001428 if (!non_fatal_fault) {
1429 dev_err(smmu->dev,
1430 "Unhandled arm-smmu context fault!\n");
1431 BUG();
1432 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001433 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001434
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001435 /*
1436 * If the client returns -EBUSY, do not clear FSR and do not RESUME
1437 * if stalled. This is required to keep the IOMMU client stalled on
1438 * the outstanding fault. This gives the client a chance to take any
1439 * debug action and then terminate the stalled transaction.
1440 * So, the sequence in case of stall on fault should be:
1441 * 1) Do not clear FSR or write to RESUME here
1442 * 2) Client takes any debug action
1443 * 3) Client terminates the stalled transaction and resumes the IOMMU
1444 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1445 * not before so that the fault remains outstanding. This ensures
1446 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1447 * need to be terminated.
1448 */
1449 if (tmp != -EBUSY) {
1450 /* Clear the faulting FSR */
1451 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001452
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001453 /*
1454 * Barrier required to ensure that the FSR is cleared
1455 * before resuming SMMU operation
1456 */
1457 wmb();
1458
1459 /* Retry or terminate any stalled transactions */
1460 if (fsr & FSR_SS)
1461 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1462 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001463
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001464out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001465 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001466
Patrick Daly5ba28112016-08-30 19:18:52 -07001467 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001468}
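
/*
 * Illustrative client-side sketch (not part of the driver): a master
 * driver that wants the stall-on-fault sequence described in the comment
 * above can register a fault handler and return -EBUSY to keep the
 * transaction stalled while it gathers debug state. my_smmu_fault_handler
 * and my_register_fault_handler are placeholder names.
 */
static int my_smmu_fault_handler(struct iommu_domain *domain,
				 struct device *dev, unsigned long iova,
				 int flags, void *token)
{
	/*
	 * Returning -EBUSY tells arm_smmu_context_fault() not to clear
	 * FSR or write RESUME; the client later terminates the stalled
	 * transaction and clears FSR itself.
	 */
	dev_err(dev, "client fault at iova 0x%lx, flags 0x%x\n", iova, flags);
	return -EBUSY;
}

/* Registration, typically done once after attaching the domain: */
static void my_register_fault_handler(struct iommu_domain *my_domain)
{
	iommu_set_fault_handler(my_domain, my_smmu_fault_handler, NULL);
}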
1469
1470static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1471{
1472 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1473 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001474 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001475
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001476 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001477 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001478
Will Deacon45ae7cf2013-06-24 18:31:25 +01001479 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1480 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1481 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1482 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1483
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001484 if (!gfsr) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001485 arm_smmu_power_off(smmu->pwr);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001486 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001487 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001488
Will Deacon45ae7cf2013-06-24 18:31:25 +01001489 dev_err_ratelimited(smmu->dev,
1490 "Unexpected global fault, this could be serious\n");
1491 dev_err_ratelimited(smmu->dev,
1492 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1493 gfsr, gfsynr0, gfsynr1, gfsynr2);
1494
1495 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001496 arm_smmu_power_off(smmu->pwr);
Will Deaconadaba322013-07-31 19:21:26 +01001497 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001498}
1499
Will Deacon518f7132014-11-14 17:17:54 +00001500static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1501 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001502{
Robin Murphyb94df6f2016-08-11 17:44:06 +01001503 u32 reg, reg2;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001504 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001505 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +01001506 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1507 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001508 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001509
Will Deacon45ae7cf2013-06-24 18:31:25 +01001510 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001511 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1512 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001513
Will Deacon4a1c93c2015-03-04 12:21:03 +00001514 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001515 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1516 reg = CBA2R_RW64_64BIT;
1517 else
1518 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001519 /* 16-bit VMIDs live in CBA2R */
1520 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001521 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001522
Will Deacon4a1c93c2015-03-04 12:21:03 +00001523 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
1524 }
1525
Will Deacon45ae7cf2013-06-24 18:31:25 +01001526 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001527 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001528 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001529 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001530
Will Deacon57ca90f2014-02-06 14:59:05 +00001531 /*
1532 * Use the weakest shareability/memory types, so they are
1533 * overridden by the ttbcr/pte.
1534 */
1535 if (stage1) {
1536 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1537 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001538 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1539 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001540 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001541 }
Will Deacon44680ee2014-06-25 11:29:12 +01001542 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001543
Will Deacon518f7132014-11-14 17:17:54 +00001544 /* TTBRs */
1545 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001546 u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001547
Robin Murphyb94df6f2016-08-11 17:44:06 +01001548 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1549 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
1550 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
1551 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
1552 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
1553 writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
1554 } else {
1555 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1556 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1557 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
1558 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
1559 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1560 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
1561 }
Will Deacon518f7132014-11-14 17:17:54 +00001562 } else {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001563 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001564 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +00001565 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001566
Will Deacon518f7132014-11-14 17:17:54 +00001567 /* TTBCR */
1568 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001569 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1570 reg = pgtbl_cfg->arm_v7s_cfg.tcr;
1571 reg2 = 0;
1572 } else {
1573 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1574 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
1575 reg2 |= TTBCR2_SEP_UPSTREAM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001576 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001577 if (smmu->version > ARM_SMMU_V1)
1578 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001579 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001580 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001581 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001582 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001583
Will Deacon518f7132014-11-14 17:17:54 +00001584 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001585 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001586 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1587 reg = pgtbl_cfg->arm_v7s_cfg.prrr;
1588 reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
1589 } else {
1590 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1591 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1592 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001593 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001594 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001595 }
1596
Will Deacon45ae7cf2013-06-24 18:31:25 +01001597 /* SCTLR */
Robin Murphyb94df6f2016-08-11 17:44:06 +01001598 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001599
Patrick Daly7f377fe2017-10-06 17:37:10 -07001600 /* Ensure bypass transactions are Non-shareable */
1601 reg |= SCTLR_SHCFG_NSH << SCTLR_SHCFG_SHIFT;
1602
Charan Teja Reddyc682e472017-04-20 19:11:20 +05301603 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_CB_STALL_DISABLE)) {
1604 reg &= ~SCTLR_CFCFG;
1605 reg |= SCTLR_HUPCF;
1606 }
1607
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001608 if ((!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
1609 !(smmu_domain->attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) ||
1610 !stage1)
Patrick Dalye62d3362016-03-15 18:58:28 -07001611 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001612 if (stage1)
1613 reg |= SCTLR_S1_ASIDPNE;
1614#ifdef __BIG_ENDIAN
1615 reg |= SCTLR_E;
1616#endif
Will Deacon25724842013-08-21 13:49:53 +01001617 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001618}
1619
Patrick Dalyc190d932016-08-30 17:23:28 -07001620static int arm_smmu_init_asid(struct iommu_domain *domain,
1621 struct arm_smmu_device *smmu)
1622{
1623 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1624 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1625 bool dynamic = is_dynamic_domain(domain);
1626 int ret;
1627
1628 if (!dynamic) {
1629 cfg->asid = cfg->cbndx + 1;
1630 } else {
1631 mutex_lock(&smmu->idr_mutex);
1632 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1633 smmu->num_context_banks + 2,
1634 MAX_ASID + 1, GFP_KERNEL);
1635
1636 mutex_unlock(&smmu->idr_mutex);
1637 if (ret < 0) {
1638 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1639 ret);
1640 return ret;
1641 }
1642 cfg->asid = ret;
1643 }
1644 return 0;
1645}
1646
1647static void arm_smmu_free_asid(struct iommu_domain *domain)
1648{
1649 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1650 struct arm_smmu_device *smmu = smmu_domain->smmu;
1651 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1652 bool dynamic = is_dynamic_domain(domain);
1653
1654 if (cfg->asid == INVALID_ASID || !dynamic)
1655 return;
1656
1657 mutex_lock(&smmu->idr_mutex);
1658 idr_remove(&smmu->asid_idr, cfg->asid);
1659 mutex_unlock(&smmu->idr_mutex);
1660}
1661
Will Deacon45ae7cf2013-06-24 18:31:25 +01001662static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Patrick Dalyea63baa2017-02-13 17:11:33 -08001663 struct arm_smmu_device *smmu,
1664 struct device *dev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001665{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001666 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001667 unsigned long ias, oas;
1668 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001669 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001670 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001671 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001672 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyce6786f2016-11-09 14:19:23 -08001673 unsigned long quirks = 0;
Patrick Dalyc190d932016-08-30 17:23:28 -07001674 bool dynamic;
Patrick Dalyda765c62017-09-11 16:31:07 -07001675 const struct iommu_gather_ops *tlb;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001676
Will Deacon518f7132014-11-14 17:17:54 +00001677 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001678 if (smmu_domain->smmu)
1679 goto out_unlock;
1680
Patrick Dalyc190d932016-08-30 17:23:28 -07001681 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1682 smmu_domain->cfg.asid = INVALID_ASID;
1683
Patrick Dalyc190d932016-08-30 17:23:28 -07001684 dynamic = is_dynamic_domain(domain);
1685 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1686 dev_err(smmu->dev, "dynamic domains not supported\n");
1687 ret = -EPERM;
1688 goto out_unlock;
1689 }
1690
Will Deaconc752ce42014-06-25 22:46:31 +01001691 /*
1692 * Mapping the requested stage onto what we support is surprisingly
1693 * complicated, mainly because the spec allows S1+S2 SMMUs without
1694 * support for nested translation. That means we end up with the
1695 * following table:
1696 *
1697 * Requested Supported Actual
1698 * S1 N S1
1699 * S1 S1+S2 S1
1700 * S1 S2 S2
1701 * S1 S1 S1
1702 * N N N
1703 * N S1+S2 S2
1704 * N S2 S2
1705 * N S1 S1
1706 *
1707 * Note that you can't actually request stage-2 mappings.
1708 */
1709 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1710 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1711 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1712 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1713
Robin Murphy7602b872016-04-28 17:12:09 +01001714 /*
1715 * Choosing a suitable context format is even more fiddly. Until we
1716 * grow some way for the caller to express a preference, and/or move
1717 * the decision into the io-pgtable code where it arguably belongs,
1718 * just aim for the closest thing to the rest of the system, and hope
1719 * that the hardware isn't esoteric enough that we can't assume AArch64
1720 * support to be a superset of AArch32 support...
1721 */
1722 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1723 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001724 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
1725 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
1726 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
1727 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
1728 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +01001729 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1730 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1731 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1732 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1733 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1734
1735 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1736 ret = -EINVAL;
1737 goto out_unlock;
1738 }
1739
Will Deaconc752ce42014-06-25 22:46:31 +01001740 switch (smmu_domain->stage) {
1741 case ARM_SMMU_DOMAIN_S1:
1742 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1743 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001744 ias = smmu->va_size;
1745 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001746 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001747 fmt = ARM_64_LPAE_S1;
Patrick Daly4423d3e2017-05-04 18:17:51 -07001748 if (smmu->options & ARM_SMMU_OPT_3LVL_TABLES)
1749 ias = min(ias, 39UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001750 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +00001751 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001752 ias = min(ias, 32UL);
1753 oas = min(oas, 40UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001754 } else {
1755 fmt = ARM_V7S;
1756 ias = min(ias, 32UL);
1757 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +01001758 }
Will Deaconc752ce42014-06-25 22:46:31 +01001759 break;
1760 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001761 /*
1762 * We will likely want to change this if/when KVM gets
1763 * involved.
1764 */
Will Deaconc752ce42014-06-25 22:46:31 +01001765 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001766 cfg->cbar = CBAR_TYPE_S2_TRANS;
1767 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001768 ias = smmu->ipa_size;
1769 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001770 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001771 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001772 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001773 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001774 ias = min(ias, 40UL);
1775 oas = min(oas, 40UL);
1776 }
Will Deaconc752ce42014-06-25 22:46:31 +01001777 break;
1778 default:
1779 ret = -EINVAL;
1780 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001781 }
1782
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001783 if (is_fast)
1784 fmt = ARM_V8L_FAST;
1785
Patrick Dalyce6786f2016-11-09 14:19:23 -08001786 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
1787 quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
Liam Mark53cf2342016-12-20 11:36:07 -08001788 if (is_iommu_pt_coherent(smmu_domain))
1789 quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
Patrick Daly49ccf332017-09-27 15:10:29 -07001790 if ((quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT) &&
1791 (smmu->model == QCOM_SMMUV500))
1792 quirks |= IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001793
Patrick Dalyda765c62017-09-11 16:31:07 -07001794 tlb = &arm_smmu_gather_ops;
1795 if (smmu->options & ARM_SMMU_OPT_QCOM_MMU500_ERRATA1)
1796 tlb = &qsmmuv500_errata1_smmu_gather_ops;
1797
Patrick Dalyda688822017-05-17 20:12:48 -07001798 ret = arm_smmu_alloc_cb(domain, smmu, dev);
1799 if (ret < 0)
1800 goto out_unlock;
1801 cfg->cbndx = ret;
1802
Robin Murphyb7862e32016-04-13 18:13:03 +01001803 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001804 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1805 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001806 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001807 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001808 }
1809
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001810 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
Patrick Dalyce6786f2016-11-09 14:19:23 -08001811 .quirks = quirks,
Robin Murphyd5466352016-05-09 17:20:09 +01001812 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +00001813 .ias = ias,
1814 .oas = oas,
Patrick Dalyda765c62017-09-11 16:31:07 -07001815 .tlb = tlb,
Robin Murphy2df7a252015-07-29 19:46:06 +01001816 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +00001817 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001818
Will Deacon518f7132014-11-14 17:17:54 +00001819 smmu_domain->smmu = smmu;
Patrick Dalyea63baa2017-02-13 17:11:33 -08001820 smmu_domain->dev = dev;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001821 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1822 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001823 if (!pgtbl_ops) {
1824 ret = -ENOMEM;
1825 goto out_clear_smmu;
1826 }
1827
Patrick Dalyc11d1082016-09-01 15:52:44 -07001828 /*
1829 * assign any page table memory that might have been allocated
1830 * during alloc_io_pgtable_ops
1831 */
Patrick Dalye271f212016-10-04 13:24:49 -07001832 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001833 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001834 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001835
Robin Murphyd5466352016-05-09 17:20:09 +01001836 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001837 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Robin Murphyd7a8d042016-09-12 17:13:58 +01001838 domain->geometry.aperture_end = (1UL << ias) - 1;
1839 domain->geometry.force_aperture = true;
Will Deacon518f7132014-11-14 17:17:54 +00001840
Patrick Dalyc190d932016-08-30 17:23:28 -07001841 /* Assign an asid */
1842 ret = arm_smmu_init_asid(domain, smmu);
1843 if (ret)
1844 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001845
Patrick Dalyc190d932016-08-30 17:23:28 -07001846 if (!dynamic) {
1847 /* Initialise the context bank with our page table cfg */
1848 arm_smmu_init_context_bank(smmu_domain,
1849 &smmu_domain->pgtbl_cfg);
1850
Patrick Daly03330cc2017-08-11 14:56:38 -07001851 arm_smmu_arch_init_context_bank(smmu_domain, dev);
1852
Patrick Dalyc190d932016-08-30 17:23:28 -07001853 /*
1854 * Request context fault interrupt. Do this last to avoid the
1855 * handler seeing a half-initialised domain state.
1856 */
1857 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1858 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001859 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1860 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001861 if (ret < 0) {
1862 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1863 cfg->irptndx, irq);
1864 cfg->irptndx = INVALID_IRPTNDX;
1865 goto out_clear_smmu;
1866 }
1867 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001868 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001869 }
Will Deacon518f7132014-11-14 17:17:54 +00001870 mutex_unlock(&smmu_domain->init_mutex);
1871
1872 /* Publish page table ops for map/unmap */
1873 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001874 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001875
Will Deacon518f7132014-11-14 17:17:54 +00001876out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001877 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001878 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001879out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001880 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001881 return ret;
1882}
1883
Patrick Daly77db4f92016-10-14 15:34:10 -07001884static void arm_smmu_domain_reinit(struct arm_smmu_domain *smmu_domain)
1885{
1886 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1887 smmu_domain->cfg.cbndx = INVALID_CBNDX;
1888 smmu_domain->secure_vmid = VMID_INVAL;
1889}
1890
Will Deacon45ae7cf2013-06-24 18:31:25 +01001891static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1892{
Joerg Roedel1d672632015-03-26 13:43:10 +01001893 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001894 struct arm_smmu_device *smmu = smmu_domain->smmu;
1895 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01001896 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001897 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07001898 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001899 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001900
Robin Murphy7e96c742016-09-14 15:26:46 +01001901 if (!smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001902 return;
1903
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001904 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001905 if (ret) {
1906 WARN_ONCE(ret, "Woops, powering on smmu %p failed. Leaking context bank\n",
1907 smmu);
1908 return;
1909 }
1910
Patrick Dalyc190d932016-08-30 17:23:28 -07001911 dynamic = is_dynamic_domain(domain);
1912 if (dynamic) {
1913 arm_smmu_free_asid(domain);
1914 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001915 arm_smmu_power_off(smmu->pwr);
Patrick Dalye271f212016-10-04 13:24:49 -07001916 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001917 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001918 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001919 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly77db4f92016-10-14 15:34:10 -07001920 arm_smmu_domain_reinit(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001921 return;
1922 }
1923
Will Deacon518f7132014-11-14 17:17:54 +00001924 /*
1925 * Disable the context bank and free the page tables before freeing
1926 * it.
1927 */
Will Deacon44680ee2014-06-25 11:29:12 +01001928 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001929 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01001930
Will Deacon44680ee2014-06-25 11:29:12 +01001931 if (cfg->irptndx != INVALID_IRPTNDX) {
1932 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08001933 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001934 }
1935
Markus Elfring44830b02015-11-06 18:32:41 +01001936 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07001937 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001938 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001939 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001940 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001941 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001942
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001943 arm_smmu_power_off(smmu->pwr);
Patrick Daly77db4f92016-10-14 15:34:10 -07001944 arm_smmu_domain_reinit(smmu_domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001945}
1946
Joerg Roedel1d672632015-03-26 13:43:10 +01001947static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001948{
1949 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001950
Patrick Daly09801312016-08-29 17:02:52 -07001951 /* Do not support DOMAIN_DMA for now */
1952 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01001953 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001954 /*
1955 * Allocate the domain and initialise some of its data structures.
1956 * We can't really do anything meaningful until we've added a
1957 * master.
1958 */
1959 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1960 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001961 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001962
Robin Murphy7e96c742016-09-14 15:26:46 +01001963 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
1964 iommu_get_dma_cookie(&smmu_domain->domain))) {
Robin Murphy9adb9592016-01-26 18:06:36 +00001965 kfree(smmu_domain);
1966 return NULL;
1967 }
1968
Will Deacon518f7132014-11-14 17:17:54 +00001969 mutex_init(&smmu_domain->init_mutex);
1970 spin_lock_init(&smmu_domain->pgtbl_lock);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001971 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
1972 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07001973 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001974 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Patrick Daly77db4f92016-10-14 15:34:10 -07001975 arm_smmu_domain_reinit(smmu_domain);
Joerg Roedel1d672632015-03-26 13:43:10 +01001976
1977 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001978}
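
/*
 * Illustrative usage sketch (not part of the driver): a client of this
 * downstream driver typically allocates an unmanaged domain, optionally
 * sets attributes such as DOMAIN_ATTR_ATOMIC before the first attach
 * (attributes are consumed when the context is finalised on attach), and
 * then attaches its device. example_setup_domain and client_dev are
 * placeholder names.
 */
static struct iommu_domain *example_setup_domain(struct device *client_dev)
{
	struct iommu_domain *domain;
	int atomic_ctx = 1;

	domain = iommu_domain_alloc(client_dev->bus);
	if (!domain)
		return NULL;

	/* Must happen before attach so arm_smmu_init_domain_context() sees it */
	iommu_domain_set_attr(domain, DOMAIN_ATTR_ATOMIC, &atomic_ctx);

	if (iommu_attach_device(domain, client_dev)) {
		iommu_domain_free(domain);
		return NULL;
	}
	return domain;
}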
1979
Joerg Roedel1d672632015-03-26 13:43:10 +01001980static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001981{
Joerg Roedel1d672632015-03-26 13:43:10 +01001982 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01001983
1984 /*
1985 * Free the domain resources. We assume that all devices have
1986 * already been detached.
1987 */
Robin Murphy9adb9592016-01-26 18:06:36 +00001988 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001989 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001990 kfree(smmu_domain);
1991}
1992
Robin Murphy468f4942016-09-12 17:13:49 +01001993static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
1994{
1995 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphyd5b41782016-09-14 15:21:39 +01001996 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
Robin Murphy468f4942016-09-12 17:13:49 +01001997
1998 if (smr->valid)
1999 reg |= SMR_VALID;
2000 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
2001}
2002
Robin Murphya754fd12016-09-12 17:13:50 +01002003static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
2004{
2005 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
2006 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
2007 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
Patrick Daly7f377fe2017-10-06 17:37:10 -07002008 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT |
2009 S2CR_SHCFG_NSH << S2CR_SHCFG_SHIFT;
Robin Murphya754fd12016-09-12 17:13:50 +01002010
2011 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
2012}
2013
2014static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
2015{
2016 arm_smmu_write_s2cr(smmu, idx);
2017 if (smmu->smrs)
2018 arm_smmu_write_smr(smmu, idx);
2019}
2020
Robin Murphy6668f692016-09-12 17:13:54 +01002021static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy468f4942016-09-12 17:13:49 +01002022{
2023 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy6668f692016-09-12 17:13:54 +01002024 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002025
Robin Murphy6668f692016-09-12 17:13:54 +01002026 /* Stream indexing is blissfully easy */
2027 if (!smrs)
2028 return id;
Robin Murphy468f4942016-09-12 17:13:49 +01002029
Robin Murphy6668f692016-09-12 17:13:54 +01002030 /* Validating SMRs is... less so */
2031 for (i = 0; i < smmu->num_mapping_groups; ++i) {
2032 if (!smrs[i].valid) {
2033 /*
2034 * Note the first free entry we come across, which
2035 * we'll claim in the end if nothing else matches.
2036 */
2037 if (free_idx < 0)
2038 free_idx = i;
Robin Murphy468f4942016-09-12 17:13:49 +01002039 continue;
2040 }
Robin Murphy6668f692016-09-12 17:13:54 +01002041 /*
2042 * If the new entry is _entirely_ matched by an existing entry,
2043 * then reuse that, with the guarantee that there also cannot
2044 * be any subsequent conflicting entries. In normal use we'd
2045 * expect simply identical entries for this case, but there's
2046 * no harm in accommodating the generalisation.
2047 */
2048 if ((mask & smrs[i].mask) == mask &&
2049 !((id ^ smrs[i].id) & ~smrs[i].mask))
2050 return i;
2051 /*
2052 * If the new entry has any other overlap with an existing one,
2053 * though, then there always exists at least one stream ID
2054 * which would cause a conflict, and we can't allow that risk.
2055 */
2056 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
2057 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002058 }
2059
Robin Murphy6668f692016-09-12 17:13:54 +01002060 return free_idx;
2061}
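
/*
 * Worked example for the matching rules above (illustrative numbers only):
 * suppose an existing valid SMR has id=0x400, mask=0xff, i.e. it matches
 * stream IDs 0x400-0x4ff. A new request with id=0x420, mask=0x0f is
 * entirely contained, since (0x0f & 0xff) == 0x0f and
 * ((0x420 ^ 0x400) & ~0xff) == 0, so the existing index is reused.
 * A new request with id=0x4c0, mask=0x1ff fails the containment test
 * ((0x1ff & 0xff) != 0x1ff) but still overlaps the existing entry, since
 * ((0x4c0 ^ 0x400) & ~(0xff | 0x1ff)) == 0, so -EINVAL is returned.
 */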
2062
2063static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
2064{
2065 if (--smmu->s2crs[idx].count)
2066 return false;
2067
2068 smmu->s2crs[idx] = s2cr_init_val;
2069 if (smmu->smrs)
2070 smmu->smrs[idx].valid = false;
2071
2072 return true;
2073}
2074
2075static int arm_smmu_master_alloc_smes(struct device *dev)
2076{
Robin Murphy06e393e2016-09-12 17:13:55 +01002077 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2078 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy6668f692016-09-12 17:13:54 +01002079 struct arm_smmu_device *smmu = cfg->smmu;
2080 struct arm_smmu_smr *smrs = smmu->smrs;
2081 struct iommu_group *group;
2082 int i, idx, ret;
2083
2084 mutex_lock(&smmu->stream_map_mutex);
2085 /* Figure out a viable stream map entry allocation */
Robin Murphy06e393e2016-09-12 17:13:55 +01002086 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002087 u16 sid = fwspec->ids[i];
2088 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
2089
Robin Murphy6668f692016-09-12 17:13:54 +01002090 if (idx != INVALID_SMENDX) {
2091 ret = -EEXIST;
2092 goto out_err;
2093 }
2094
Robin Murphy7e96c742016-09-14 15:26:46 +01002095 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy6668f692016-09-12 17:13:54 +01002096 if (ret < 0)
2097 goto out_err;
2098
2099 idx = ret;
2100 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002101 smrs[idx].id = sid;
2102 smrs[idx].mask = mask;
Robin Murphy6668f692016-09-12 17:13:54 +01002103 smrs[idx].valid = true;
2104 }
2105 smmu->s2crs[idx].count++;
2106 cfg->smendx[i] = (s16)idx;
2107 }
2108
2109 group = iommu_group_get_for_dev(dev);
2110 if (!group)
2111 group = ERR_PTR(-ENOMEM);
2112 if (IS_ERR(group)) {
2113 ret = PTR_ERR(group);
2114 goto out_err;
2115 }
2116 iommu_group_put(group);
Robin Murphy468f4942016-09-12 17:13:49 +01002117
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002118 /* It worked! Don't poke the actual hardware until we've attached */
2119 for_each_cfg_sme(fwspec, i, idx)
Robin Murphy6668f692016-09-12 17:13:54 +01002120 smmu->s2crs[idx].group = group;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002121
Robin Murphy6668f692016-09-12 17:13:54 +01002122 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002123 return 0;
2124
Robin Murphy6668f692016-09-12 17:13:54 +01002125out_err:
Robin Murphy468f4942016-09-12 17:13:49 +01002126 while (i--) {
Robin Murphy6668f692016-09-12 17:13:54 +01002127 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy468f4942016-09-12 17:13:49 +01002128 cfg->smendx[i] = INVALID_SMENDX;
2129 }
Robin Murphy6668f692016-09-12 17:13:54 +01002130 mutex_unlock(&smmu->stream_map_mutex);
2131 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002132}
2133
Robin Murphy06e393e2016-09-12 17:13:55 +01002134static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002135{
Robin Murphy06e393e2016-09-12 17:13:55 +01002136 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
2137 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy1fb519a2016-09-12 17:13:53 +01002138 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01002139
Robin Murphy6668f692016-09-12 17:13:54 +01002140 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002141 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002142 if (arm_smmu_free_sme(smmu, idx))
2143 arm_smmu_write_sme(smmu, idx);
Robin Murphy468f4942016-09-12 17:13:49 +01002144 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002145 }
Robin Murphy6668f692016-09-12 17:13:54 +01002146 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002147}
2148
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002149static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
2150 struct iommu_fwspec *fwspec)
2151{
2152 struct arm_smmu_device *smmu = smmu_domain->smmu;
2153 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2154 int i, idx;
2155 const struct iommu_gather_ops *tlb;
2156
2157 tlb = smmu_domain->pgtbl_cfg.tlb;
2158
2159 mutex_lock(&smmu->stream_map_mutex);
2160 for_each_cfg_sme(fwspec, i, idx) {
2161 WARN_ON(s2cr[idx].attach_count == 0);
2162 s2cr[idx].attach_count -= 1;
2163
2164 if (s2cr[idx].attach_count > 0)
2165 continue;
2166
2167 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
2168 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
2169 }
2170 mutex_unlock(&smmu->stream_map_mutex);
2171
2172 /* Ensure there are no stale mappings for this context bank */
2173 tlb->tlb_flush_all(smmu_domain);
2174}
2175
Will Deacon45ae7cf2013-06-24 18:31:25 +01002176static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphy06e393e2016-09-12 17:13:55 +01002177 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002178{
Will Deacon44680ee2014-06-25 11:29:12 +01002179 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002180 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2181 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
2182 u8 cbndx = smmu_domain->cfg.cbndx;
Robin Murphy6668f692016-09-12 17:13:54 +01002183 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002184
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002185 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002186 for_each_cfg_sme(fwspec, i, idx) {
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002187 if (s2cr[idx].attach_count++ > 0)
Robin Murphy6668f692016-09-12 17:13:54 +01002188 continue;
Robin Murphya754fd12016-09-12 17:13:50 +01002189
2190 s2cr[idx].type = type;
2191 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
2192 s2cr[idx].cbndx = cbndx;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002193 arm_smmu_write_sme(smmu, idx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002194 }
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002195 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002196
2197 return 0;
2198}
2199
Patrick Daly09801312016-08-29 17:02:52 -07002200static void arm_smmu_detach_dev(struct iommu_domain *domain,
2201 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002202{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002203 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07002204 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002205 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly09801312016-08-29 17:02:52 -07002206 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07002207 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07002208
2209 if (dynamic)
2210 return;
2211
Patrick Daly09801312016-08-29 17:02:52 -07002212 if (!smmu) {
2213 dev_err(dev, "Domain not attached; cannot detach!\n");
2214 return;
2215 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002216
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002217 arm_smmu_domain_remove_master(smmu_domain, fwspec);
2218
Patrick Daly8befb662016-08-17 20:03:28 -07002219 /* Remove additional vote for atomic power */
2220 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002221 WARN_ON(arm_smmu_power_on_atomic(smmu->pwr));
2222 arm_smmu_power_off(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07002223 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002224}
2225
Patrick Dalye271f212016-10-04 13:24:49 -07002226static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07002227{
Patrick Dalye271f212016-10-04 13:24:49 -07002228 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002229 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2230 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
2231 int source_vmid = VMID_HLOS;
2232 struct arm_smmu_pte_info *pte_info, *temp;
2233
Patrick Dalye271f212016-10-04 13:24:49 -07002234 if (!arm_smmu_is_domain_secure(smmu_domain))
2235 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002236
Patrick Dalye271f212016-10-04 13:24:49 -07002237 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002238 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2239 PAGE_SIZE, &source_vmid, 1,
2240 dest_vmids, dest_perms, 2);
2241 if (WARN_ON(ret))
2242 break;
2243 }
2244
2245 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2246 entry) {
2247 list_del(&pte_info->entry);
2248 kfree(pte_info);
2249 }
Patrick Dalye271f212016-10-04 13:24:49 -07002250 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002251}
2252
2253static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2254{
2255 int ret;
2256 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002257 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002258 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2259 struct arm_smmu_pte_info *pte_info, *temp;
2260
Patrick Dalye271f212016-10-04 13:24:49 -07002261 if (!arm_smmu_is_domain_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002262 return;
2263
2264 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2265 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2266 PAGE_SIZE, source_vmlist, 2,
2267 &dest_vmids, &dest_perms, 1);
2268 if (WARN_ON(ret))
2269 break;
2270 free_pages_exact(pte_info->virt_addr, pte_info->size);
2271 }
2272
2273 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2274 entry) {
2275 list_del(&pte_info->entry);
2276 kfree(pte_info);
2277 }
2278}
2279
2280static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2281{
2282 struct arm_smmu_domain *smmu_domain = cookie;
2283 struct arm_smmu_pte_info *pte_info;
2284
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002285 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002286
2287 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2288 if (!pte_info)
2289 return;
2290
2291 pte_info->virt_addr = addr;
2292 pte_info->size = size;
2293 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2294}
2295
2296static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2297{
2298 struct arm_smmu_domain *smmu_domain = cookie;
2299 struct arm_smmu_pte_info *pte_info;
2300
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002301 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002302
2303 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2304 if (!pte_info)
2305 return -ENOMEM;
2306 pte_info->virt_addr = addr;
2307 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2308 return 0;
2309}
2310
Will Deacon45ae7cf2013-06-24 18:31:25 +01002311static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2312{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002313 int ret;
Robin Murphy06e393e2016-09-12 17:13:55 +01002314 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Will Deacon518f7132014-11-14 17:17:54 +00002315 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +01002316 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly8befb662016-08-17 20:03:28 -07002317 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002318
Robin Murphy06e393e2016-09-12 17:13:55 +01002319 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002320 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2321 return -ENXIO;
2322 }
Robin Murphy06e393e2016-09-12 17:13:55 +01002323
Robin Murphy4f79b142016-10-17 12:06:21 +01002324 /*
2325 * FIXME: The arch/arm DMA API code tries to attach devices to its own
2326 * domains between of_xlate() and add_device() - we have no way to cope
2327 * with that, so until ARM gets converted to rely on groups and default
2328 * domains, just say no (but more politely than by dereferencing NULL).
2329 * This should be at least a WARN_ON once that's sorted.
2330 */
2331 if (!fwspec->iommu_priv)
2332 return -ENODEV;
2333
Robin Murphy06e393e2016-09-12 17:13:55 +01002334 smmu = fwspec_smmu(fwspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002335
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002336 /* Enable Clocks and Power */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002337 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002338 if (ret)
2339 return ret;
2340
Will Deacon518f7132014-11-14 17:17:54 +00002341 /* Ensure that the domain is finalised */
Patrick Dalyea63baa2017-02-13 17:11:33 -08002342 ret = arm_smmu_init_domain_context(domain, smmu, dev);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002343 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002344 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002345
Patrick Dalyc190d932016-08-30 17:23:28 -07002346 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002347 if (is_dynamic_domain(domain)) {
2348 ret = 0;
2349 goto out_power_off;
2350 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002351
Will Deacon45ae7cf2013-06-24 18:31:25 +01002352 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002353 * Sanity check the domain. We don't support domains across
2354 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002355 */
Robin Murphy06e393e2016-09-12 17:13:55 +01002356 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002357 dev_err(dev,
2358 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002359 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002360 ret = -EINVAL;
2361 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002362 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002363
2364 /* Looks ok, so add the device to the domain */
Robin Murphy06e393e2016-09-12 17:13:55 +01002365 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002366
2367out_power_off:
Patrick Daly466237d2016-10-14 15:59:14 -07002368 /*
2369 * Keep an additional vote for non-atomic power until domain is
2370 * detached
2371 */
2372 if (!ret && atomic_domain) {
2373 WARN_ON(arm_smmu_power_on(smmu->pwr));
2374 arm_smmu_power_off_atomic(smmu->pwr);
2375 }
2376
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002377 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002378
Will Deacon45ae7cf2013-06-24 18:31:25 +01002379 return ret;
2380}
2381
Will Deacon45ae7cf2013-06-24 18:31:25 +01002382static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002383 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002384{
Will Deacon518f7132014-11-14 17:17:54 +00002385 int ret;
2386 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002387 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002388	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002389
Will Deacon518f7132014-11-14 17:17:54 +00002390 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002391 return -ENODEV;
2392
Patrick Dalye271f212016-10-04 13:24:49 -07002393 arm_smmu_secure_domain_lock(smmu_domain);
2394
Will Deacon518f7132014-11-14 17:17:54 +00002395 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2396 ret = ops->map(ops, iova, paddr, size, prot);
2397 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002398
2399 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002400 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002401
Will Deacon518f7132014-11-14 17:17:54 +00002402 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002403}
2404
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002405static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
2406 dma_addr_t iova)
2407{
2408 uint64_t ret;
2409 unsigned long flags;
2410 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2411 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2412
2413 if (!ops)
2414 return 0;
2415
2416 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2417 ret = ops->iova_to_pte(ops, iova);
2418 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2419 return ret;
2420}
2421
Will Deacon45ae7cf2013-06-24 18:31:25 +01002422static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2423 size_t size)
2424{
Will Deacon518f7132014-11-14 17:17:54 +00002425 size_t ret;
2426 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002427 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002428	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002429
Will Deacon518f7132014-11-14 17:17:54 +00002430 if (!ops)
2431 return 0;
2432
Patrick Daly8befb662016-08-17 20:03:28 -07002433 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002434 if (ret)
2435 return ret;
2436
Patrick Dalye271f212016-10-04 13:24:49 -07002437 arm_smmu_secure_domain_lock(smmu_domain);
2438
Will Deacon518f7132014-11-14 17:17:54 +00002439 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2440 ret = ops->unmap(ops, iova, size);
2441 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002442
Patrick Daly8befb662016-08-17 20:03:28 -07002443 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002444 /*
2445 * While splitting up block mappings, we might allocate page table
 2446	 * memory during unmap, so the vmids need to be assigned to the
2447 * memory here as well.
2448 */
2449 arm_smmu_assign_table(smmu_domain);
 2450	/* Also unassign any pages that were freed during unmap */
2451 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002452 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002453 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002454}
2455
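/*
 * arm_smmu_map_sg() maps the scatterlist in batches, accumulating entries
 * until a batch reaches roughly MAX_MAP_SG_BATCH_SIZE, and takes/releases the
 * page-table spinlock once per batch, which bounds how long the lock is held
 * with interrupts disabled.
 */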
Patrick Daly88d321d2017-02-09 18:02:13 -08002456#define MAX_MAP_SG_BATCH_SIZE (SZ_4M)
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002457static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2458 struct scatterlist *sg, unsigned int nents, int prot)
2459{
2460 int ret;
Patrick Daly88d321d2017-02-09 18:02:13 -08002461 size_t size, batch_size, size_to_unmap = 0;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002462 unsigned long flags;
2463 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2464 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Patrick Daly88d321d2017-02-09 18:02:13 -08002465 unsigned int idx_start, idx_end;
2466 struct scatterlist *sg_start, *sg_end;
2467 unsigned long __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002468
2469 if (!ops)
2470 return -ENODEV;
2471
Patrick Daly4b9a7ad2017-09-22 17:31:13 -07002472 arm_smmu_secure_domain_lock(smmu_domain);
2473
Patrick Daly88d321d2017-02-09 18:02:13 -08002474 __saved_iova_start = iova;
2475 idx_start = idx_end = 0;
2476 sg_start = sg_end = sg;
2477 while (idx_end < nents) {
2478 batch_size = sg_end->length;
2479 sg_end = sg_next(sg_end);
2480 idx_end++;
2481 while ((idx_end < nents) &&
2482 (batch_size + sg_end->length < MAX_MAP_SG_BATCH_SIZE)) {
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002483
Patrick Daly88d321d2017-02-09 18:02:13 -08002484 batch_size += sg_end->length;
2485 sg_end = sg_next(sg_end);
2486 idx_end++;
2487 }
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002488
Patrick Daly88d321d2017-02-09 18:02:13 -08002489 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2490 ret = ops->map_sg(ops, iova, sg_start, idx_end - idx_start,
2491 prot, &size);
2492 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2493 /* Returns 0 on error */
2494 if (!ret) {
2495 size_to_unmap = iova + size - __saved_iova_start;
2496 goto out;
2497 }
2498
2499 iova += batch_size;
2500 idx_start = idx_end;
2501 sg_start = sg_end;
2502 }
2503
2504out:
Patrick Dalyc11d1082016-09-01 15:52:44 -07002505 arm_smmu_assign_table(smmu_domain);
2506
Patrick Daly88d321d2017-02-09 18:02:13 -08002507 if (size_to_unmap) {
2508 arm_smmu_unmap(domain, __saved_iova_start, size_to_unmap);
2509 iova = __saved_iova_start;
2510 }
Patrick Daly4b9a7ad2017-09-22 17:31:13 -07002511 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly88d321d2017-02-09 18:02:13 -08002512 return iova - __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002513}
2514
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002515static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyad441dd2016-09-15 15:50:46 -07002516 dma_addr_t iova)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002517{
Joerg Roedel1d672632015-03-26 13:43:10 +01002518 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002519 struct arm_smmu_device *smmu = smmu_domain->smmu;
2520 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 2521	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2522 struct device *dev = smmu->dev;
2523 void __iomem *cb_base;
2524 u32 tmp;
2525 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002526 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002527
2528 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2529
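	/*
	 * Perform a hardware address translation: write the page-aligned VA
	 * to ATS1PR, poll ATSR until the walk completes, then read the
	 * resulting PA (or fault indication) back from PAR.
	 */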
Robin Murphy661d9622015-05-27 17:09:34 +01002530 /* ATS1 registers can only be written atomically */
2531 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002532 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002533 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2534 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002535 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002536
2537 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2538 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002539 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002540 dev_err(dev,
2541 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2542 &iova, &phys);
2543 phys = 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002544 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002545 }
2546
Robin Murphyf9a05f02016-04-13 18:13:01 +01002547 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002548 if (phys & CB_PAR_F) {
2549 dev_err(dev, "translation fault!\n");
2550 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002551 phys = 0;
2552 } else {
2553 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002554 }
Patrick Dalyad441dd2016-09-15 15:50:46 -07002555
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002556 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002557}
2558
Will Deacon45ae7cf2013-06-24 18:31:25 +01002559static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002560 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002561{
Will Deacon518f7132014-11-14 17:17:54 +00002562 phys_addr_t ret;
2563 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002564 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002565	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002566
Will Deacon518f7132014-11-14 17:17:54 +00002567 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002568 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002569
Will Deacon518f7132014-11-14 17:17:54 +00002570 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002571 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002572 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002573
Will Deacon518f7132014-11-14 17:17:54 +00002574 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002575}
2576
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002577/*
 2578 * This function can sleep and cannot be called from atomic context. It will
 2579 * power on the register block if required. This restriction does not apply to
 2580 * the original iova_to_phys() op.
2581 */
2582static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2583 dma_addr_t iova)
2584{
2585 phys_addr_t ret = 0;
2586 unsigned long flags;
2587 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly62ba1922017-08-30 16:47:18 -07002588 struct arm_smmu_device *smmu = smmu_domain->smmu;
2589
2590 if (smmu->options & ARM_SMMU_OPT_DISABLE_ATOS)
2591 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002592
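	/*
	 * An implementation-specific ATOS handler (e.g. qsmmuv2's, below)
	 * takes precedence; otherwise fall back to the generic ATS1PR-based
	 * walk, which is only valid for stage-1 domains on SMMUs with
	 * ARM_SMMU_FEAT_TRANS_OPS.
	 */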
Patrick Dalyad441dd2016-09-15 15:50:46 -07002593 if (smmu_domain->smmu->arch_ops &&
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002594 smmu_domain->smmu->arch_ops->iova_to_phys_hard) {
2595 ret = smmu_domain->smmu->arch_ops->iova_to_phys_hard(
Patrick Dalyad441dd2016-09-15 15:50:46 -07002596 domain, iova);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002597 return ret;
2598 }
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002599
2600 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2601 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2602 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyad441dd2016-09-15 15:50:46 -07002603 ret = __arm_smmu_iova_to_phys_hard(domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002604
2605 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2606
2607 return ret;
2608}
2609
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002610static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002611{
Will Deacond0948942014-06-24 17:30:10 +01002612 switch (cap) {
2613 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002614 /*
2615 * Return true here as the SMMU can always send out coherent
2616 * requests.
2617 */
2618 return true;
Will Deacond0948942014-06-24 17:30:10 +01002619 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002620 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002621 case IOMMU_CAP_NOEXEC:
2622 return true;
Will Deacond0948942014-06-24 17:30:10 +01002623 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002624 return false;
Will Deacond0948942014-06-24 17:30:10 +01002625 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002626}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002627
Patrick Daly8e3371a2017-02-13 22:14:53 -08002628static struct arm_smmu_device *arm_smmu_get_by_list(struct device_node *np)
2629{
2630 struct arm_smmu_device *smmu;
2631 unsigned long flags;
2632
2633 spin_lock_irqsave(&arm_smmu_devices_lock, flags);
2634 list_for_each_entry(smmu, &arm_smmu_devices, list) {
2635 if (smmu->dev->of_node == np) {
2636 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2637 return smmu;
2638 }
2639 }
2640 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2641 return NULL;
2642}
2643
Robin Murphy7e96c742016-09-14 15:26:46 +01002644static int arm_smmu_match_node(struct device *dev, void *data)
2645{
2646 return dev->of_node == data;
2647}
2648
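/*
 * Resolve a device_node to its arm_smmu_device: first look for a device
 * bound to this driver, then fall back to scanning the driver's
 * arm_smmu_devices list.
 */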
2649static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
2650{
2651 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
2652 np, arm_smmu_match_node);
2653 put_device(dev);
Patrick Daly8e3371a2017-02-13 22:14:53 -08002654 return dev ? dev_get_drvdata(dev) : arm_smmu_get_by_list(np);
Robin Murphy7e96c742016-09-14 15:26:46 +01002655}
2656
Will Deacon03edb222015-01-19 14:27:33 +00002657static int arm_smmu_add_device(struct device *dev)
2658{
Robin Murphy06e393e2016-09-12 17:13:55 +01002659 struct arm_smmu_device *smmu;
Robin Murphyd5b41782016-09-14 15:21:39 +01002660 struct arm_smmu_master_cfg *cfg;
Robin Murphy7e96c742016-09-14 15:26:46 +01002661 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyd5b41782016-09-14 15:21:39 +01002662 int i, ret;
2663
Robin Murphy7e96c742016-09-14 15:26:46 +01002664 if (using_legacy_binding) {
2665 ret = arm_smmu_register_legacy_master(dev, &smmu);
2666 fwspec = dev->iommu_fwspec;
2667 if (ret)
2668 goto out_free;
Robin Murphy22e6f6c2016-11-02 17:31:32 +00002669 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002670 smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
2671 if (!smmu)
2672 return -ENODEV;
2673 } else {
2674 return -ENODEV;
2675 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002676
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002677 ret = arm_smmu_power_on(smmu->pwr);
2678 if (ret)
2679 goto out_free;
2680
Robin Murphyd5b41782016-09-14 15:21:39 +01002681 ret = -EINVAL;
Robin Murphy06e393e2016-09-12 17:13:55 +01002682 for (i = 0; i < fwspec->num_ids; i++) {
2683 u16 sid = fwspec->ids[i];
Robin Murphy7e96c742016-09-14 15:26:46 +01002684 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyd5b41782016-09-14 15:21:39 +01002685
Robin Murphy06e393e2016-09-12 17:13:55 +01002686 if (sid & ~smmu->streamid_mask) {
Robin Murphyd5b41782016-09-14 15:21:39 +01002687 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002688 sid, smmu->streamid_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002689 goto out_pwr_off;
Robin Murphyd5b41782016-09-14 15:21:39 +01002690 }
Robin Murphy7e96c742016-09-14 15:26:46 +01002691 if (mask & ~smmu->smr_mask_mask) {
2692 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
2693 sid, smmu->smr_mask_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002694 goto out_pwr_off;
Robin Murphy7e96c742016-09-14 15:26:46 +01002695 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002696 }
Will Deacon03edb222015-01-19 14:27:33 +00002697
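	/*
	 * Allocate the master cfg with one smendx slot per fwspec stream ID;
	 * at this point 'i' equals fwspec->num_ids, so the offsetof() sizing
	 * covers exactly that many entries.
	 */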
Robin Murphy06e393e2016-09-12 17:13:55 +01002698 ret = -ENOMEM;
2699 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
2700 GFP_KERNEL);
2701 if (!cfg)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002702 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002703
2704 cfg->smmu = smmu;
2705 fwspec->iommu_priv = cfg;
2706 while (i--)
2707 cfg->smendx[i] = INVALID_SMENDX;
2708
Robin Murphy6668f692016-09-12 17:13:54 +01002709 ret = arm_smmu_master_alloc_smes(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002710 if (ret)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002711 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002712
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002713 arm_smmu_power_off(smmu->pwr);
Robin Murphy06e393e2016-09-12 17:13:55 +01002714 return 0;
Robin Murphyd5b41782016-09-14 15:21:39 +01002715
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002716out_pwr_off:
2717 arm_smmu_power_off(smmu->pwr);
Robin Murphyd5b41782016-09-14 15:21:39 +01002718out_free:
Robin Murphy06e393e2016-09-12 17:13:55 +01002719 if (fwspec)
2720 kfree(fwspec->iommu_priv);
2721 iommu_fwspec_free(dev);
Robin Murphyd5b41782016-09-14 15:21:39 +01002722 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00002723}
2724
Will Deacon45ae7cf2013-06-24 18:31:25 +01002725static void arm_smmu_remove_device(struct device *dev)
2726{
Robin Murphy06e393e2016-09-12 17:13:55 +01002727 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002728 struct arm_smmu_device *smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002729
Robin Murphy06e393e2016-09-12 17:13:55 +01002730 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyd5b41782016-09-14 15:21:39 +01002731 return;
Robin Murphya754fd12016-09-12 17:13:50 +01002732
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002733 smmu = fwspec_smmu(fwspec);
2734 if (arm_smmu_power_on(smmu->pwr)) {
2735 WARN_ON(1);
2736 return;
2737 }
2738
Robin Murphy06e393e2016-09-12 17:13:55 +01002739 arm_smmu_master_free_smes(fwspec);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002740 iommu_group_remove_device(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002741 kfree(fwspec->iommu_priv);
2742 iommu_fwspec_free(dev);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002743 arm_smmu_power_off(smmu->pwr);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002744}
2745
Joerg Roedelaf659932015-10-21 23:51:41 +02002746static struct iommu_group *arm_smmu_device_group(struct device *dev)
2747{
Robin Murphy06e393e2016-09-12 17:13:55 +01002748 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2749 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy6668f692016-09-12 17:13:54 +01002750 struct iommu_group *group = NULL;
2751 int i, idx;
2752
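	/*
	 * Every stream mapping entry used by this device must either have no
	 * group assigned yet or already belong to the same group; otherwise
	 * the device cannot be placed in a single IOMMU group.
	 */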
Robin Murphy06e393e2016-09-12 17:13:55 +01002753 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002754 if (group && smmu->s2crs[idx].group &&
2755 group != smmu->s2crs[idx].group)
2756 return ERR_PTR(-EINVAL);
2757
2758 group = smmu->s2crs[idx].group;
2759 }
2760
Patrick Daly03330cc2017-08-11 14:56:38 -07002761 if (!group) {
2762 if (dev_is_pci(dev))
2763 group = pci_device_group(dev);
2764 else
2765 group = generic_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02002766
Patrick Daly03330cc2017-08-11 14:56:38 -07002767 if (IS_ERR(group))
2768 return NULL;
2769 }
2770
2771 if (arm_smmu_arch_device_group(dev, group)) {
2772 iommu_group_put(group);
2773 return ERR_PTR(-EINVAL);
2774 }
Joerg Roedelaf659932015-10-21 23:51:41 +02002775
Joerg Roedelaf659932015-10-21 23:51:41 +02002776 return group;
2777}
2778
Will Deaconc752ce42014-06-25 22:46:31 +01002779static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
2780 enum iommu_attr attr, void *data)
2781{
Joerg Roedel1d672632015-03-26 13:43:10 +01002782 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002783 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01002784
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002785 mutex_lock(&smmu_domain->init_mutex);
Will Deaconc752ce42014-06-25 22:46:31 +01002786 switch (attr) {
2787 case DOMAIN_ATTR_NESTING:
2788 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002789 ret = 0;
2790 break;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08002791 case DOMAIN_ATTR_PT_BASE_ADDR:
2792 *((phys_addr_t *)data) =
2793 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002794 ret = 0;
2795 break;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002796 case DOMAIN_ATTR_CONTEXT_BANK:
2797 /* context bank index isn't valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002798 if (smmu_domain->smmu == NULL) {
2799 ret = -ENODEV;
2800 break;
2801 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002802 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
2803 ret = 0;
2804 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002805 case DOMAIN_ATTR_TTBR0: {
2806 u64 val;
2807 struct arm_smmu_device *smmu = smmu_domain->smmu;
2808 /* not valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002809 if (smmu == NULL) {
2810 ret = -ENODEV;
2811 break;
2812 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002813 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2814 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
2815 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
2816 << (TTBRn_ASID_SHIFT);
2817 *((u64 *)data) = val;
2818 ret = 0;
2819 break;
2820 }
2821 case DOMAIN_ATTR_CONTEXTIDR:
2822 /* not valid until attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002823 if (smmu_domain->smmu == NULL) {
2824 ret = -ENODEV;
2825 break;
2826 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002827 *((u32 *)data) = smmu_domain->cfg.procid;
2828 ret = 0;
2829 break;
2830 case DOMAIN_ATTR_PROCID:
2831 *((u32 *)data) = smmu_domain->cfg.procid;
2832 ret = 0;
2833 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002834 case DOMAIN_ATTR_DYNAMIC:
2835 *((int *)data) = !!(smmu_domain->attributes
2836 & (1 << DOMAIN_ATTR_DYNAMIC));
2837 ret = 0;
2838 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002839 case DOMAIN_ATTR_NON_FATAL_FAULTS:
2840 *((int *)data) = !!(smmu_domain->attributes
2841 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
2842 ret = 0;
2843 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07002844 case DOMAIN_ATTR_S1_BYPASS:
2845 *((int *)data) = !!(smmu_domain->attributes
2846 & (1 << DOMAIN_ATTR_S1_BYPASS));
2847 ret = 0;
2848 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002849 case DOMAIN_ATTR_SECURE_VMID:
2850 *((int *)data) = smmu_domain->secure_vmid;
2851 ret = 0;
2852 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08002853 case DOMAIN_ATTR_PGTBL_INFO: {
2854 struct iommu_pgtbl_info *info = data;
2855
2856 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
2857 ret = -ENODEV;
2858 break;
2859 }
2860 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
2861 ret = 0;
2862 break;
2863 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002864 case DOMAIN_ATTR_FAST:
2865 *((int *)data) = !!(smmu_domain->attributes
2866 & (1 << DOMAIN_ATTR_FAST));
2867 ret = 0;
2868 break;
Patrick Daly1e279922017-09-06 15:57:45 -07002869 case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
2870 *((int *)data) = !!(smmu_domain->attributes
2871 & (1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR));
2872 ret = 0;
2873 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002874 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2875 *((int *)data) = !!(smmu_domain->attributes &
2876 (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
2877 ret = 0;
2878 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08002879 case DOMAIN_ATTR_EARLY_MAP:
2880 *((int *)data) = !!(smmu_domain->attributes
2881 & (1 << DOMAIN_ATTR_EARLY_MAP));
2882 ret = 0;
2883 break;
Mitchel Humpherys05314f32016-06-07 16:04:40 -07002884 case DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002885 if (!smmu_domain->smmu) {
2886 ret = -ENODEV;
2887 break;
2888 }
Liam Mark53cf2342016-12-20 11:36:07 -08002889 *((int *)data) = is_iommu_pt_coherent(smmu_domain);
2890 ret = 0;
2891 break;
2892 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
2893 *((int *)data) = !!(smmu_domain->attributes
2894 & (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
Mitchel Humpherys05314f32016-06-07 16:04:40 -07002895 ret = 0;
2896 break;
Charan Teja Reddyc682e472017-04-20 19:11:20 +05302897 case DOMAIN_ATTR_CB_STALL_DISABLE:
2898 *((int *)data) = !!(smmu_domain->attributes
2899 & (1 << DOMAIN_ATTR_CB_STALL_DISABLE));
2900 ret = 0;
2901 break;
Patrick Daly23301482017-10-12 16:18:25 -07002902 case DOMAIN_ATTR_QCOM_MMU500_ERRATA_MIN_ALIGN:
2903 *((int *)data) = smmu_domain->qsmmuv500_errata2_min_align;
2904 ret = 0;
2905 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002906 default:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002907 ret = -ENODEV;
2908 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002909 }
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002910 mutex_unlock(&smmu_domain->init_mutex);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002911 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002912}
2913
2914static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
2915 enum iommu_attr attr, void *data)
2916{
Will Deacon518f7132014-11-14 17:17:54 +00002917 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01002918 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01002919
Will Deacon518f7132014-11-14 17:17:54 +00002920 mutex_lock(&smmu_domain->init_mutex);
2921
Will Deaconc752ce42014-06-25 22:46:31 +01002922 switch (attr) {
2923 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00002924 if (smmu_domain->smmu) {
2925 ret = -EPERM;
2926 goto out_unlock;
2927 }
2928
Will Deaconc752ce42014-06-25 22:46:31 +01002929 if (*(int *)data)
2930 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2931 else
2932 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2933
Will Deacon518f7132014-11-14 17:17:54 +00002934 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002935 case DOMAIN_ATTR_PROCID:
2936 if (smmu_domain->smmu != NULL) {
2937 dev_err(smmu_domain->smmu->dev,
2938 "cannot change procid attribute while attached\n");
2939 ret = -EBUSY;
2940 break;
2941 }
2942 smmu_domain->cfg.procid = *((u32 *)data);
2943 ret = 0;
2944 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002945 case DOMAIN_ATTR_DYNAMIC: {
2946 int dynamic = *((int *)data);
2947
2948 if (smmu_domain->smmu != NULL) {
2949 dev_err(smmu_domain->smmu->dev,
2950 "cannot change dynamic attribute while attached\n");
2951 ret = -EBUSY;
2952 break;
2953 }
2954
2955 if (dynamic)
2956 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
2957 else
2958 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
2959 ret = 0;
2960 break;
2961 }
2962 case DOMAIN_ATTR_CONTEXT_BANK:
2963 /* context bank can't be set while attached */
2964 if (smmu_domain->smmu != NULL) {
2965 ret = -EBUSY;
2966 break;
2967 }
2968 /* ... and it can only be set for dynamic contexts. */
2969 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
2970 ret = -EINVAL;
2971 break;
2972 }
2973
2974 /* this will be validated during attach */
2975 smmu_domain->cfg.cbndx = *((unsigned int *)data);
2976 ret = 0;
2977 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002978 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
2979 u32 non_fatal_faults = *((int *)data);
2980
2981 if (non_fatal_faults)
2982 smmu_domain->attributes |=
2983 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
2984 else
2985 smmu_domain->attributes &=
2986 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
2987 ret = 0;
2988 break;
2989 }
Patrick Dalye62d3362016-03-15 18:58:28 -07002990 case DOMAIN_ATTR_S1_BYPASS: {
2991 int bypass = *((int *)data);
2992
2993 /* bypass can't be changed while attached */
2994 if (smmu_domain->smmu != NULL) {
2995 ret = -EBUSY;
2996 break;
2997 }
2998 if (bypass)
2999 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
3000 else
3001 smmu_domain->attributes &=
3002 ~(1 << DOMAIN_ATTR_S1_BYPASS);
3003
3004 ret = 0;
3005 break;
3006 }
Patrick Daly8befb662016-08-17 20:03:28 -07003007 case DOMAIN_ATTR_ATOMIC:
3008 {
3009 int atomic_ctx = *((int *)data);
3010
3011 /* can't be changed while attached */
3012 if (smmu_domain->smmu != NULL) {
3013 ret = -EBUSY;
3014 break;
3015 }
3016 if (atomic_ctx)
3017 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
3018 else
3019 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
3020 break;
3021 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07003022 case DOMAIN_ATTR_SECURE_VMID:
3023 if (smmu_domain->secure_vmid != VMID_INVAL) {
3024 ret = -ENODEV;
3025 WARN(1, "secure vmid already set!");
3026 break;
3027 }
3028 smmu_domain->secure_vmid = *((int *)data);
3029 break;
Patrick Daly1e279922017-09-06 15:57:45 -07003030 case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
3031 if (*((int *)data))
3032 smmu_domain->attributes |=
3033 1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR;
3034 ret = 0;
3035 break;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003036 /*
3037 * fast_smmu_unmap_page() and fast_smmu_alloc_iova() both
3038 * expect that the bus/clock/regulator are already on. Thus also
 3039	 * force DOMAIN_ATTR_ATOMIC to be set.
3040 */
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003041 case DOMAIN_ATTR_FAST:
Patrick Daly0df84ac2017-10-11 17:32:41 -07003042 {
3043 int fast = *((int *)data);
3044
3045 if (fast) {
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003046 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003047 smmu_domain->attributes |= 1 << DOMAIN_ATTR_ATOMIC;
3048 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003049 ret = 0;
3050 break;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003051 }
Patrick Dalyce6786f2016-11-09 14:19:23 -08003052 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
3053 /* can't be changed while attached */
3054 if (smmu_domain->smmu != NULL) {
3055 ret = -EBUSY;
3056 break;
3057 }
3058 if (*((int *)data))
3059 smmu_domain->attributes |=
3060 1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
3061 ret = 0;
3062 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003063 case DOMAIN_ATTR_EARLY_MAP: {
3064 int early_map = *((int *)data);
3065
3066 ret = 0;
3067 if (early_map) {
3068 smmu_domain->attributes |=
3069 1 << DOMAIN_ATTR_EARLY_MAP;
3070 } else {
3071 if (smmu_domain->smmu)
3072 ret = arm_smmu_enable_s1_translations(
3073 smmu_domain);
3074
3075 if (!ret)
3076 smmu_domain->attributes &=
3077 ~(1 << DOMAIN_ATTR_EARLY_MAP);
3078 }
3079 break;
3080 }
Liam Mark53cf2342016-12-20 11:36:07 -08003081 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT: {
3082 int force_coherent = *((int *)data);
3083
3084 if (smmu_domain->smmu != NULL) {
3085 dev_err(smmu_domain->smmu->dev,
3086 "cannot change force coherent attribute while attached\n");
3087 ret = -EBUSY;
3088 break;
3089 }
3090
3091 if (force_coherent)
3092 smmu_domain->attributes |=
3093 1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
3094 else
3095 smmu_domain->attributes &=
3096 ~(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT);
3097
3098 ret = 0;
3099 break;
3100 }
3101
Charan Teja Reddyc682e472017-04-20 19:11:20 +05303102 case DOMAIN_ATTR_CB_STALL_DISABLE:
3103 if (*((int *)data))
3104 smmu_domain->attributes |=
3105 1 << DOMAIN_ATTR_CB_STALL_DISABLE;
3106 ret = 0;
3107 break;
Will Deaconc752ce42014-06-25 22:46:31 +01003108 default:
Will Deacon518f7132014-11-14 17:17:54 +00003109 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01003110 }
Will Deacon518f7132014-11-14 17:17:54 +00003111
3112out_unlock:
3113 mutex_unlock(&smmu_domain->init_mutex);
3114 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01003115}
3116
Robin Murphy7e96c742016-09-14 15:26:46 +01003117static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
3118{
3119 u32 fwid = 0;
3120
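	/*
	 * Pack the "iommus" cells into a single firmware ID: cell 0 is the
	 * stream ID and the optional cell 1 is the SMR mask, shifted into the
	 * upper half. A hypothetical devicetree entry might look like:
	 *	iommus = <&apps_smmu 0x420 0x0>;
	 */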
3121 if (args->args_count > 0)
3122 fwid |= (u16)args->args[0];
3123
3124 if (args->args_count > 1)
3125 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
3126
3127 return iommu_fwspec_add_ids(dev, &fwid, 1);
3128}
3129
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003130static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain)
3131{
3132 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3133 struct arm_smmu_device *smmu = smmu_domain->smmu;
3134 void __iomem *cb_base;
3135 u32 reg;
3136 int ret;
3137
3138 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3139 ret = arm_smmu_power_on(smmu->pwr);
3140 if (ret)
3141 return ret;
3142
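	/*
	 * Set SCTLR.M to enable translation for this context bank. This path
	 * is taken when DOMAIN_ATTR_EARLY_MAP is cleared on an attached
	 * domain (see arm_smmu_domain_set_attr()).
	 */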
3143 reg = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3144 reg |= SCTLR_M;
3145
3146 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
3147 arm_smmu_power_off(smmu->pwr);
3148 return ret;
3149}
3150
Liam Mark3ba41cf2016-12-09 14:39:04 -08003151static bool arm_smmu_is_iova_coherent(struct iommu_domain *domain,
3152 dma_addr_t iova)
3153{
3154 bool ret;
3155 unsigned long flags;
3156 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3157 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
3158
3159 if (!ops)
3160 return false;
3161
3162 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
3163 ret = ops->is_iova_coherent(ops, iova);
3164 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
3165 return ret;
3166}
3167
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003168static void arm_smmu_trigger_fault(struct iommu_domain *domain,
3169 unsigned long flags)
3170{
3171 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3172 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3173 struct arm_smmu_device *smmu;
3174 void __iomem *cb_base;
3175
3176 if (!smmu_domain->smmu) {
3177 pr_err("Can't trigger faults on non-attached domains\n");
3178 return;
3179 }
3180
3181 smmu = smmu_domain->smmu;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003182 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003183 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003184
3185 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3186 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
3187 flags, cfg->cbndx);
3188 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07003189 /* give the interrupt time to fire... */
3190 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003191
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003192 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003193}
3194
Mitchel Humpherysfd557002015-08-21 14:07:59 -07003195static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
3196 unsigned long offset)
3197{
3198 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3199 struct arm_smmu_device *smmu;
3200 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3201 void __iomem *cb_base;
3202 unsigned long val;
3203
3204 if (offset >= SZ_4K) {
3205 pr_err("Invalid offset: 0x%lx\n", offset);
3206 return 0;
3207 }
3208
3209 smmu = smmu_domain->smmu;
3210 if (!smmu) {
3211 WARN(1, "Can't read registers of a detached domain\n");
3212 val = 0;
3213 return val;
3214 }
3215
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003216 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003217 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003218
Mitchel Humpherysfd557002015-08-21 14:07:59 -07003219 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3220 val = readl_relaxed(cb_base + offset);
3221
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003222 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07003223 return val;
3224}
3225
3226static void arm_smmu_reg_write(struct iommu_domain *domain,
3227 unsigned long offset, unsigned long val)
3228{
3229 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3230 struct arm_smmu_device *smmu;
3231 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3232 void __iomem *cb_base;
3233
3234 if (offset >= SZ_4K) {
3235 pr_err("Invalid offset: 0x%lx\n", offset);
3236 return;
3237 }
3238
3239 smmu = smmu_domain->smmu;
3240 if (!smmu) {
 3241		WARN(1, "Can't write registers of a detached domain\n");
3242 return;
3243 }
3244
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003245 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003246 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003247
Mitchel Humpherysfd557002015-08-21 14:07:59 -07003248 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3249 writel_relaxed(val, cb_base + offset);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003250
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003251 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07003252}
3253
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003254static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
3255{
Patrick Dalyda765c62017-09-11 16:31:07 -07003256 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3257 const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;
3258
3259 tlb->tlb_flush_all(smmu_domain);
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003260}
3261
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003262static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
3263{
3264 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3265
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003266 return arm_smmu_power_on(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003267}
3268
3269static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
3270{
3271 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3272
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003273 arm_smmu_power_off(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003274}
3275
Will Deacon518f7132014-11-14 17:17:54 +00003276static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01003277 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01003278 .domain_alloc = arm_smmu_domain_alloc,
3279 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01003280 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07003281 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01003282 .map = arm_smmu_map,
3283 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07003284 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01003285 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07003286 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01003287 .add_device = arm_smmu_add_device,
3288 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02003289 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01003290 .domain_get_attr = arm_smmu_domain_get_attr,
3291 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy7e96c742016-09-14 15:26:46 +01003292 .of_xlate = arm_smmu_of_xlate,
Will Deacon518f7132014-11-14 17:17:54 +00003293 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003294 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherysfd557002015-08-21 14:07:59 -07003295 .reg_read = arm_smmu_reg_read,
3296 .reg_write = arm_smmu_reg_write,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003297 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003298 .enable_config_clocks = arm_smmu_enable_config_clocks,
3299 .disable_config_clocks = arm_smmu_disable_config_clocks,
Liam Mark3ba41cf2016-12-09 14:39:04 -08003300 .is_iova_coherent = arm_smmu_is_iova_coherent,
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07003301 .iova_to_pte = arm_smmu_iova_to_pte,
Will Deacon45ae7cf2013-06-24 18:31:25 +01003302};
3303
Patrick Dalyad441dd2016-09-15 15:50:46 -07003304#define IMPL_DEF1_MICRO_MMU_CTRL 0
3305#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
3306#define MICRO_MMU_CTRL_IDLE (1 << 3)
3307
3308/* Definitions for implementation-defined registers */
3309#define ACTLR_QCOM_OSH_SHIFT 28
3310#define ACTLR_QCOM_OSH 1
3311
3312#define ACTLR_QCOM_ISH_SHIFT 29
3313#define ACTLR_QCOM_ISH 1
3314
3315#define ACTLR_QCOM_NSH_SHIFT 30
3316#define ACTLR_QCOM_NSH 1
3317
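/*
 * QSMMUv2 halt/resume protocol: request a halt by setting LOCAL_HALT_REQ in
 * the implementation-defined MICRO_MMU_CTRL register, wait for the IDLE bit
 * to assert, and clear LOCAL_HALT_REQ again to resume traffic.
 */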
3318static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003319{
3320 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003321 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003322
3323 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
3324 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
3325 0, 30000)) {
3326 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
3327 return -EBUSY;
3328 }
3329
3330 return 0;
3331}
3332
Patrick Dalyad441dd2016-09-15 15:50:46 -07003333static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003334{
3335 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3336 u32 reg;
3337
3338 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3339 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
3340 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3341
Patrick Dalyad441dd2016-09-15 15:50:46 -07003342 return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003343}
3344
Patrick Dalyad441dd2016-09-15 15:50:46 -07003345static int qsmmuv2_halt(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003346{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003347 return __qsmmuv2_halt(smmu, true);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003348}
3349
Patrick Dalyad441dd2016-09-15 15:50:46 -07003350static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003351{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003352 return __qsmmuv2_halt(smmu, false);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003353}
3354
Patrick Dalyad441dd2016-09-15 15:50:46 -07003355static void qsmmuv2_resume(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003356{
3357 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3358 u32 reg;
3359
3360 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3361 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
3362 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3363}
3364
Patrick Dalyad441dd2016-09-15 15:50:46 -07003365static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003366{
3367 int i;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003368 u32 val;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003369 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003370 void __iomem *cb_base;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003371
Patrick Dalyad441dd2016-09-15 15:50:46 -07003372 /*
3373 * SCTLR.M must be disabled here per ARM SMMUv2 spec
3374 * to prevent table walks with an inconsistent state.
3375 */
3376 for (i = 0; i < smmu->num_context_banks; ++i) {
3377 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3378 val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
3379 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
3380 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
3381 writel_relaxed(val, cb_base + ARM_SMMU_CB_ACTLR);
3382 }
3383
3384 /* Program implementation defined registers */
3385 qsmmuv2_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003386 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
3387 writel_relaxed(regs[i].value,
3388 ARM_SMMU_GR0(smmu) + regs[i].offset);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003389 qsmmuv2_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003390}
3391
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003392static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
3393 dma_addr_t iova)
Patrick Dalyad441dd2016-09-15 15:50:46 -07003394{
3395 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3396 struct arm_smmu_device *smmu = smmu_domain->smmu;
3397 int ret;
3398 phys_addr_t phys = 0;
3399 unsigned long flags;
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003400 u32 sctlr, sctlr_orig, fsr;
3401 void __iomem *cb_base;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003402
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003403 ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003404 if (ret)
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003405 return ret;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003406
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003407 spin_lock_irqsave(&smmu->atos_lock, flags);
3408 cb_base = ARM_SMMU_CB_BASE(smmu) +
3409 ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003410
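	/*
	 * Halt the SMMU and terminate any stalled transaction before issuing
	 * the ATOS request on this context bank.
	 */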
3411 qsmmuv2_halt_nowait(smmu);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003412 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003413 qsmmuv2_wait_for_halt(smmu);
3414
3415 /* clear FSR to allow ATOS to log any faults */
3416 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
3417 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
3418
3419 /* disable stall mode momentarily */
3420 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3421 sctlr = sctlr_orig & ~SCTLR_CFCFG;
3422 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
3423
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003424 phys = __arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003425
3426 /* restore SCTLR */
3427 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
3428
3429 qsmmuv2_resume(smmu);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003430 spin_unlock_irqrestore(&smmu->atos_lock, flags);
3431
3432 arm_smmu_power_off(smmu_domain->smmu->pwr);
3433 return phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003434}
3435
3436struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
3437 .device_reset = qsmmuv2_device_reset,
3438 .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
Patrick Dalyad441dd2016-09-15 15:50:46 -07003439};
3440
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003441static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003442{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003443 int i;
3444 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003445 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003446 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003447
Peng Fan3ca37122016-05-03 21:50:30 +08003448 /*
 3449	 * Before clearing ARM_MMU500_ACTLR_CPRE, the CACHE_LOCK bit of
 3450	 * ACR must be cleared first. Note that the CACHE_LOCK bit is only
 3451	 * present in MMU-500 r2 onwards.
3452 */
3453 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
3454 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
3455 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
3456 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
3457 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
3458 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
3459 }
3460
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003461 /* Make sure all context banks are disabled and clear CB_FSR */
3462 for (i = 0; i < smmu->num_context_banks; ++i) {
3463 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3464 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
3465 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003466 /*
3467 * Disable MMU-500's not-particularly-beneficial next-page
3468 * prefetcher for the sake of errata #841119 and #826419.
3469 */
3470 if (smmu->model == ARM_MMU500) {
3471 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
3472 reg &= ~ARM_MMU500_ACTLR_CPRE;
3473 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
3474 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003475 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003476}
3477
3478static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
3479{
3480 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy468f4942016-09-12 17:13:49 +01003481 int i;
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003482 u32 reg;
3483
3484 /* clear global FSR */
3485 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3486 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3487
Robin Murphy468f4942016-09-12 17:13:49 +01003488 /*
3489 * Reset stream mapping groups: Initial values mark all SMRn as
3490 * invalid and all S2CRn as bypass unless overridden.
3491 */
Patrick Daly59b6d202017-06-12 13:12:15 -07003492 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
3493 for (i = 0; i < smmu->num_mapping_groups; ++i)
3494 arm_smmu_write_sme(smmu, i);
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003495
Patrick Daly59b6d202017-06-12 13:12:15 -07003496 arm_smmu_context_bank_reset(smmu);
3497 }
Will Deacon1463fe42013-07-31 19:21:27 +01003498
Will Deacon45ae7cf2013-06-24 18:31:25 +01003499 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003500 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
3501 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
3502
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003503 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003504
Will Deacon45ae7cf2013-06-24 18:31:25 +01003505 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003506 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003507
3508 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003509 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003510
Robin Murphy25a1c962016-02-10 14:25:33 +00003511 /* Enable client access, handling unmatched streams as appropriate */
3512 reg &= ~sCR0_CLIENTPD;
3513 if (disable_bypass)
3514 reg |= sCR0_USFCFG;
3515 else
3516 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003517
3518 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003519 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003520
3521 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003522 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003523
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003524 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3525 reg |= sCR0_VMID16EN;
3526
Patrick Daly7f377fe2017-10-06 17:37:10 -07003527	/* Force bypass transactions to be Non-Shareable and not IO-coherent */
3528 reg &= ~(sCR0_SHCFG_MASK << sCR0_SHCFG_SHIFT);
3529 reg |= sCR0_SHCFG_NSH;
3530
Will Deacon45ae7cf2013-06-24 18:31:25 +01003531 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003532 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003533 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Dalyd7476202016-09-08 18:23:28 -07003534
3535 /* Manage any implementation defined features */
3536 arm_smmu_arch_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003537}
3538
3539static int arm_smmu_id_size_to_bits(int size)
3540{
3541 switch (size) {
3542 case 0:
3543 return 32;
3544 case 1:
3545 return 36;
3546 case 2:
3547 return 40;
3548 case 3:
3549 return 42;
3550 case 4:
3551 return 44;
3552 case 5:
3553 default:
3554 return 48;
3555 }
3556}
3557
Patrick Dalyda688822017-05-17 20:12:48 -07003558
3559/*
 3560 * Some context banks need to be transferred from the bootloader to HLOS in a way
3561 * that allows ongoing traffic. The current expectation is that these context
3562 * banks operate in bypass mode.
3563 * Additionally, there must be exactly one device in devicetree with stream-ids
3564 * overlapping those used by the bootloader.
3565 */
3566static int arm_smmu_alloc_cb(struct iommu_domain *domain,
3567 struct arm_smmu_device *smmu,
3568 struct device *dev)
3569{
3570 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Dalye72526b2017-07-18 16:21:44 -07003571 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Dalyda688822017-05-17 20:12:48 -07003572 u32 i, idx;
3573 int cb = -EINVAL;
3574 bool dynamic;
3575
Patrick Dalye72526b2017-07-18 16:21:44 -07003576 /*
3577 * Dynamic domains have already set cbndx through domain attribute.
3578 * Verify that they picked a valid value.
3579 */
Patrick Dalyda688822017-05-17 20:12:48 -07003580 dynamic = is_dynamic_domain(domain);
Patrick Dalye72526b2017-07-18 16:21:44 -07003581 if (dynamic) {
3582 cb = smmu_domain->cfg.cbndx;
3583 if (cb < smmu->num_context_banks)
3584 return cb;
3585 else
3586 return -EINVAL;
3587 }
Patrick Dalyda688822017-05-17 20:12:48 -07003588
3589 mutex_lock(&smmu->stream_map_mutex);
3590 for_each_cfg_sme(fwspec, i, idx) {
3591 if (smmu->s2crs[idx].cb_handoff)
3592 cb = smmu->s2crs[idx].cbndx;
3593 }
3594
3595 if (cb < 0) {
3596 mutex_unlock(&smmu->stream_map_mutex);
3597 return __arm_smmu_alloc_bitmap(smmu->context_map,
3598 smmu->num_s2_context_banks,
3599 smmu->num_context_banks);
3600 }
3601
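	/*
	 * Claim the handed-off context bank: clear the handoff flag on every
	 * matching stream mapping entry and drop the reference recorded for
	 * it by arm_smmu_handoff_cbs().
	 */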
3602 for (i = 0; i < smmu->num_mapping_groups; i++) {
Patrick Daly2eb31362017-06-14 18:29:36 -07003603 if (smmu->s2crs[i].cb_handoff && smmu->s2crs[i].cbndx == cb) {
Patrick Dalyda688822017-05-17 20:12:48 -07003604 smmu->s2crs[i].cb_handoff = false;
3605 smmu->s2crs[i].count -= 1;
3606 }
3607 }
3608 mutex_unlock(&smmu->stream_map_mutex);
3609
3610 return cb;
3611}
3612
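/*
 * Record the SMR/S2CR state programmed by the bootloader: every valid,
 * translating stream mapping entry is preserved, its context bank is marked
 * busy in the context map, and the entry is flagged for handoff so that
 * arm_smmu_alloc_cb() can later hand the context bank over to the owning
 * domain.
 */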
3613static int arm_smmu_handoff_cbs(struct arm_smmu_device *smmu)
3614{
3615 u32 i, raw_smr, raw_s2cr;
3616 struct arm_smmu_smr smr;
3617 struct arm_smmu_s2cr s2cr;
3618
3619 for (i = 0; i < smmu->num_mapping_groups; i++) {
3620 raw_smr = readl_relaxed(ARM_SMMU_GR0(smmu) +
3621 ARM_SMMU_GR0_SMR(i));
3622 if (!(raw_smr & SMR_VALID))
3623 continue;
3624
3625 smr.mask = (raw_smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
3626 smr.id = (u16)raw_smr;
3627 smr.valid = true;
3628
3629 raw_s2cr = readl_relaxed(ARM_SMMU_GR0(smmu) +
3630 ARM_SMMU_GR0_S2CR(i));
Patrick Daly4ea4bdf2017-08-29 19:24:49 -07003631 memset(&s2cr, 0, sizeof(s2cr));
Patrick Dalyda688822017-05-17 20:12:48 -07003632 s2cr.group = NULL;
3633 s2cr.count = 1;
3634 s2cr.type = (raw_s2cr >> S2CR_TYPE_SHIFT) & S2CR_TYPE_MASK;
3635 s2cr.privcfg = (raw_s2cr >> S2CR_PRIVCFG_SHIFT) &
3636 S2CR_PRIVCFG_MASK;
3637 s2cr.cbndx = (u8)raw_s2cr;
3638 s2cr.cb_handoff = true;
3639
3640 if (s2cr.type != S2CR_TYPE_TRANS)
3641 continue;
3642
3643 smmu->smrs[i] = smr;
3644 smmu->s2crs[i] = s2cr;
3645 bitmap_set(smmu->context_map, s2cr.cbndx, 1);
3646 dev_dbg(smmu->dev, "Handoff smr: %x s2cr: %x cb: %d\n",
3647 raw_smr, raw_s2cr, s2cr.cbndx);
3648 }
3649
3650 return 0;
3651}
3652
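/*
 * Parse the optional "attach-impl-defs" devicetree property, a flat list of
 * <offset value> pairs that are written to implementation-defined registers
 * at reset time (see qsmmuv2_device_reset()). A hypothetical entry might
 * look like:
 *
 *	attach-impl-defs = <0x6000 0x270>,
 *			   <0x6174 0x3>;
 */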
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003653static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
3654{
3655 struct device *dev = smmu->dev;
3656 int i, ntuples, ret;
3657 u32 *tuples;
3658 struct arm_smmu_impl_def_reg *regs, *regit;
3659
3660 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
3661 return 0;
3662
3663 ntuples /= sizeof(u32);
3664 if (ntuples % 2) {
3665 dev_err(dev,
3666 "Invalid number of attach-impl-defs registers: %d\n",
3667 ntuples);
3668 return -EINVAL;
3669 }
3670
3671 regs = devm_kmalloc(
3672 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
3673 GFP_KERNEL);
3674 if (!regs)
3675 return -ENOMEM;
3676
3677 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
3678 if (!tuples)
3679 return -ENOMEM;
3680
3681 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
3682 tuples, ntuples);
3683 if (ret)
3684 return ret;
3685
3686 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
3687 regit->offset = tuples[i];
3688 regit->value = tuples[i + 1];
3689 }
3690
3691 devm_kfree(dev, tuples);
3692
3693 smmu->impl_def_attach_registers = regs;
3694 smmu->num_impl_def_attach_registers = ntuples / 2;
3695
3696 return 0;
3697}
3698
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003699
3700static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003701{
3702 const char *cname;
3703 struct property *prop;
3704 int i;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003705 struct device *dev = pwr->dev;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003706
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003707 pwr->num_clocks =
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003708 of_property_count_strings(dev->of_node, "clock-names");
3709
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003710 if (pwr->num_clocks < 1) {
3711 pwr->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003712 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07003713 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003714
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003715 pwr->clocks = devm_kzalloc(
3716 dev, sizeof(*pwr->clocks) * pwr->num_clocks,
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003717 GFP_KERNEL);
3718
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003719 if (!pwr->clocks)
3720 return -ENOMEM;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003721
3722 i = 0;
3723 of_property_for_each_string(dev->of_node, "clock-names",
3724 prop, cname) {
3725 struct clk *c = devm_clk_get(dev, cname);
3726
3727 if (IS_ERR(c)) {
3728 dev_err(dev, "Couldn't get clock: %s",
3729 cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07003730 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003731 }
3732
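		/*
		 * If the clock has no rate configured yet, set it to whatever
		 * the clock framework rounds a nominal 1 kHz request to.
		 */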
3733 if (clk_get_rate(c) == 0) {
3734 long rate = clk_round_rate(c, 1000);
3735
3736 clk_set_rate(c, rate);
3737 }
3738
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003739 pwr->clocks[i] = c;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003740
3741 ++i;
3742 }
3743 return 0;
3744}
3745
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003746static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003747{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003748 const char *cname;
3749 struct property *prop;
3750 int i, ret = 0;
3751 struct device *dev = pwr->dev;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003752
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003753 pwr->num_gdscs =
3754 of_property_count_strings(dev->of_node, "qcom,regulator-names");
3755
3756 if (pwr->num_gdscs < 1) {
3757 pwr->num_gdscs = 0;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003758 return 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003759 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003760
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003761 pwr->gdscs = devm_kzalloc(
3762 dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
3763
3764 if (!pwr->gdscs)
3765 return -ENOMEM;
3766
Prakash Guptafad87ca2017-05-16 12:13:02 +05303767 if (!of_property_read_u32(dev->of_node,
3768 "qcom,deferred-regulator-disable-delay",
3769 &(pwr->regulator_defer)))
3770 dev_info(dev, "regulator defer delay %d\n",
3771 pwr->regulator_defer);
3772
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003773 i = 0;
3774 of_property_for_each_string(dev->of_node, "qcom,regulator-names",
3775 prop, cname)
Patrick Daly86396be2017-04-17 18:08:45 -07003776 pwr->gdscs[i++].supply = cname;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003777
3778 ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
3779 return ret;
3780}
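
/*
 * Illustrative sketch only (the supply name and delay value are
 * placeholders): the regulator properties consumed above might look like
 * this in a board file:
 *
 *	smmu@... {
 *		...
 *		vdd-supply = <&gdsc_smmu>;
 *		qcom,regulator-names = "vdd";
 *		qcom,deferred-regulator-disable-delay = <80>;
 *	};
 *
 * Each name listed in "qcom,regulator-names" becomes the .supply of one
 * regulator_bulk_data entry, so a matching "<name>-supply" phandle is
 * expected; the optional property stores a deferred-disable delay in
 * pwr->regulator_defer for use by the power-off path.
 */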
3781
3782static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
3783{
3784 struct device *dev = pwr->dev;
3785
3786 /* We don't want the bus APIs to print an error message */
3787 if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
3788 dev_dbg(dev, "No bus scaling info\n");
3789 return 0;
3790 }
3791
3792 pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
3793 if (!pwr->bus_dt_data) {
3794 dev_err(dev, "Unable to read bus-scaling from devicetree\n");
3795 return -EINVAL;
3796 }
3797
3798 pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
3799 if (!pwr->bus_client) {
3800 dev_err(dev, "Bus client registration failed\n");
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003801 return -EINVAL;
3802 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003803
3804 return 0;
3805}
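
/*
 * Rough sketch of the bus-scaling input, assuming the standard msm-bus
 * client binding; the master/slave IDs and bandwidth values below are
 * placeholders:
 *
 *	smmu@... {
 *		...
 *		qcom,msm-bus,name = "smmu-client";
 *		qcom,msm-bus,num-cases = <2>;
 *		qcom,msm-bus,num-paths = <1>;
 *		qcom,msm-bus,vectors-KBps = <MASTER_ID SLAVE_ID 0 0>,
 *					    <MASTER_ID SLAVE_ID 0 1000>;
 *	};
 *
 * msm_bus_cl_get_pdata() converts this into platform data, and the client
 * handle registered above is used to vote for bandwidth whenever the SMMU
 * power resources are turned on or off.
 */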
3806
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003807/*
3808 * Cleanup done by devm. Any non-devm resources must clean up themselves.
3809 */
3810static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
3811 struct platform_device *pdev)
Patrick Daly2764f952016-09-06 19:22:44 -07003812{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003813 struct arm_smmu_power_resources *pwr;
3814 int ret;
Patrick Daly2764f952016-09-06 19:22:44 -07003815
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003816 pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
3817 if (!pwr)
3818 return ERR_PTR(-ENOMEM);
Patrick Daly2764f952016-09-06 19:22:44 -07003819
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003820 pwr->dev = &pdev->dev;
3821 pwr->pdev = pdev;
3822 mutex_init(&pwr->power_lock);
3823 spin_lock_init(&pwr->clock_refs_lock);
Patrick Daly2764f952016-09-06 19:22:44 -07003824
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003825 ret = arm_smmu_init_clocks(pwr);
3826 if (ret)
3827 return ERR_PTR(ret);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003828
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003829 ret = arm_smmu_init_regulators(pwr);
3830 if (ret)
3831 return ERR_PTR(ret);
3832
3833 ret = arm_smmu_init_bus_scaling(pwr);
3834 if (ret)
3835 return ERR_PTR(ret);
3836
3837 return pwr;
Patrick Daly2764f952016-09-06 19:22:44 -07003838}
3839
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003840/*
Patrick Dalyabeee952017-04-13 18:14:59 -07003841 * Bus APIs are not devm-safe.
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003842 */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003843static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003844{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003845 msm_bus_scale_unregister_client(pwr->bus_client);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003846}
3847
Will Deacon45ae7cf2013-06-24 18:31:25 +01003848static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
3849{
3850 unsigned long size;
3851 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
3852 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003853 bool cttw_dt, cttw_reg;
Robin Murphya754fd12016-09-12 17:13:50 +01003854 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003855
Mitchel Humpherysba822582015-10-20 11:37:41 -07003856 dev_dbg(smmu->dev, "probing hardware configuration...\n");
3857 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01003858 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003859
3860 /* ID0 */
3861 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01003862
3863 /* Restrict available stages based on module parameter */
3864 if (force_stage == 1)
3865 id &= ~(ID0_S2TS | ID0_NTS);
3866 else if (force_stage == 2)
3867 id &= ~(ID0_S1TS | ID0_NTS);
3868
Will Deacon45ae7cf2013-06-24 18:31:25 +01003869 if (id & ID0_S1TS) {
3870 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003871 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003872 }
3873
3874 if (id & ID0_S2TS) {
3875 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003876 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003877 }
3878
3879 if (id & ID0_NTS) {
3880 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003881 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003882 }
3883
3884 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01003885 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003886 dev_err(smmu->dev, "\tno translation support!\n");
3887 return -ENODEV;
3888 }
3889
Robin Murphyb7862e32016-04-13 18:13:03 +01003890 if ((id & ID0_S1TS) &&
3891 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003892 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003893 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003894 }
3895
Robin Murphybae2c2d2015-07-29 19:46:05 +01003896 /*
3897 * In order for DMA API calls to work properly, we must defer to what
3898 * the DT says about coherency, regardless of what the hardware claims.
3899 * Fortunately, this also opens up a workaround for systems where the
3900 * ID register value has ended up configured incorrectly.
3901 */
3902 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
3903 cttw_reg = !!(id & ID0_CTTW);
3904 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003905 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003906 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003907 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01003908 cttw_dt ? "" : "non-");
3909 if (cttw_dt != cttw_reg)
3910 dev_notice(smmu->dev,
3911 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003912
Robin Murphy53867802016-09-12 17:13:48 +01003913 /* Max. number of entries we have for stream matching/indexing */
3914 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
3915 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003916 if (id & ID0_SMS) {
Robin Murphy53867802016-09-12 17:13:48 +01003917 u32 smr;
Patrick Daly937de532016-12-12 18:44:09 -08003918 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003919
3920 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy53867802016-09-12 17:13:48 +01003921 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
3922 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003923 dev_err(smmu->dev,
3924 "stream-matching supported, but no SMRs present!\n");
3925 return -ENODEV;
3926 }
3927
Robin Murphy53867802016-09-12 17:13:48 +01003928 /*
3929 * SMR.ID bits may not be preserved if the corresponding MASK
3930 * bits are set, so check each one separately. We can reject
3931 * masters later if they try to claim IDs outside these masks.
3932 */
Patrick Daly937de532016-12-12 18:44:09 -08003933 for (i = 0; i < size; i++) {
3934 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
3935 if (!(smr & SMR_VALID))
3936 break;
3937 }
3938 if (i == size) {
3939 dev_err(smmu->dev,
3940 "Unable to compute streamid_masks\n");
3941 return -ENODEV;
3942 }
3943
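		/*
		 * Probe the implemented ID/MASK widths by writing all-ones
		 * into the free SMR found above and reading back which bits
		 * stick; only the surviving bits take part in stream
		 * matching, so the masks derived here bound any stream IDs
		 * that masters may later claim.
		 */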
Robin Murphy53867802016-09-12 17:13:48 +01003944 smr = smmu->streamid_mask << SMR_ID_SHIFT;
Patrick Daly937de532016-12-12 18:44:09 -08003945 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
3946 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
Robin Murphy53867802016-09-12 17:13:48 +01003947 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003948
Robin Murphy53867802016-09-12 17:13:48 +01003949 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
Patrick Daly937de532016-12-12 18:44:09 -08003950 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
3951 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
Robin Murphy53867802016-09-12 17:13:48 +01003952 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
Dhaval Patel031d7462015-05-09 14:47:29 -07003953
Robin Murphy468f4942016-09-12 17:13:49 +01003954 /* Zero-initialised to mark as invalid */
3955 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
3956 GFP_KERNEL);
3957 if (!smmu->smrs)
3958 return -ENOMEM;
3959
Robin Murphy53867802016-09-12 17:13:48 +01003960 dev_notice(smmu->dev,
3961 "\tstream matching with %lu register groups, mask 0x%x",
3962 size, smmu->smr_mask_mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003963 }
Robin Murphya754fd12016-09-12 17:13:50 +01003964 /* s2cr->type == 0 means translation, so initialise explicitly */
3965 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
3966 GFP_KERNEL);
3967 if (!smmu->s2crs)
3968 return -ENOMEM;
3969 for (i = 0; i < size; i++)
3970 smmu->s2crs[i] = s2cr_init_val;
3971
Robin Murphy53867802016-09-12 17:13:48 +01003972 smmu->num_mapping_groups = size;
Robin Murphy6668f692016-09-12 17:13:54 +01003973 mutex_init(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003974
Robin Murphy7602b872016-04-28 17:12:09 +01003975 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
3976 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
3977 if (!(id & ID0_PTFS_NO_AARCH32S))
3978 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
3979 }
3980
Will Deacon45ae7cf2013-06-24 18:31:25 +01003981 /* ID1 */
3982 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01003983 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003984
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003985 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00003986 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01003987 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003988 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07003989 dev_warn(smmu->dev,
3990 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
3991 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003992
Will Deacon518f7132014-11-14 17:17:54 +00003993 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003994 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
3995 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
3996 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
3997 return -ENODEV;
3998 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07003999 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01004000 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01004001 /*
4002 * Cavium CN88xx erratum #27704.
4003 * Ensure ASID and VMID allocation is unique across all SMMUs in
4004 * the system.
4005 */
4006 if (smmu->model == CAVIUM_SMMUV2) {
4007 smmu->cavium_id_base =
4008 atomic_add_return(smmu->num_context_banks,
4009 &cavium_smmu_context_count);
4010 smmu->cavium_id_base -= smmu->num_context_banks;
4011 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004012
4013 /* ID2 */
4014 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
4015 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00004016 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004017
Will Deacon518f7132014-11-14 17:17:54 +00004018 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01004019 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00004020 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004021
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08004022 if (id & ID2_VMID16)
4023 smmu->features |= ARM_SMMU_FEAT_VMID16;
4024
Robin Murphyf1d84542015-03-04 16:41:05 +00004025 /*
4026 * What the page table walker can address actually depends on which
4027 * descriptor format is in use, but since a) we don't know that yet,
4028 * and b) it can vary per context bank, this will have to do...
4029 */
4030 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
4031 dev_warn(smmu->dev,
4032 "failed to set DMA mask for table walker\n");
4033
Robin Murphyb7862e32016-04-13 18:13:03 +01004034 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00004035 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01004036 if (smmu->version == ARM_SMMU_V1_64K)
4037 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004038 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004039 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00004040 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00004041 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01004042 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00004043 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01004044 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00004045 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01004046 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004047 }
4048
Robin Murphy7602b872016-04-28 17:12:09 +01004049 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01004050 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01004051 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01004052 if (smmu->features &
4053 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01004054 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01004055 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01004056 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01004057 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01004058 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01004059
Robin Murphyd5466352016-05-09 17:20:09 +01004060 if (arm_smmu_ops.pgsize_bitmap == -1UL)
4061 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
4062 else
4063 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004064 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01004065 smmu->pgsize_bitmap);
4066
Will Deacon518f7132014-11-14 17:17:54 +00004067
Will Deacon28d60072014-09-01 16:24:48 +01004068 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004069 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
4070 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01004071
4072 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004073 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
4074 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01004075
Will Deacon45ae7cf2013-06-24 18:31:25 +01004076 return 0;
4077}
4078
Robin Murphy67b65a32016-04-13 18:12:57 +01004079struct arm_smmu_match_data {
4080 enum arm_smmu_arch_version version;
4081 enum arm_smmu_implementation model;
Patrick Dalyd7476202016-09-08 18:23:28 -07004082 struct arm_smmu_arch_ops *arch_ops;
Robin Murphy67b65a32016-04-13 18:12:57 +01004083};
4084
Patrick Dalyd7476202016-09-08 18:23:28 -07004085#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops) \
4086static struct arm_smmu_match_data name = { \
4087.version = ver, \
4088.model = imp, \
4089.arch_ops = ops, \
4090} \
Robin Murphy67b65a32016-04-13 18:12:57 +01004091
Patrick Daly1f8a2882016-09-12 17:32:05 -07004092struct arm_smmu_arch_ops qsmmuv500_arch_ops;
4093
Patrick Dalyd7476202016-09-08 18:23:28 -07004094ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
4095ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
4096ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
4097ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
4098ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
Patrick Dalyad441dd2016-09-15 15:50:46 -07004099ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004100ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
4101 &qsmmuv500_arch_ops);
Robin Murphy67b65a32016-04-13 18:12:57 +01004102
Joerg Roedel09b52692014-10-02 12:24:45 +02004103static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01004104 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
4105 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
4106 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01004107 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01004108 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01004109 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07004110 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Patrick Daly1f8a2882016-09-12 17:32:05 -07004111 { .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
Robin Murphy09360402014-08-28 17:51:59 +01004112 { },
4113};
4114MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
4115
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004116
4117static int arm_smmu_of_iommu_configure_fixup(struct device *dev, void *data)
4118{
4119 if (!dev->iommu_fwspec)
4120 of_iommu_configure(dev, dev->of_node);
4121 return 0;
4122}
4123
Patrick Daly000a2f22017-02-13 22:18:12 -08004124static int arm_smmu_add_device_fixup(struct device *dev, void *data)
4125{
4126 struct iommu_ops *ops = data;
4127
4128 ops->add_device(dev);
4129 return 0;
4130}
4131
Patrick Daly1f8a2882016-09-12 17:32:05 -07004132static int qsmmuv500_tbu_register(struct device *dev, void *data);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004133static int arm_smmu_device_dt_probe(struct platform_device *pdev)
4134{
Robin Murphy67b65a32016-04-13 18:12:57 +01004135 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004136 struct resource *res;
4137 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004138 struct device *dev = &pdev->dev;
Robin Murphyd5b41782016-09-14 15:21:39 +01004139 int num_irqs, i, err;
Robin Murphy7e96c742016-09-14 15:26:46 +01004140 bool legacy_binding;
4141
4142 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
4143 if (legacy_binding && !using_generic_binding) {
4144 if (!using_legacy_binding)
4145 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
4146 using_legacy_binding = true;
4147 } else if (!legacy_binding && !using_legacy_binding) {
4148 using_generic_binding = true;
4149 } else {
4150 dev_err(dev, "not probing due to mismatched DT properties\n");
4151 return -ENODEV;
4152 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004153
4154 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
4155 if (!smmu) {
4156 dev_err(dev, "failed to allocate arm_smmu_device\n");
4157 return -ENOMEM;
4158 }
4159 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08004160 spin_lock_init(&smmu->atos_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07004161 idr_init(&smmu->asid_idr);
4162 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004163
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004164 data = of_device_get_match_data(dev);
Robin Murphy67b65a32016-04-13 18:12:57 +01004165 smmu->version = data->version;
4166 smmu->model = data->model;
Patrick Dalyd7476202016-09-08 18:23:28 -07004167 smmu->arch_ops = data->arch_ops;
Robin Murphy09360402014-08-28 17:51:59 +01004168
Will Deacon45ae7cf2013-06-24 18:31:25 +01004169 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01004170 smmu->base = devm_ioremap_resource(dev, res);
4171 if (IS_ERR(smmu->base))
4172 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004173 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004174
4175 if (of_property_read_u32(dev->of_node, "#global-interrupts",
4176 &smmu->num_global_irqs)) {
4177 dev_err(dev, "missing #global-interrupts property\n");
4178 return -ENODEV;
4179 }
4180
4181 num_irqs = 0;
4182 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
4183 num_irqs++;
4184 if (num_irqs > smmu->num_global_irqs)
4185 smmu->num_context_irqs++;
4186 }
4187
Andreas Herrmann44a08de2013-10-01 13:39:07 +01004188 if (!smmu->num_context_irqs) {
4189 dev_err(dev, "found %d interrupts but expected at least %d\n",
4190 num_irqs, smmu->num_global_irqs + 1);
4191 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004192 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004193
4194 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
4195 GFP_KERNEL);
4196 if (!smmu->irqs) {
4197 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
4198 return -ENOMEM;
4199 }
4200
4201 for (i = 0; i < num_irqs; ++i) {
4202 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07004203
Will Deacon45ae7cf2013-06-24 18:31:25 +01004204 if (irq < 0) {
4205 dev_err(dev, "failed to get irq index %d\n", i);
4206 return -ENODEV;
4207 }
4208 smmu->irqs[i] = irq;
4209 }
4210
Dhaval Patel031d7462015-05-09 14:47:29 -07004211 parse_driver_options(smmu);
4212
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004213 smmu->pwr = arm_smmu_init_power_resources(pdev);
4214 if (IS_ERR(smmu->pwr))
4215 return PTR_ERR(smmu->pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004216
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004217 err = arm_smmu_power_on(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004218 if (err)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004219 goto out_exit_power_resources;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004220
4221 err = arm_smmu_device_cfg_probe(smmu);
4222 if (err)
4223 goto out_power_off;
4224
Patrick Dalyda688822017-05-17 20:12:48 -07004225 err = arm_smmu_handoff_cbs(smmu);
4226 if (err)
4227 goto out_power_off;
4228
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004229 err = arm_smmu_parse_impl_def_registers(smmu);
4230 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004231 goto out_power_off;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004232
Robin Murphyb7862e32016-04-13 18:13:03 +01004233 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01004234 smmu->num_context_banks != smmu->num_context_irqs) {
4235 dev_err(dev,
Mitchel Humpherys73230ce2014-12-11 17:18:02 -08004236 "found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
4237 smmu->num_context_irqs, smmu->num_context_banks,
4238 smmu->num_context_banks);
4239 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004240 }
4241
Will Deacon45ae7cf2013-06-24 18:31:25 +01004242 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08004243 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
4244 NULL, arm_smmu_global_fault,
4245 IRQF_ONESHOT | IRQF_SHARED,
4246 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004247 if (err) {
4248 dev_err(dev, "failed to request global IRQ %d (%u)\n",
4249 i, smmu->irqs[i]);
Robin Murphyd5b41782016-09-14 15:21:39 +01004250 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004251 }
4252 }
4253
Patrick Dalyd7476202016-09-08 18:23:28 -07004254 err = arm_smmu_arch_init(smmu);
4255 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004256 goto out_power_off;
Patrick Dalyd7476202016-09-08 18:23:28 -07004257
Robin Murphy06e393e2016-09-12 17:13:55 +01004258 of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004259 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01004260 arm_smmu_device_reset(smmu);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004261 arm_smmu_power_off(smmu->pwr);
Patrick Dalyd7476202016-09-08 18:23:28 -07004262
Patrick Daly8e3371a2017-02-13 22:14:53 -08004263 INIT_LIST_HEAD(&smmu->list);
4264 spin_lock(&arm_smmu_devices_lock);
4265 list_add(&smmu->list, &arm_smmu_devices);
4266 spin_unlock(&arm_smmu_devices_lock);
4267
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004268 /* bus_set_iommu depends on this. */
4269 bus_for_each_dev(&platform_bus_type, NULL, NULL,
4270 arm_smmu_of_iommu_configure_fixup);
4271
Robin Murphy7e96c742016-09-14 15:26:46 +01004272 /* Oh, for a proper bus abstraction */
4273 if (!iommu_present(&platform_bus_type))
4274 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
Patrick Daly000a2f22017-02-13 22:18:12 -08004275 else
4276 bus_for_each_dev(&platform_bus_type, NULL, &arm_smmu_ops,
4277 arm_smmu_add_device_fixup);
Robin Murphy7e96c742016-09-14 15:26:46 +01004278#ifdef CONFIG_ARM_AMBA
4279 if (!iommu_present(&amba_bustype))
4280 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
4281#endif
4282#ifdef CONFIG_PCI
4283 if (!iommu_present(&pci_bus_type)) {
4284 pci_request_acs();
4285 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
4286 }
4287#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01004288 return 0;
4289
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004290out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004291 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004292
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004293out_exit_power_resources:
4294 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004295
Will Deacon45ae7cf2013-06-24 18:31:25 +01004296 return err;
4297}
4298
4299static int arm_smmu_device_remove(struct platform_device *pdev)
4300{
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004301 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004302
4303 if (!smmu)
4304 return -ENODEV;
4305
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004306 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07004307 return -EINVAL;
4308
Will Deaconecfadb62013-07-31 19:21:28 +01004309 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004310 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004311
Patrick Dalyc190d932016-08-30 17:23:28 -07004312 idr_destroy(&smmu->asid_idr);
4313
Will Deacon45ae7cf2013-06-24 18:31:25 +01004314 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07004315 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004316 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004317
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004318 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004319
Will Deacon45ae7cf2013-06-24 18:31:25 +01004320 return 0;
4321}
4322
Will Deacon45ae7cf2013-06-24 18:31:25 +01004323static struct platform_driver arm_smmu_driver = {
4324 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004325 .name = "arm-smmu",
4326 .of_match_table = of_match_ptr(arm_smmu_of_match),
4327 },
4328 .probe = arm_smmu_device_dt_probe,
4329 .remove = arm_smmu_device_remove,
4330};
4331
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004332static struct platform_driver qsmmuv500_tbu_driver;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004333static int __init arm_smmu_init(void)
4334{
Robin Murphy7e96c742016-09-14 15:26:46 +01004335 static bool registered;
4336 int ret = 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004337
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004338 if (registered)
4339 return 0;
4340
4341 ret = platform_driver_register(&qsmmuv500_tbu_driver);
4342 if (ret)
4343 return ret;
4344
4345 ret = platform_driver_register(&arm_smmu_driver);
4346 registered = !ret;
Robin Murphy7e96c742016-09-14 15:26:46 +01004347 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004348}
4349
4350static void __exit arm_smmu_exit(void)
4351{
4352 return platform_driver_unregister(&arm_smmu_driver);
4353}
4354
Andreas Herrmannb1950b22013-10-01 13:39:05 +01004355subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004356module_exit(arm_smmu_exit);
4357
Robin Murphy7e96c742016-09-14 15:26:46 +01004358static int __init arm_smmu_of_init(struct device_node *np)
4359{
4360 int ret = arm_smmu_init();
4361
4362 if (ret)
4363 return ret;
4364
4365 if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
4366 return -ENODEV;
4367
4368 return 0;
4369}
4370IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
4371IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
4372IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
4373IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
4374IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
4375IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
Robin Murphy7e96c742016-09-14 15:26:46 +01004376
Patrick Dalya0fddb62017-03-27 19:26:59 -07004377#define TCU_HW_VERSION_HLOS1 (0x18)
4378
Patrick Daly1f8a2882016-09-12 17:32:05 -07004379#define DEBUG_SID_HALT_REG 0x0
4380#define DEBUG_SID_HALT_VAL (0x1 << 16)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004381#define DEBUG_SID_HALT_SID_MASK 0x3ff
4382
4383#define DEBUG_VA_ADDR_REG 0x8
4384
4385#define DEBUG_TXN_TRIGG_REG 0x18
4386#define DEBUG_TXN_AXPROT_SHIFT 6
4387#define DEBUG_TXN_AXCACHE_SHIFT 2
4388#define DEBUG_TRX_WRITE (0x1 << 1)
4389#define DEBUG_TXN_READ (0x0 << 1)
4390#define DEBUG_TXN_TRIGGER 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004391
4392#define DEBUG_SR_HALT_ACK_REG 0x20
4393#define DEBUG_SR_HALT_ACK_VAL (0x1 << 1)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004394#define DEBUG_SR_ECATS_RUNNING_VAL (0x1 << 0)
4395
4396#define DEBUG_PAR_REG 0x28
4397#define DEBUG_PAR_PA_MASK ((0x1ULL << 36) - 1)
4398#define DEBUG_PAR_PA_SHIFT 12
4399#define DEBUG_PAR_FAULT_VAL 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004400
4401#define TBU_DBG_TIMEOUT_US 30000
4402
Patrick Daly23301482017-10-12 16:18:25 -07004403#define QSMMUV500_ACTLR_DEEP_PREFETCH_MASK 0x3
4404#define QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT 0x8
4405
Patrick Daly03330cc2017-08-11 14:56:38 -07004406
4407struct actlr_setting {
4408 struct arm_smmu_smr smr;
4409 u32 actlr;
4410};
4411
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004412struct qsmmuv500_archdata {
4413 struct list_head tbus;
Patrick Dalya0fddb62017-03-27 19:26:59 -07004414 void __iomem *tcu_base;
4415 u32 version;
Patrick Dalyda765c62017-09-11 16:31:07 -07004416
4417 struct actlr_setting *actlrs;
4418 u32 actlr_tbl_size;
4419
4420 struct arm_smmu_smr *errata1_clients;
4421 u32 num_errata1_clients;
4422 remote_spinlock_t errata1_lock;
4423 ktime_t last_tlbi_ktime;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004424};
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004425#define get_qsmmuv500_archdata(smmu) \
4426 ((struct qsmmuv500_archdata *)(smmu->archdata))
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004427
Patrick Daly1f8a2882016-09-12 17:32:05 -07004428struct qsmmuv500_tbu_device {
4429 struct list_head list;
4430 struct device *dev;
4431 struct arm_smmu_device *smmu;
4432 void __iomem *base;
4433 void __iomem *status_reg;
4434
4435 struct arm_smmu_power_resources *pwr;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004436 u32 sid_start;
4437 u32 num_sids;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004438
4439 /* Protects halt count */
4440 spinlock_t halt_lock;
4441 u32 halt_count;
4442};
4443
Patrick Daly03330cc2017-08-11 14:56:38 -07004444struct qsmmuv500_group_iommudata {
4445 bool has_actlr;
4446 u32 actlr;
4447};
4448#define to_qsmmuv500_group_iommudata(group) \
4449 ((struct qsmmuv500_group_iommudata *) \
4450 (iommu_group_get_iommudata(group)))
4451
4452
4453static bool arm_smmu_fwspec_match_smr(struct iommu_fwspec *fwspec,
Patrick Dalyda765c62017-09-11 16:31:07 -07004454 struct arm_smmu_smr *smr)
4455{
4456 struct arm_smmu_smr *smr2;
Patrick Daly03330cc2017-08-11 14:56:38 -07004457 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Patrick Dalyda765c62017-09-11 16:31:07 -07004458 int i, idx;
4459
Patrick Daly03330cc2017-08-11 14:56:38 -07004460 for_each_cfg_sme(fwspec, i, idx) {
4461 smr2 = &smmu->smrs[idx];
Patrick Dalyda765c62017-09-11 16:31:07 -07004462 /* Continue if table entry does not match */
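		/*
		 * Two entries overlap when their IDs agree in every bit that
		 * neither MASK ignores: for example, id 0x840/mask 0x3f and
		 * id 0x860/mask 0x0 differ only in bit 5, which the first
		 * mask covers, so they are treated as a match.
		 */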
4463 if ((smr->id ^ smr2->id) & ~(smr->mask | smr2->mask))
4464 continue;
4465 return true;
4466 }
4467 return false;
4468}
4469
4470#define ERRATA1_REMOTE_SPINLOCK "S:6"
4471#define ERRATA1_TLBI_INTERVAL_US 10
4472static bool
4473qsmmuv500_errata1_required(struct arm_smmu_domain *smmu_domain,
4474 struct qsmmuv500_archdata *data)
4475{
4476 bool ret = false;
4477 int j;
4478 struct arm_smmu_smr *smr;
Patrick Daly03330cc2017-08-11 14:56:38 -07004479 struct iommu_fwspec *fwspec;
Patrick Dalyda765c62017-09-11 16:31:07 -07004480
4481 if (smmu_domain->qsmmuv500_errata1_init)
4482 return smmu_domain->qsmmuv500_errata1_client;
4483
Patrick Daly03330cc2017-08-11 14:56:38 -07004484 fwspec = smmu_domain->dev->iommu_fwspec;
Patrick Dalyda765c62017-09-11 16:31:07 -07004485 for (j = 0; j < data->num_errata1_clients; j++) {
4486 smr = &data->errata1_clients[j];
Patrick Daly03330cc2017-08-11 14:56:38 -07004487 if (arm_smmu_fwspec_match_smr(fwspec, smr)) {
Patrick Dalyda765c62017-09-11 16:31:07 -07004488 ret = true;
4489 break;
4490 }
4491 }
4492
4493 smmu_domain->qsmmuv500_errata1_init = true;
4494 smmu_domain->qsmmuv500_errata1_client = ret;
4495 return ret;
4496}
4497
4498static void __qsmmuv500_errata1_tlbiall(struct arm_smmu_domain *smmu_domain)
4499{
4500 struct arm_smmu_device *smmu = smmu_domain->smmu;
4501 struct device *dev = smmu_domain->dev;
4502 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
4503 void __iomem *base;
4504 ktime_t cur;
4505 u32 val;
4506
4507 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
4508 writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
4509 writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
4510 if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
4511 !(val & TLBSTATUS_SACTIVE), 0, 100)) {
4512 cur = ktime_get();
4513 trace_errata_throttle_start(dev, 0);
4514
4515 msm_bus_noc_throttle_wa(true);
4516 if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
4517 !(val & TLBSTATUS_SACTIVE), 0, 10000)) {
4518			dev_err(smmu->dev, "ERRATA1 TLBSYNC timeout\n");
4519 trace_errata_failed(dev, 0);
4520 }
4521
4522 msm_bus_noc_throttle_wa(false);
4523
4524 trace_errata_throttle_end(
4525 dev, ktime_us_delta(ktime_get(), cur));
4526 }
4527}
4528
4529/* Must be called with clocks/regulators enabled */
4530static void qsmmuv500_errata1_tlb_inv_context(void *cookie)
4531{
4532 struct arm_smmu_domain *smmu_domain = cookie;
4533 struct device *dev = smmu_domain->dev;
4534 struct qsmmuv500_archdata *data =
4535 get_qsmmuv500_archdata(smmu_domain->smmu);
4536 ktime_t cur;
4537 bool errata;
4538
4539 cur = ktime_get();
4540 trace_errata_tlbi_start(dev, 0);
4541
4542 errata = qsmmuv500_errata1_required(smmu_domain, data);
4543 remote_spin_lock(&data->errata1_lock);
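	/*
	 * For affected clients, back-to-back full invalidations are spaced
	 * at least ERRATA1_TLBI_INTERVAL_US apart; the remote spinlock and
	 * last_tlbi_ktime make that spacing hold across all users of the
	 * lock, not just this CPU.
	 */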
4544 if (errata) {
4545 s64 delta;
4546
4547 delta = ktime_us_delta(ktime_get(), data->last_tlbi_ktime);
4548 if (delta < ERRATA1_TLBI_INTERVAL_US)
4549 udelay(ERRATA1_TLBI_INTERVAL_US - delta);
4550
4551 __qsmmuv500_errata1_tlbiall(smmu_domain);
4552
4553 data->last_tlbi_ktime = ktime_get();
4554 } else {
4555 __qsmmuv500_errata1_tlbiall(smmu_domain);
4556 }
4557 remote_spin_unlock(&data->errata1_lock);
4558
4559 trace_errata_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
4560}
4561
4562static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops = {
4563 .tlb_flush_all = qsmmuv500_errata1_tlb_inv_context,
4564 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
4565 .free_pages_exact = arm_smmu_free_pages_exact,
4566};
4567
Patrick Daly1f8a2882016-09-12 17:32:05 -07004568static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu)
4569{
4570 unsigned long flags;
4571 u32 val;
4572 void __iomem *base;
4573
4574 spin_lock_irqsave(&tbu->halt_lock, flags);
4575 if (tbu->halt_count) {
4576 tbu->halt_count++;
4577 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4578 return 0;
4579 }
4580
4581 base = tbu->base;
4582 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
4583 val |= DEBUG_SID_HALT_VAL;
4584 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
4585
4586 if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG,
4587 val, (val & DEBUG_SR_HALT_ACK_VAL),
4588 0, TBU_DBG_TIMEOUT_US)) {
4589 dev_err(tbu->dev, "Couldn't halt TBU!\n");
4590 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4591 return -ETIMEDOUT;
4592 }
4593
4594 tbu->halt_count = 1;
4595 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4596 return 0;
4597}
4598
4599static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
4600{
4601 unsigned long flags;
4602 u32 val;
4603 void __iomem *base;
4604
4605 spin_lock_irqsave(&tbu->halt_lock, flags);
4606 if (!tbu->halt_count) {
4607 WARN(1, "%s: bad tbu->halt_count", dev_name(tbu->dev));
4608 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4609 return;
4610
4611 } else if (tbu->halt_count > 1) {
4612 tbu->halt_count--;
4613 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4614 return;
4615 }
4616
4617 base = tbu->base;
4618 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
4619 val &= ~DEBUG_SID_HALT_VAL;
4620 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
4621
4622 tbu->halt_count = 0;
4623 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4624}
4625
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004626static struct qsmmuv500_tbu_device *qsmmuv500_find_tbu(
4627 struct arm_smmu_device *smmu, u32 sid)
4628{
4629 struct qsmmuv500_tbu_device *tbu = NULL;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004630 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004631
4632 list_for_each_entry(tbu, &data->tbus, list) {
4633 if (tbu->sid_start <= sid &&
4634 sid < tbu->sid_start + tbu->num_sids)
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07004635 return tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004636 }
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07004637 return NULL;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004638}
4639
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004640static int qsmmuv500_ecats_lock(struct arm_smmu_domain *smmu_domain,
4641 struct qsmmuv500_tbu_device *tbu,
4642 unsigned long *flags)
4643{
4644 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004645 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004646 u32 val;
4647
4648 spin_lock_irqsave(&smmu->atos_lock, *flags);
4649 /* The status register is not accessible on version 1.0 */
4650 if (data->version == 0x01000000)
4651 return 0;
4652
4653 if (readl_poll_timeout_atomic(tbu->status_reg,
4654 val, (val == 0x1), 0,
4655 TBU_DBG_TIMEOUT_US)) {
4656 dev_err(tbu->dev, "ECATS hw busy!\n");
4657 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4658 return -ETIMEDOUT;
4659 }
4660
4661 return 0;
4662}
4663
4664static void qsmmuv500_ecats_unlock(struct arm_smmu_domain *smmu_domain,
4665 struct qsmmuv500_tbu_device *tbu,
4666 unsigned long *flags)
4667{
4668 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004669 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004670
4671 /* The status register is not accessible on version 1.0 */
4672 if (data->version != 0x01000000)
4673 writel_relaxed(0, tbu->status_reg);
4674 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4675}
4676
4677/*
4678 * Zero means failure.
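 *
 * Rough sequence, as implemented below: power on and halt the owning TBU,
 * take the ECATS lock, temporarily mask context-bank fault reporting,
 * program the stream ID and VA into the TBU debug registers, trigger the
 * translation, then read the result from DEBUG_PAR_REG. The lookup is
 * retried a couple of times because the first translation after a failure
 * can be misreported.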
4679 */
4680static phys_addr_t qsmmuv500_iova_to_phys(
4681 struct iommu_domain *domain, dma_addr_t iova, u32 sid)
4682{
4683 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
4684 struct arm_smmu_device *smmu = smmu_domain->smmu;
4685 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
4686 struct qsmmuv500_tbu_device *tbu;
4687 int ret;
4688 phys_addr_t phys = 0;
4689 u64 val, fsr;
4690 unsigned long flags;
4691 void __iomem *cb_base;
4692 u32 sctlr_orig, sctlr;
4693 int needs_redo = 0;
4694
4695 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
4696 tbu = qsmmuv500_find_tbu(smmu, sid);
4697 if (!tbu)
4698 return 0;
4699
4700 ret = arm_smmu_power_on(tbu->pwr);
4701 if (ret)
4702 return 0;
4703
4704 /*
4705 * Disable client transactions & wait for existing operations to
4706 * complete.
4707 */
4708 ret = qsmmuv500_tbu_halt(tbu);
4709 if (ret)
4710 goto out_power_off;
4711
4712 /* Only one concurrent atos operation */
4713 ret = qsmmuv500_ecats_lock(smmu_domain, tbu, &flags);
4714 if (ret)
4715 goto out_resume;
4716
4717 /*
4718 * We can be called from an interrupt handler with FSR already set
4719 * so terminate the faulting transaction prior to starting ecats.
4720	 * No new racing faults can occur since we are in the halted state.
4721 * ECATS can trigger the fault interrupt, so disable it temporarily
4722 * and check for an interrupt manually.
4723 */
4724 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4725 if (fsr & FSR_FAULT) {
4726 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
4727 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4728 }
4729 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
4730 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
4731 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
4732
4733redo:
4734 /* Set address and stream-id */
4735 val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
4736 val |= sid & DEBUG_SID_HALT_SID_MASK;
4737 writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
4738 writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
4739
4740 /*
4741 * Write-back Read and Write-Allocate
4742	 * Privileged, non-secure, data transaction
4743 * Read operation.
4744 */
4745 val = 0xF << DEBUG_TXN_AXCACHE_SHIFT;
4746 val |= 0x3 << DEBUG_TXN_AXPROT_SHIFT;
4747 val |= DEBUG_TXN_TRIGGER;
4748 writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);
4749
4750 ret = 0;
4751 if (readl_poll_timeout_atomic(tbu->base + DEBUG_SR_HALT_ACK_REG,
4752 val, !(val & DEBUG_SR_ECATS_RUNNING_VAL),
4753 0, TBU_DBG_TIMEOUT_US)) {
4754 dev_err(tbu->dev, "ECATS translation timed out!\n");
4755 }
4756
4757 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4758 if (fsr & FSR_FAULT) {
4759 dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx\n",
4760			fsr);
4761 ret = -EINVAL;
4762
4763		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
4764 /*
4765 * Clear pending interrupts
4766 * Barrier required to ensure that the FSR is cleared
4767 * before resuming SMMU operation
4768 */
4769 wmb();
4770 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4771 }
4772
4773 val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
4774 if (val & DEBUG_PAR_FAULT_VAL) {
4775 dev_err(tbu->dev, "ECATS translation failed! PAR = %llx\n",
4776 val);
4777 ret = -EINVAL;
4778 }
4779
4780 phys = (val >> DEBUG_PAR_PA_SHIFT) & DEBUG_PAR_PA_MASK;
4781 if (ret < 0)
4782 phys = 0;
4783
4784 /* Reset hardware */
4785 writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
4786 writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
4787
4788 /*
4789 * After a failed translation, the next successful translation will
4790 * incorrectly be reported as a failure.
4791 */
4792 if (!phys && needs_redo++ < 2)
4793 goto redo;
4794
4795 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
4796 qsmmuv500_ecats_unlock(smmu_domain, tbu, &flags);
4797
4798out_resume:
4799 qsmmuv500_tbu_resume(tbu);
4800
4801out_power_off:
4802 arm_smmu_power_off(tbu->pwr);
4803
4804 return phys;
4805}
4806
4807static phys_addr_t qsmmuv500_iova_to_phys_hard(
4808 struct iommu_domain *domain, dma_addr_t iova)
4809{
4810 u16 sid;
4811 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
4812 struct iommu_fwspec *fwspec;
4813
4814 /* Select a sid */
4815 fwspec = smmu_domain->dev->iommu_fwspec;
4816 sid = (u16)fwspec->ids[0];
4817
4818 return qsmmuv500_iova_to_phys(domain, iova, sid);
4819}
4820
Patrick Daly03330cc2017-08-11 14:56:38 -07004821static void qsmmuv500_release_group_iommudata(void *data)
4822{
4823 kfree(data);
4824}
4825
4826/* If a device has a valid actlr, it must match */
4827static int qsmmuv500_device_group(struct device *dev,
4828 struct iommu_group *group)
4829{
4830 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
4831 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
4832 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
4833 struct qsmmuv500_group_iommudata *iommudata;
4834 u32 actlr, i;
4835 struct arm_smmu_smr *smr;
4836
4837 iommudata = to_qsmmuv500_group_iommudata(group);
4838 if (!iommudata) {
4839 iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
4840 if (!iommudata)
4841 return -ENOMEM;
4842
4843 iommu_group_set_iommudata(group, iommudata,
4844 qsmmuv500_release_group_iommudata);
4845 }
4846
4847 for (i = 0; i < data->actlr_tbl_size; i++) {
4848 smr = &data->actlrs[i].smr;
4849 actlr = data->actlrs[i].actlr;
4850
4851 if (!arm_smmu_fwspec_match_smr(fwspec, smr))
4852 continue;
4853
4854 if (!iommudata->has_actlr) {
4855 iommudata->actlr = actlr;
4856 iommudata->has_actlr = true;
4857 } else if (iommudata->actlr != actlr) {
4858 return -EINVAL;
4859 }
4860 }
4861
4862 return 0;
4863}
4864
4865static void qsmmuv500_init_cb(struct arm_smmu_domain *smmu_domain,
4866 struct device *dev)
4867{
4868 struct arm_smmu_device *smmu = smmu_domain->smmu;
4869 struct qsmmuv500_group_iommudata *iommudata =
4870 to_qsmmuv500_group_iommudata(dev->iommu_group);
4871 void __iomem *cb_base;
4872 const struct iommu_gather_ops *tlb;
4873
4874 if (!iommudata->has_actlr)
4875 return;
4876
4877 tlb = smmu_domain->pgtbl_cfg.tlb;
4878 cb_base = ARM_SMMU_CB_BASE(smmu) +
4879 ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
4880
4881 writel_relaxed(iommudata->actlr, cb_base + ARM_SMMU_CB_ACTLR);
4882
4883 /*
Patrick Daly23301482017-10-12 16:18:25 -07004884 * Prefetch only works properly if the start and end of all
4885	 * buffers mapped in the page table are aligned to 16 KB.
4886 */
4887	if ((iommudata->actlr >> QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT) &
4888	    QSMMUV500_ACTLR_DEEP_PREFETCH_MASK)
4889 smmu_domain->qsmmuv500_errata2_min_align = true;
4890
4891 /*
Patrick Daly03330cc2017-08-11 14:56:38 -07004892 * Flush the context bank after modifying ACTLR to ensure there
4893 * are no cache entries with stale state
4894 */
4895 tlb->tlb_flush_all(smmu_domain);
4896}
4897
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004898static int qsmmuv500_tbu_register(struct device *dev, void *cookie)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004899{
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004900 struct arm_smmu_device *smmu = cookie;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004901 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004902 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004903
4904 if (!dev->driver) {
4905 dev_err(dev, "TBU failed probe, QSMMUV500 cannot continue!\n");
4906 return -EINVAL;
4907 }
4908
4909 tbu = dev_get_drvdata(dev);
4910
4911 INIT_LIST_HEAD(&tbu->list);
4912 tbu->smmu = smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004913 list_add(&tbu->list, &data->tbus);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004914 return 0;
4915}
4916
Patrick Dalyda765c62017-09-11 16:31:07 -07004917static int qsmmuv500_parse_errata1(struct arm_smmu_device *smmu)
4918{
4919 int len, i;
4920 struct device *dev = smmu->dev;
4921 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
4922 struct arm_smmu_smr *smrs;
4923 const __be32 *cell;
4924
4925 cell = of_get_property(dev->of_node, "qcom,mmu500-errata-1", NULL);
4926 if (!cell)
4927 return 0;
4928
4929 remote_spin_lock_init(&data->errata1_lock, ERRATA1_REMOTE_SPINLOCK);
4930 len = of_property_count_elems_of_size(
4931 dev->of_node, "qcom,mmu500-errata-1", sizeof(u32) * 2);
4932 if (len < 0)
4933 return 0;
4934
4935 smrs = devm_kzalloc(dev, sizeof(*smrs) * len, GFP_KERNEL);
4936 if (!smrs)
4937 return -ENOMEM;
4938
4939 for (i = 0; i < len; i++) {
4940 smrs[i].id = of_read_number(cell++, 1);
4941 smrs[i].mask = of_read_number(cell++, 1);
4942 }
4943
4944 data->errata1_clients = smrs;
4945 data->num_errata1_clients = len;
4946 return 0;
4947}
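
/*
 * Illustrative device-tree fragment for the errata list parsed above; the
 * SID/mask pairs are placeholders, not values from a real target:
 *
 *	qcom,mmu500-errata-1 = <0x800 0x7f>,
 *			       <0xc00 0x0>;
 *
 * Each <sid mask> pair names a stream-match region whose domains must use
 * the throttled TLB-invalidate path above.
 */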
4948
Patrick Daly03330cc2017-08-11 14:56:38 -07004949static int qsmmuv500_read_actlr_tbl(struct arm_smmu_device *smmu)
4950{
4951 int len, i;
4952 struct device *dev = smmu->dev;
4953 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
4954 struct actlr_setting *actlrs;
4955 const __be32 *cell;
4956
4957 cell = of_get_property(dev->of_node, "qcom,actlr", NULL);
4958 if (!cell)
4959 return 0;
4960
4961 len = of_property_count_elems_of_size(dev->of_node, "qcom,actlr",
4962 sizeof(u32) * 3);
4963 if (len < 0)
4964 return 0;
4965
4966 actlrs = devm_kzalloc(dev, sizeof(*actlrs) * len, GFP_KERNEL);
4967 if (!actlrs)
4968 return -ENOMEM;
4969
4970 for (i = 0; i < len; i++) {
4971 actlrs[i].smr.id = of_read_number(cell++, 1);
4972 actlrs[i].smr.mask = of_read_number(cell++, 1);
4973 actlrs[i].actlr = of_read_number(cell++, 1);
4974 }
4975
4976 data->actlrs = actlrs;
4977 data->actlr_tbl_size = len;
4978 return 0;
4979}
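
/*
 * Illustrative fragment for the ACTLR table parsed above; the values are
 * placeholders. Each triple is <sid mask actlr>, and any device whose
 * stream IDs fall under an entry has that ACTLR value applied to its
 * context bank by qsmmuv500_init_cb():
 *
 *	qcom,actlr = <0x880 0x8 0x303>,
 *		     <0x600 0x0 0x103>;
 */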
4980
Patrick Daly1f8a2882016-09-12 17:32:05 -07004981static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
4982{
Patrick Dalya0fddb62017-03-27 19:26:59 -07004983 struct resource *res;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004984 struct device *dev = smmu->dev;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004985 struct qsmmuv500_archdata *data;
Patrick Dalya0fddb62017-03-27 19:26:59 -07004986 struct platform_device *pdev;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004987 int ret;
Patrick Daly03330cc2017-08-11 14:56:38 -07004988 u32 val;
4989 void __iomem *reg;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004990
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004991 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
4992 if (!data)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004993 return -ENOMEM;
4994
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004995 INIT_LIST_HEAD(&data->tbus);
Patrick Dalya0fddb62017-03-27 19:26:59 -07004996
4997 pdev = container_of(dev, struct platform_device, dev);
4998 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcu-base");
4999 data->tcu_base = devm_ioremap_resource(dev, res);
5000 if (IS_ERR(data->tcu_base))
5001 return PTR_ERR(data->tcu_base);
5002
5003 data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005004 smmu->archdata = data;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005005
Patrick Dalyda765c62017-09-11 16:31:07 -07005006 ret = qsmmuv500_parse_errata1(smmu);
5007 if (ret)
5008 return ret;
5009
Patrick Daly03330cc2017-08-11 14:56:38 -07005010 ret = qsmmuv500_read_actlr_tbl(smmu);
5011 if (ret)
5012 return ret;
5013
5014 reg = ARM_SMMU_GR0(smmu);
5015 val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
5016 val &= ~ARM_MMU500_ACR_CACHE_LOCK;
5017 writel_relaxed(val, reg + ARM_SMMU_GR0_sACR);
5018 val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
5019 /*
5020	 * Modifying the nonsecure copy of the sACR register is only
5021 * allowed if permission is given in the secure sACR register.
5022 * Attempt to detect if we were able to update the value.
5023 */
5024 WARN_ON(val & ARM_MMU500_ACR_CACHE_LOCK);
5025
Patrick Daly1f8a2882016-09-12 17:32:05 -07005026 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
5027 if (ret)
5028 return ret;
5029
5030 /* Attempt to register child devices */
5031 ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
5032 if (ret)
Patrick Daly6ce54262017-04-12 21:24:06 -07005033 return -EPROBE_DEFER;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005034
5035 return 0;
5036}
5037
5038struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
5039 .init = qsmmuv500_arch_init,
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005040 .iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
Patrick Daly03330cc2017-08-11 14:56:38 -07005041 .init_context_bank = qsmmuv500_init_cb,
5042 .device_group = qsmmuv500_device_group,
Patrick Daly1f8a2882016-09-12 17:32:05 -07005043};
5044
5045static const struct of_device_id qsmmuv500_tbu_of_match[] = {
5046 {.compatible = "qcom,qsmmuv500-tbu"},
5047 {}
5048};
5049
5050static int qsmmuv500_tbu_probe(struct platform_device *pdev)
5051{
5052 struct resource *res;
5053 struct device *dev = &pdev->dev;
5054 struct qsmmuv500_tbu_device *tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005055 const __be32 *cell;
5056 int len;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005057
5058 tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
5059 if (!tbu)
5060 return -ENOMEM;
5061
5062 INIT_LIST_HEAD(&tbu->list);
5063 tbu->dev = dev;
5064 spin_lock_init(&tbu->halt_lock);
5065
5066 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
5067 tbu->base = devm_ioremap_resource(dev, res);
5068 if (IS_ERR(tbu->base))
5069 return PTR_ERR(tbu->base);
5070
5071 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "status-reg");
5072 tbu->status_reg = devm_ioremap_resource(dev, res);
5073 if (IS_ERR(tbu->status_reg))
5074 return PTR_ERR(tbu->status_reg);
5075
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005076 cell = of_get_property(dev->of_node, "qcom,stream-id-range", &len);
5077 if (!cell || len < 8)
5078 return -EINVAL;
5079
5080 tbu->sid_start = of_read_number(cell, 1);
5081 tbu->num_sids = of_read_number(cell + 1, 1);
5082
Patrick Daly1f8a2882016-09-12 17:32:05 -07005083 tbu->pwr = arm_smmu_init_power_resources(pdev);
5084 if (IS_ERR(tbu->pwr))
5085 return PTR_ERR(tbu->pwr);
5086
5087 dev_set_drvdata(dev, tbu);
5088 return 0;
5089}
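
/*
 * A TBU child node consumed by this probe might look like the sketch below;
 * the unit address, register ranges and stream-id range are placeholders:
 *
 *	tbu@15085000 {
 *		compatible = "qcom,qsmmuv500-tbu";
 *		reg = <0x15085000 0x1000>, <0x15065200 0x8>;
 *		reg-names = "base", "status-reg";
 *		qcom,stream-id-range = <0x800 0x400>;
 *	};
 *
 * "qcom,stream-id-range" is <first-sid number-of-sids>, which is what
 * qsmmuv500_find_tbu() uses to map a stream ID back to its TBU.
 */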
5090
5091static struct platform_driver qsmmuv500_tbu_driver = {
5092 .driver = {
5093 .name = "qsmmuv500-tbu",
5094 .of_match_table = of_match_ptr(qsmmuv500_tbu_of_match),
5095 },
5096 .probe = qsmmuv500_tbu_probe,
5097};
5098
Will Deacon45ae7cf2013-06-24 18:31:25 +01005099MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
5100MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
5101MODULE_LICENSE("GPL v2");