/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/of_platform.h>
#include <linux/msm-bus.h>
#include <dt-bindings/msm/msm-bus-ids.h>
#include <linux/remote_spinlock.h>
#include <linux/ktime.h>
#include <trace/events/iommu.h>
#include <linux/notifier.h>
#include <dt-bindings/arm/arm-smmu.h>

#include <linux/amba/bus.h>
#include <soc/qcom/msm_tz_smmu.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS	128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)	((smmu)->base)
#define ARM_SMMU_GR1(smmu)	((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq	writeq_relaxed
#else
#define smmu_write_atomic_lq	writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0	0x0
#define sCR0_CLIENTPD		(1 << 0)
#define sCR0_GFRE		(1 << 1)
#define sCR0_GFIE		(1 << 2)
#define sCR0_GCFGFRE		(1 << 4)
#define sCR0_GCFGFIE		(1 << 5)
#define sCR0_USFCFG		(1 << 10)
#define sCR0_VMIDPNE		(1 << 11)
#define sCR0_PTM		(1 << 12)
#define sCR0_FB			(1 << 13)
#define sCR0_VMID16EN		(1 << 31)
#define sCR0_BSU_SHIFT		14
#define sCR0_BSU_MASK		0x3
#define sCR0_SHCFG_SHIFT	22
#define sCR0_SHCFG_MASK		0x3
#define sCR0_SHCFG_NSH		3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR	0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0	0x20
#define ARM_SMMU_GR0_ID1	0x24
#define ARM_SMMU_GR0_ID2	0x28
#define ARM_SMMU_GR0_ID3	0x2c
#define ARM_SMMU_GR0_ID4	0x30
#define ARM_SMMU_GR0_ID5	0x34
#define ARM_SMMU_GR0_ID6	0x38
#define ARM_SMMU_GR0_ID7	0x3c
#define ARM_SMMU_GR0_sGFSR	0x48
#define ARM_SMMU_GR0_sGFSYNR0	0x50
#define ARM_SMMU_GR0_sGFSYNR1	0x54
#define ARM_SMMU_GR0_sGFSYNR2	0x58

#define ID0_S1TS		(1 << 30)
#define ID0_S2TS		(1 << 29)
#define ID0_NTS			(1 << 28)
#define ID0_SMS			(1 << 27)
#define ID0_ATOSNS		(1 << 26)
#define ID0_PTFS_NO_AARCH32	(1 << 25)
#define ID0_PTFS_NO_AARCH32S	(1 << 24)
#define ID0_CTTW		(1 << 14)
#define ID0_NUMIRPT_SHIFT	16
#define ID0_NUMIRPT_MASK	0xff
#define ID0_NUMSIDB_SHIFT	9
#define ID0_NUMSIDB_MASK	0xf
#define ID0_NUMSMRG_SHIFT	0
#define ID0_NUMSMRG_MASK	0xff

#define ID1_PAGESIZE		(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT	28
#define ID1_NUMPAGENDXB_MASK	7
#define ID1_NUMS2CB_SHIFT	16
#define ID1_NUMS2CB_MASK	0xff
#define ID1_NUMCB_SHIFT		0
#define ID1_NUMCB_MASK		0xff

#define ID2_OAS_SHIFT		4
#define ID2_OAS_MASK		0xf
#define ID2_IAS_SHIFT		0
#define ID2_IAS_MASK		0xf
#define ID2_UBS_SHIFT		8
#define ID2_UBS_MASK		0xf
#define ID2_PTFS_4K		(1 << 12)
#define ID2_PTFS_16K		(1 << 13)
#define ID2_PTFS_64K		(1 << 14)
#define ID2_VMID16		(1 << 15)

#define ID7_MAJOR_SHIFT		4
#define ID7_MAJOR_MASK		0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		500000	/* 500ms */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7FFF
#define SID_MASK			0x7FFF
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_SHCFG_SHIFT		8
#define S2CR_SHCFG_MASK			0x3
#define S2CR_SHCFG_NSH			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FSRRESTORE		0x5c
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIALL		0x618
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define TLBSTATUS_SACTIVE		(1 << 0)
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_SHCFG_SHIFT		22
#define SCTLR_SHCFG_MASK		0x3
#define SCTLR_SHCFG_NSH			0x3
#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_HUPCF			(1 << 8)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
	QCOM_SMMUV500,
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

/*
 * attach_count
 *	The SMR and S2CR registers are only programmed when the number of
 *	devices attached to the iommu using these registers is > 0. This
 *	is required for the "SID switch" use case for secure display.
 *	Protected by stream_map_mutex.
 */
struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	int				attach_count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
	bool				cb_handoff;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
	.cb_handoff = false,						\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

/*
 * Describes resources required for on/off power operation.
 * Separate reference count is provided for atomic/nonatomic
 * operations.
 */
struct arm_smmu_power_resources {
	struct platform_device		*pdev;
	struct device			*dev;

	struct clk			**clocks;
	int				num_clocks;

	struct regulator_bulk_data	*gdscs;
	int				num_gdscs;

	uint32_t			bus_client;
	struct msm_bus_scale_pdata	*bus_dt_data;

	/* Protects power_count */
	struct mutex			power_lock;
	int				power_count;

	/* Protects clock_refs_count */
	spinlock_t			clock_refs_lock;
	int				clock_refs_count;
	int				regulator_defer;
};

struct arm_smmu_arch_ops;
struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	phys_addr_t			phys_addr;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	DECLARE_BITMAP(secure_context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;

	u32				cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
	unsigned int			num_impl_def_attach_registers;

	struct arm_smmu_power_resources *pwr;
	struct notifier_block		regulator_nb;

	spinlock_t			atos_lock;

	/* protects idr */
	struct mutex			idr_mutex;
	struct idr			asid_idr;

	struct arm_smmu_arch_ops	*arch_ops;
	void				*archdata;

	enum tz_smmu_device_id		sec_id;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	u32				procid;
	u16				asid;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff
#define INVALID_CBNDX			0xff
#define INVALID_ASID			0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_pte_info {
	void *virt_addr;
	size_t size;
	struct list_head entry;
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct device			*dev;
	struct io_pgtable_ops		*pgtbl_ops;
	struct io_pgtable_cfg		pgtbl_cfg;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	u32				attributes;
	bool				slave_side_secure;
	u32				secure_vmid;
	struct list_head		pte_info_list;
	struct list_head		unassign_list;
	struct mutex			assign_lock;
	struct list_head		secure_pool_list;
	/* nonsecure pool protected by pgtbl_lock */
	struct list_head		nonsecure_pool;
	struct iommu_domain		domain;

	bool				qsmmuv500_errata1_init;
	bool				qsmmuv500_errata1_client;
	bool				qsmmuv500_errata2_min_align;
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
	{ ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
	{ ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" },
	{ ARM_SMMU_OPT_MMU500_ERRATA1, "qcom,mmu500-errata-1" },
	{ ARM_SMMU_OPT_STATIC_CB, "qcom,enable-static-cb"},
	{ ARM_SMMU_OPT_HALT, "qcom,enable-smmu-halt"},
	{ 0, NULL},
};

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);

static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
				     dma_addr_t iova);

static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);

static int arm_smmu_alloc_cb(struct iommu_domain *domain,
			     struct arm_smmu_device *smmu,
			     struct device *dev);
static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops;

static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu);
static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain);
static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_dbg(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

static int arm_smmu_restore_sec_cfg(struct arm_smmu_device *smmu, u32 cb)
{
	int ret;
	int scm_ret = 0;

	if (!arm_smmu_is_static_cb(smmu))
		return 0;

	ret = scm_restore_sec_cfg(smmu->sec_id, cb, &scm_ret);
	if (ret || scm_ret) {
		pr_err("scm call IOMMU_SECURE_CFG failed\n");
		return -EINVAL;
	}

	return 0;
}
static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
{
	if (smmu_domain->attributes &
			(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT))
		return true;
	else if (smmu_domain->smmu && smmu_domain->smmu->dev)
		return smmu_domain->smmu->dev->archdata.dma_coherent;
	else
		return false;
}

static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu)
{
	return smmu->options & ARM_SMMU_OPT_STATIC_CB;
}

static bool arm_smmu_has_secure_vmid(struct arm_smmu_domain *smmu_domain)
{
	return (smmu_domain->secure_vmid != VMID_INVAL);
}

static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain)
{
	return arm_smmu_has_secure_vmid(smmu_domain) &&
			smmu_domain->slave_side_secure;
}

static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain)
{
	return arm_smmu_has_secure_vmid(smmu_domain)
			&& !smmu_domain->slave_side_secure;
}

static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_master_side_secure(smmu_domain))
		mutex_lock(&smmu_domain->assign_lock);
}

static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_master_side_secure(smmu_domain))
		mutex_unlock(&smmu_domain->assign_lock);
}

/*
 * init()
 * Hook for additional device tree parsing at probe time.
 *
 * device_reset()
 * Hook for one-time architecture-specific register settings.
 *
 * iova_to_phys_hard()
 * Provides debug information. May be called from the context fault irq handler.
 *
 * init_context_bank()
 * Hook for architecture-specific settings which require knowledge of the
 * dynamically allocated context bank number.
 *
 * device_group()
 * Hook for checking whether a device is compatible with a said group.
 */
struct arm_smmu_arch_ops {
	int (*init)(struct arm_smmu_device *smmu);
	void (*device_reset)(struct arm_smmu_device *smmu);
	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
					 dma_addr_t iova);
	void (*init_context_bank)(struct arm_smmu_domain *smmu_domain,
					struct device *dev);
	int (*device_group)(struct device *dev, struct iommu_group *group);
};

static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
{
	if (!smmu->arch_ops)
		return 0;
	if (!smmu->arch_ops->init)
		return 0;
	return smmu->arch_ops->init(smmu);
}

static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
{
	if (!smmu->arch_ops)
		return;
	if (!smmu->arch_ops->device_reset)
		return;
	return smmu->arch_ops->device_reset(smmu);
}

static void arm_smmu_arch_init_context_bank(
		struct arm_smmu_domain *smmu_domain, struct device *dev)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (!smmu->arch_ops)
		return;
	if (!smmu->arch_ops->init_context_bank)
		return;
	return smmu->arch_ops->init_context_bank(smmu_domain, dev);
}

static int arm_smmu_arch_device_group(struct device *dev,
					struct iommu_group *group)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);

	if (!smmu->arch_ops)
		return 0;
	if (!smmu->arch_ops->device_group)
		return 0;
	return smmu->arch_ops->device_group(dev, group);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					    struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err = 0;

	memset(&it, 0, sizeof(it));
	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

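/*
 * Find the first free index in @map between @start and @end and claim it
 * atomically; returns the claimed index, or -ENOSPC if the range is full.
 */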
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_prepare(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
			while (i--)
				clk_unprepare(pwr->clocks[i]);
			break;
		}
	}
	return ret;
}

static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_unprepare(pwr->clocks[i - 1]);
}

static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_enable(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
			while (i--)
				clk_disable(pwr->clocks[i]);
			break;
		}
	}

	return ret;
}

static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_disable(pwr->clocks[i - 1]);
}

static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return 0;
	return msm_bus_scale_client_update_request(pwr->bus_client, 1);
}

static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return;
	WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
}

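/* Enable all GDSC regulators for the SMMU, rolling back on failure. */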
static int arm_smmu_enable_regulators(struct arm_smmu_power_resources *pwr)
{
	struct regulator_bulk_data *consumers;
	int num_consumers, ret;
	int i;

	num_consumers = pwr->num_gdscs;
	consumers = pwr->gdscs;
	for (i = 0; i < num_consumers; i++) {
		ret = regulator_enable(consumers[i].consumer);
		if (ret)
			goto out;
	}
	return 0;

out:
	i -= 1;
	for (; i >= 0; i--)
		regulator_disable(consumers[i].consumer);
	return ret;
}

static int arm_smmu_disable_regulators(struct arm_smmu_power_resources *pwr)
{
	struct regulator_bulk_data *consumers;
	int i;
	int num_consumers, ret, r;

	num_consumers = pwr->num_gdscs;
	consumers = pwr->gdscs;
	for (i = num_consumers - 1; i >= 0; --i) {
		ret = regulator_disable_deferred(consumers[i].consumer,
						 pwr->regulator_defer);
		if (ret != 0)
			goto err;
	}

	return 0;

err:
	pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret);
	for (++i; i < num_consumers; ++i) {
		r = regulator_enable(consumers[i].consumer);
		if (r != 0)
970 pr_err("Failed to reename %s: %d\n",
971 consumers[i].supply, r);
	}

	return ret;
}

Patrick Daly5b3d8c62016-11-01 15:34:11 -0700977/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
978static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
979{
980 int ret = 0;
981 unsigned long flags;
982
983 spin_lock_irqsave(&pwr->clock_refs_lock, flags);
984 if (pwr->clock_refs_count > 0) {
985 pwr->clock_refs_count++;
986 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
987 return 0;
988 }
989
990 ret = arm_smmu_enable_clocks(pwr);
991 if (!ret)
992 pwr->clock_refs_count = 1;
993
994 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
Patrick Daly8befb662016-08-17 20:03:28 -0700995 return ret;
996}
997
998/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700999static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -07001000{
Patrick Daly8befb662016-08-17 20:03:28 -07001001 unsigned long flags;
1002
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001003 spin_lock_irqsave(&pwr->clock_refs_lock, flags);
1004 if (pwr->clock_refs_count == 0) {
1005 WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
1006 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
1007 return;
1008
1009 } else if (pwr->clock_refs_count > 1) {
1010 pwr->clock_refs_count--;
1011 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
Patrick Daly8befb662016-08-17 20:03:28 -07001012 return;
1013 }
1014
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001015 arm_smmu_disable_clocks(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001016
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001017 pwr->clock_refs_count = 0;
1018 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
Patrick Daly8befb662016-08-17 20:03:28 -07001019}
1020
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001021static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001022{
1023 int ret;
1024
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001025 mutex_lock(&pwr->power_lock);
1026 if (pwr->power_count > 0) {
1027 pwr->power_count += 1;
1028 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001029 return 0;
1030 }
1031
Patrick Daly8e2aa1a2017-04-13 17:09:43 -07001032 ret = arm_smmu_request_bus(pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07001033 if (ret)
1034 goto out_unlock;
1035
Patrick Dalyb26f97c2017-08-11 15:24:20 -07001036 ret = arm_smmu_enable_regulators(pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07001037 if (ret)
Patrick Daly8e2aa1a2017-04-13 17:09:43 -07001038 goto out_disable_bus;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001039
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001040 ret = arm_smmu_prepare_clocks(pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07001041 if (ret)
Patrick Daly8e2aa1a2017-04-13 17:09:43 -07001042 goto out_disable_regulators;
Patrick Daly2764f952016-09-06 19:22:44 -07001043
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001044 pwr->power_count = 1;
1045 mutex_unlock(&pwr->power_lock);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07001046 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001047
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07001048out_disable_regulators:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001049 regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
Patrick Daly8e2aa1a2017-04-13 17:09:43 -07001050out_disable_bus:
1051 arm_smmu_unrequest_bus(pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07001052out_unlock:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001053 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001054 return ret;
1055}
1056
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001057static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001058{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001059 mutex_lock(&pwr->power_lock);
1060 if (pwr->power_count == 0) {
1061 WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
1062 mutex_unlock(&pwr->power_lock);
1063 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001064
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001065 } else if (pwr->power_count > 1) {
1066 pwr->power_count--;
1067 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001068 return;
1069 }
1070
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001071 arm_smmu_unprepare_clocks(pwr);
Prakash Guptafad87ca2017-05-16 12:13:02 +05301072 arm_smmu_disable_regulators(pwr);
Patrick Daly8e2aa1a2017-04-13 17:09:43 -07001073 arm_smmu_unrequest_bus(pwr);
Patrick Daly2e3471e2017-04-13 16:24:33 -07001074 pwr->power_count = 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001075 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001076}
1077
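/*
 * Full power-on: take the sleepable references (bus vote, regulators, clock
 * prepare) and then the atomic clock-enable reference.
 */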
static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
{
	int ret;

	ret = arm_smmu_power_on_slow(pwr);
	if (ret)
		return ret;

	ret = arm_smmu_power_on_atomic(pwr);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	arm_smmu_power_off_slow(pwr);
	return ret;
}

static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
{
	arm_smmu_power_off_atomic(pwr);
	arm_smmu_power_off_slow(pwr);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static int arm_smmu_domain_power_on(struct iommu_domain *domain,
				struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain)
		return arm_smmu_power_on_atomic(smmu->pwr);

	return arm_smmu_power_on(smmu->pwr);
}

/*
 * Must be used instead of arm_smmu_power_off if it may be called from
 * atomic context
 */
1123static void arm_smmu_domain_power_off(struct iommu_domain *domain,
1124 struct arm_smmu_device *smmu)
1125{
1126 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1127 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
1128
1129 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001130 arm_smmu_power_off_atomic(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001131 return;
1132 }
1133
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001134 arm_smmu_power_off(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001135}
1136
Will Deacon45ae7cf2013-06-24 18:31:25 +01001137/* Wait for any pending TLB invalidations to complete */
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001138static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
1139 int cbndx)
1140{
1141 void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
1142 u32 val;
1143
1144 writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
1145 if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
1146 !(val & TLBSTATUS_SACTIVE),
Prakash Gupta515d9bc2017-11-20 15:00:39 +05301147 0, TLB_LOOP_TIMEOUT)) {
1148 trace_tlbsync_timeout(smmu->dev, 0);
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001149 dev_err(smmu->dev, "TLBSYNC timeout!\n");
Prakash Gupta515d9bc2017-11-20 15:00:39 +05301150 }
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001151}
1152
Will Deacon518f7132014-11-14 17:17:54 +00001153static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001154{
1155 int count = 0;
1156 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1157
1158 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
1159 while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
1160 & sTLBGSTATUS_GSACTIVE) {
1161 cpu_relax();
1162 if (++count == TLB_LOOP_TIMEOUT) {
1163 dev_err_ratelimited(smmu->dev,
1164 "TLB sync timed out -- SMMU may be deadlocked\n");
1165 return;
1166 }
1167 udelay(1);
1168 }
1169}
1170
Will Deacon518f7132014-11-14 17:17:54 +00001171static void arm_smmu_tlb_sync(void *cookie)
Will Deacon1463fe42013-07-31 19:21:27 +01001172{
Will Deacon518f7132014-11-14 17:17:54 +00001173 struct arm_smmu_domain *smmu_domain = cookie;
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001174 arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
Will Deacon518f7132014-11-14 17:17:54 +00001175}
1176
Patrick Daly8befb662016-08-17 20:03:28 -07001177/* Must be called with clocks/regulators enabled */
Will Deacon518f7132014-11-14 17:17:54 +00001178static void arm_smmu_tlb_inv_context(void *cookie)
1179{
1180 struct arm_smmu_domain *smmu_domain = cookie;
Prakash Gupta515d9bc2017-11-20 15:00:39 +05301181 struct device *dev = smmu_domain->dev;
Will Deacon44680ee2014-06-25 11:29:12 +01001182 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1183 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon1463fe42013-07-31 19:21:27 +01001184 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
Will Deacon518f7132014-11-14 17:17:54 +00001185 void __iomem *base;
Patrick Dalye7069342017-07-11 12:35:55 -07001186 bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
Prakash Gupta515d9bc2017-11-20 15:00:39 +05301187 ktime_t cur = ktime_get();
1188
1189 trace_tlbi_start(dev, 0);
Will Deacon1463fe42013-07-31 19:21:27 +01001190
Patrick Dalye7069342017-07-11 12:35:55 -07001191 if (stage1 && !use_tlbiall) {
Will Deacon1463fe42013-07-31 19:21:27 +01001192 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001193 writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +01001194 base + ARM_SMMU_CB_S1_TLBIASID);
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001195 arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
Patrick Dalye7069342017-07-11 12:35:55 -07001196 } else if (stage1 && use_tlbiall) {
1197 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1198 writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
1199 arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001200 } else {
1201 base = ARM_SMMU_GR0(smmu);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001202 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +01001203 base + ARM_SMMU_GR0_TLBIVMID);
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001204 __arm_smmu_tlb_sync(smmu);
Will Deacon1463fe42013-07-31 19:21:27 +01001205 }
Prakash Gupta515d9bc2017-11-20 15:00:39 +05301206
1207 trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
Will Deacon1463fe42013-07-31 19:21:27 +01001208}
1209
Will Deacon518f7132014-11-14 17:17:54 +00001210static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
Robin Murphy06c610e2015-12-07 18:18:53 +00001211 size_t granule, bool leaf, void *cookie)
Will Deacon518f7132014-11-14 17:17:54 +00001212{
1213 struct arm_smmu_domain *smmu_domain = cookie;
1214 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1215 struct arm_smmu_device *smmu = smmu_domain->smmu;
1216 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1217 void __iomem *reg;
Patrick Dalye7069342017-07-11 12:35:55 -07001218 bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
Will Deacon518f7132014-11-14 17:17:54 +00001219
Patrick Dalye7069342017-07-11 12:35:55 -07001220 if (stage1 && !use_tlbiall) {
Will Deacon518f7132014-11-14 17:17:54 +00001221 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1222 reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
1223
Robin Murphy7602b872016-04-28 17:12:09 +01001224 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001225 iova &= ~12UL;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001226 iova |= ARM_SMMU_CB_ASID(smmu, cfg);
Robin Murphy75df1382015-12-07 18:18:52 +00001227 do {
1228 writel_relaxed(iova, reg);
1229 iova += granule;
1230 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001231 } else {
1232 iova >>= 12;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001233 iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
Robin Murphy75df1382015-12-07 18:18:52 +00001234 do {
1235 writeq_relaxed(iova, reg);
1236 iova += granule >> 12;
1237 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001238 }
Patrick Dalye7069342017-07-11 12:35:55 -07001239 } else if (stage1 && use_tlbiall) {
1240 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1241 reg += ARM_SMMU_CB_S1_TLBIALL;
1242 writel_relaxed(0, reg);
Will Deacon518f7132014-11-14 17:17:54 +00001243 } else if (smmu->version == ARM_SMMU_V2) {
1244 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1245 reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
1246 ARM_SMMU_CB_S2_TLBIIPAS2;
Robin Murphy75df1382015-12-07 18:18:52 +00001247 iova >>= 12;
1248 do {
Robin Murphyf9a05f02016-04-13 18:13:01 +01001249 smmu_write_atomic_lq(iova, reg);
Robin Murphy75df1382015-12-07 18:18:52 +00001250 iova += granule >> 12;
1251 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001252 } else {
1253 reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001254 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
Will Deacon518f7132014-11-14 17:17:54 +00001255 }
1256}
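
/*
 * Worked example of the AArch64 TLBIVA encoding built above (illustrative):
 * for iova 0x12345000 with ASID 5,
 *
 *	iova >>= 12;          -> 0x12345
 *	iova |= (u64)5 << 48; -> 0x0005000000012345
 *
 * i.e. the page number sits in the low bits and the ASID in bits [63:48],
 * and one such value is written per invalidated granule.
 */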
1257
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001258struct arm_smmu_secure_pool_chunk {
1259 void *addr;
1260 size_t size;
1261 struct list_head list;
1262};
1263
1264static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
1265 size_t size)
1266{
1267 struct arm_smmu_secure_pool_chunk *it;
1268
1269 list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
1270 if (it->size == size) {
1271 void *addr = it->addr;
1272
1273 list_del(&it->list);
1274 kfree(it);
1275 return addr;
1276 }
1277 }
1278
1279 return NULL;
1280}
1281
1282static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
1283 void *addr, size_t size)
1284{
1285 struct arm_smmu_secure_pool_chunk *chunk;
1286
1287 chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
1288 if (!chunk)
1289 return -ENOMEM;
1290
1291 chunk->addr = addr;
1292 chunk->size = size;
1293 memset(addr, 0, size);
1294 list_add(&chunk->list, &smmu_domain->secure_pool_list);
1295
1296 return 0;
1297}
1298
1299static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
1300{
1301 struct arm_smmu_secure_pool_chunk *it, *i;
1302
1303 list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
1304 arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
1305 /* pages will be freed later (after being unassigned) */
Prakash Gupta8e827be2017-10-04 12:37:11 +05301306 list_del(&it->list);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001307 kfree(it);
1308 }
1309}
1310
Patrick Dalyc11d1082016-09-01 15:52:44 -07001311static void *arm_smmu_alloc_pages_exact(void *cookie,
1312 size_t size, gfp_t gfp_mask)
1313{
1314 int ret;
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001315 void *page;
1316 struct arm_smmu_domain *smmu_domain = cookie;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001317
Patrick Daly2d600832018-02-11 15:12:55 -08001318 if (!arm_smmu_is_master_side_secure(smmu_domain)) {
1319 struct page *pg;
1320 /* size is expected to be 4K with current configuration */
1321 if (size == PAGE_SIZE) {
1322 pg = list_first_entry_or_null(
1323 &smmu_domain->nonsecure_pool, struct page, lru);
1324 if (pg) {
1325 list_del_init(&pg->lru);
1326 return page_address(pg);
1327 }
1328 }
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001329 return alloc_pages_exact(size, gfp_mask);
Patrick Daly2d600832018-02-11 15:12:55 -08001330 }
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001331
1332 page = arm_smmu_secure_pool_remove(smmu_domain, size);
1333 if (page)
1334 return page;
1335
1336 page = alloc_pages_exact(size, gfp_mask);
1337 if (page) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07001338 ret = arm_smmu_prepare_pgtable(page, cookie);
1339 if (ret) {
1340 free_pages_exact(page, size);
1341 return NULL;
1342 }
1343 }
1344
1345 return page;
1346}
1347
1348static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
1349{
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001350 struct arm_smmu_domain *smmu_domain = cookie;
1351
Charan Teja Reddy35144b02017-09-05 16:20:46 +05301352 if (!arm_smmu_is_master_side_secure(smmu_domain)) {
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001353 free_pages_exact(virt, size);
1354 return;
1355 }
1356
1357 if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
1358 arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001359}
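
/*
 * Lifecycle sketch for master-side-secure page-table memory, tying the
 * helpers above together (illustrative summary of the existing code, not a
 * new interface):
 *
 *	p = arm_smmu_alloc_pages_exact(dom, SZ_4K, gfp);
 *		reuses a chunk of that size from secure_pool_list if present,
 *		otherwise allocates one and queues it for hyp assignment
 *	arm_smmu_free_pages_exact(dom, p, SZ_4K);
 *		parks the still-assigned chunk back on secure_pool_list
 *	arm_smmu_secure_pool_destroy(dom);
 *		queues every parked chunk for unassignment; the pages are only
 *		freed once arm_smmu_unassign_table() has returned them to HLOS
 */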
1360
Will Deacon518f7132014-11-14 17:17:54 +00001361static struct iommu_gather_ops arm_smmu_gather_ops = {
1362 .tlb_flush_all = arm_smmu_tlb_inv_context,
1363 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
1364 .tlb_sync = arm_smmu_tlb_sync,
Patrick Dalyc11d1082016-09-01 15:52:44 -07001365 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
1366 .free_pages_exact = arm_smmu_free_pages_exact,
Will Deacon518f7132014-11-14 17:17:54 +00001367};
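
/*
 * These callbacks reach the io-pgtable code through the io_pgtable_cfg set
 * up in arm_smmu_init_domain_context(), roughly (simplified from the code
 * further down, not a separate API):
 *
 *	smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
 *		.pgsize_bitmap	= smmu->pgsize_bitmap,
 *		.tlb		= &arm_smmu_gather_ops,
 *		.iommu_dev	= smmu->dev,
 *	};
 *	pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
 *					 smmu_domain);
 *
 * so map and unmap paths end up invoking the hooks above with the
 * arm_smmu_domain as the cookie.
 */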
1368
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001369static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
1370 dma_addr_t iova, u32 fsr)
1371{
1372 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001373 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyda765c62017-09-11 16:31:07 -07001374 const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001375 phys_addr_t phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07001376 phys_addr_t phys_post_tlbiall;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001377
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001378 phys = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyda765c62017-09-11 16:31:07 -07001379 tlb->tlb_flush_all(smmu_domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001380 phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001381
Patrick Dalyad441dd2016-09-15 15:50:46 -07001382 if (phys != phys_post_tlbiall) {
1383 dev_err(smmu->dev,
1384 "ATOS results differed across TLBIALL...\n"
1385 "Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
1386 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001387
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001388 return (phys == 0 ? phys_post_tlbiall : phys);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001389}
1390
Will Deacon45ae7cf2013-06-24 18:31:25 +01001391static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
1392{
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001393 int flags, ret, tmp;
Patrick Daly5ba28112016-08-30 19:18:52 -07001394 u32 fsr, fsynr, resume;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001395 unsigned long iova;
1396 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +01001397 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001398 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1399 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001400 void __iomem *cb_base;
Shalaj Jain04059c52015-03-03 13:34:59 -08001401 void __iomem *gr1_base;
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001402 bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001403 phys_addr_t phys_soft;
Shalaj Jain04059c52015-03-03 13:34:59 -08001404 u32 frsynra;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001405 bool non_fatal_fault = !!(smmu_domain->attributes &
Sudarshan Rajagopalanf4464e02017-08-10 14:30:39 -07001406 (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001407
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001408 static DEFINE_RATELIMIT_STATE(_rs,
1409 DEFAULT_RATELIMIT_INTERVAL,
1410 DEFAULT_RATELIMIT_BURST);
1411
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001412 ret = arm_smmu_power_on(smmu->pwr);
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001413 if (ret)
1414 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001415
Shalaj Jain04059c52015-03-03 13:34:59 -08001416 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001417 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001418 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
1419
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001420 if (!(fsr & FSR_FAULT)) {
1421 ret = IRQ_NONE;
1422 goto out_power_off;
1423 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001424
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001425 if (fatal_asf && (fsr & FSR_ASF)) {
1426 dev_err(smmu->dev,
1427 "Took an address size fault. Refusing to recover.\n");
1428 BUG();
1429 }
1430
Will Deacon45ae7cf2013-06-24 18:31:25 +01001431 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
Patrick Daly5ba28112016-08-30 19:18:52 -07001432 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001433 if (fsr & FSR_TF)
1434 flags |= IOMMU_FAULT_TRANSLATION;
1435 if (fsr & FSR_PF)
1436 flags |= IOMMU_FAULT_PERMISSION;
Mitchel Humpherysdd75e332015-08-19 11:02:33 -07001437 if (fsr & FSR_EF)
1438 flags |= IOMMU_FAULT_EXTERNAL;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001439 if (fsr & FSR_SS)
1440 flags |= IOMMU_FAULT_TRANSACTION_STALLED;
Patrick Daly5ba28112016-08-30 19:18:52 -07001441
Robin Murphyf9a05f02016-04-13 18:13:01 +01001442 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001443 phys_soft = arm_smmu_iova_to_phys(domain, iova);
Shalaj Jain04059c52015-03-03 13:34:59 -08001444 frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
1445 frsynra &= CBFRSYNRA_SID_MASK;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001446 tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
1447 if (!tmp || (tmp == -EBUSY)) {
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001448 dev_dbg(smmu->dev,
1449 "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1450 iova, fsr, fsynr, cfg->cbndx);
1451 dev_dbg(smmu->dev,
1452 "soft iova-to-phys=%pa\n", &phys_soft);
Patrick Daly5ba28112016-08-30 19:18:52 -07001453 ret = IRQ_HANDLED;
Shrenuj Bansald5083c02015-09-18 14:59:09 -07001454 resume = RESUME_TERMINATE;
Patrick Daly5ba28112016-08-30 19:18:52 -07001455 } else {
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001456 phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
1457 fsr);
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001458 if (__ratelimit(&_rs)) {
1459 dev_err(smmu->dev,
1460 "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1461 iova, fsr, fsynr, cfg->cbndx);
1462 dev_err(smmu->dev, "FAR = %016lx\n",
1463 (unsigned long)iova);
1464 dev_err(smmu->dev,
1465 "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
1466 fsr,
1467 (fsr & 0x02) ? "TF " : "",
1468 (fsr & 0x04) ? "AFF " : "",
1469 (fsr & 0x08) ? "PF " : "",
1470 (fsr & 0x10) ? "EF " : "",
1471 (fsr & 0x20) ? "TLBMCF " : "",
1472 (fsr & 0x40) ? "TLBLKF " : "",
1473 (fsr & 0x80) ? "MHF " : "",
1474 (fsr & 0x40000000) ? "SS " : "",
1475 (fsr & 0x80000000) ? "MULTI " : "");
1476 dev_err(smmu->dev,
1477 "soft iova-to-phys=%pa\n", &phys_soft);
Mitchel Humpherysd03b65d2015-11-05 11:50:29 -08001478 if (!phys_soft)
1479 dev_err(smmu->dev,
1480 "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
1481 dev_name(smmu->dev));
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001482 if (phys_atos)
1483 dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
1484 &phys_atos);
1485 else
1486 dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001487 dev_err(smmu->dev, "SID=0x%x\n", frsynra);
1488 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001489 ret = IRQ_NONE;
1490 resume = RESUME_TERMINATE;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001491 if (!non_fatal_fault) {
1492 dev_err(smmu->dev,
1493 "Unhandled arm-smmu context fault!\n");
1494 BUG();
1495 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001496 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001497
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001498 /*
1499 * If the client returns -EBUSY, do not clear FSR and do not RESUME
1500 * if stalled. This is required to keep the IOMMU client stalled on
1501 * the outstanding fault. This gives the client a chance to take any
1502 * debug action and then terminate the stalled transaction.
1503 * So, the sequence in case of stall on fault should be:
1504 * 1) Do not clear FSR or write to RESUME here
1505 * 2) Client takes any debug action
1506 * 3) Client terminates the stalled transaction and resumes the IOMMU
1507 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1508 * not before so that the fault remains outstanding. This ensures
1509 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1510 * need to be terminated.
1511 */
1512 if (tmp != -EBUSY) {
1513 /* Clear the faulting FSR */
1514 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001515
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001516 /*
1517 * Barrier required to ensure that the FSR is cleared
1518 * before resuming SMMU operation
1519 */
1520 wmb();
1521
1522 /* Retry or terminate any stalled transactions */
1523 if (fsr & FSR_SS)
1524 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1525 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001526
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001527out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001528 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001529
Patrick Daly5ba28112016-08-30 19:18:52 -07001530 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001531}
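
/*
 * Sketch of a client-side fault handler that exercises the stall-on-fault
 * sequence described in arm_smmu_context_fault() above. Purely illustrative:
 * the handler shape comes from the generic iommu_set_fault_handler() /
 * report_iommu_fault() interface, and my_fault_handler is a placeholder
 * name.
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags, void *token)
 *	{
 *		if (flags & IOMMU_FAULT_TRANSACTION_STALLED)
 *			return -EBUSY;
 *		return 0;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, NULL);
 *
 * Returning -EBUSY keeps the transaction stalled (FSR is not cleared and
 * RESUME is not written) so the client can inspect state before terminating
 * the stalled transaction and clearing FSR itself; returning 0 lets this
 * driver clear FSR and terminate the transaction immediately.
 */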
1532
1533static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1534{
1535 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1536 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001537 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001538
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001539 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001540 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001541
Will Deacon45ae7cf2013-06-24 18:31:25 +01001542 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1543 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1544 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1545 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1546
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001547 if (!gfsr) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001548 arm_smmu_power_off(smmu->pwr);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001549 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001550 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001551
Will Deacon45ae7cf2013-06-24 18:31:25 +01001552 dev_err_ratelimited(smmu->dev,
1553 "Unexpected global fault, this could be serious\n");
1554 dev_err_ratelimited(smmu->dev,
1555 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1556 gfsr, gfsynr0, gfsynr1, gfsynr2);
1557
1558 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001559 arm_smmu_power_off(smmu->pwr);
Will Deaconadaba322013-07-31 19:21:26 +01001560 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001561}
1562
Shiraz Hashimeca8c2e2018-01-15 20:08:38 +05301563static bool arm_smmu_master_attached(struct arm_smmu_device *smmu,
1564 struct iommu_fwspec *fwspec)
1565{
1566 int i, idx;
1567
1568 for_each_cfg_sme(fwspec, i, idx) {
1569 if (smmu->s2crs[idx].attach_count)
1570 return true;
1571 }
1572
1573 return false;
1574}
1575
Charan Teja Reddy35144b02017-09-05 16:20:46 +05301576static int arm_smmu_set_pt_format(struct arm_smmu_domain *smmu_domain,
1577 struct io_pgtable_cfg *pgtbl_cfg)
1578{
1579 struct arm_smmu_device *smmu = smmu_domain->smmu;
1580 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1581 int ret = 0;
1582
1583 if ((smmu->version > ARM_SMMU_V1) &&
1584 (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) &&
1585 !arm_smmu_has_secure_vmid(smmu_domain) &&
1586 arm_smmu_is_static_cb(smmu)) {
1587 ret = msm_tz_set_cb_format(smmu->sec_id, cfg->cbndx);
1588 }
1589 return ret;
1590}
1591
Will Deacon518f7132014-11-14 17:17:54 +00001592static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1593 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001594{
Robin Murphyb94df6f2016-08-11 17:44:06 +01001595 u32 reg, reg2;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001596 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001597 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +01001598 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1599 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001600 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001601
Will Deacon45ae7cf2013-06-24 18:31:25 +01001602 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001603 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1604 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001605
Will Deacon4a1c93c2015-03-04 12:21:03 +00001606 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001607 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1608 reg = CBA2R_RW64_64BIT;
1609 else
1610 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001611 /* 16-bit VMIDs live in CBA2R */
1612 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001613 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001614
Will Deacon4a1c93c2015-03-04 12:21:03 +00001615 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
1616 }
1617
Will Deacon45ae7cf2013-06-24 18:31:25 +01001618 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001619 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001620 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001621 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001622
Will Deacon57ca90f2014-02-06 14:59:05 +00001623 /*
1624 * Use the weakest shareability/memory types, so they are
1625 * overridden by the ttbcr/pte.
1626 */
1627 if (stage1) {
1628 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1629 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001630 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1631 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001632 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001633 }
Will Deacon44680ee2014-06-25 11:29:12 +01001634 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001635
Will Deacon518f7132014-11-14 17:17:54 +00001636 /* TTBRs */
1637 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001638 u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001639
Robin Murphyb94df6f2016-08-11 17:44:06 +01001640 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1641 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
1642 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
1643 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
1644 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
1645 writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
1646 } else {
1647 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1648 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1649 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
1650 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
1651 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1652 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
1653 }
Will Deacon518f7132014-11-14 17:17:54 +00001654 } else {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001655 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001656 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +00001657 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001658
Will Deacon518f7132014-11-14 17:17:54 +00001659 /* TTBCR */
1660 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001661 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1662 reg = pgtbl_cfg->arm_v7s_cfg.tcr;
1663 reg2 = 0;
1664 } else {
1665 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1666 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
1667 reg2 |= TTBCR2_SEP_UPSTREAM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001668 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001669 if (smmu->version > ARM_SMMU_V1)
1670 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001671 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001672 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001673 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001674 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001675
Will Deacon518f7132014-11-14 17:17:54 +00001676 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001677 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001678 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1679 reg = pgtbl_cfg->arm_v7s_cfg.prrr;
1680 reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
1681 } else {
1682 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1683 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1684 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001685 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001686 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001687 }
1688
Will Deacon45ae7cf2013-06-24 18:31:25 +01001689 /* SCTLR */
Robin Murphyb94df6f2016-08-11 17:44:06 +01001690 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001691
Patrick Daly7f377fe2017-10-06 17:37:10 -07001692 /* Ensure bypass transactions are Non-shareable */
1693 reg |= SCTLR_SHCFG_NSH << SCTLR_SHCFG_SHIFT;
1694
Charan Teja Reddyc682e472017-04-20 19:11:20 +05301695 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_CB_STALL_DISABLE)) {
1696 reg &= ~SCTLR_CFCFG;
1697 reg |= SCTLR_HUPCF;
1698 }
1699
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001700 if ((!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
1701 !(smmu_domain->attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) ||
1702 !stage1)
Patrick Dalye62d3362016-03-15 18:58:28 -07001703 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001704 if (stage1)
1705 reg |= SCTLR_S1_ASIDPNE;
1706#ifdef __BIG_ENDIAN
1707 reg |= SCTLR_E;
1708#endif
Will Deacon25724842013-08-21 13:49:53 +01001709 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001710}
1711
Patrick Dalyc190d932016-08-30 17:23:28 -07001712static int arm_smmu_init_asid(struct iommu_domain *domain,
1713 struct arm_smmu_device *smmu)
1714{
1715 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1716 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1717 bool dynamic = is_dynamic_domain(domain);
1718 int ret;
1719
1720 if (!dynamic) {
1721 cfg->asid = cfg->cbndx + 1;
1722 } else {
1723 mutex_lock(&smmu->idr_mutex);
1724 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1725 smmu->num_context_banks + 2,
1726 MAX_ASID + 1, GFP_KERNEL);
1727
1728 mutex_unlock(&smmu->idr_mutex);
1729 if (ret < 0) {
1730 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1731 ret);
1732 return ret;
1733 }
1734 cfg->asid = ret;
1735 }
1736 return 0;
1737}
1738
1739static void arm_smmu_free_asid(struct iommu_domain *domain)
1740{
1741 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1742 struct arm_smmu_device *smmu = smmu_domain->smmu;
1743 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1744 bool dynamic = is_dynamic_domain(domain);
1745
1746 if (cfg->asid == INVALID_ASID || !dynamic)
1747 return;
1748
1749 mutex_lock(&smmu->idr_mutex);
1750 idr_remove(&smmu->asid_idr, cfg->asid);
1751 mutex_unlock(&smmu->idr_mutex);
1752}
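
/*
 * ASID numbering sketch derived from the two helpers above (illustrative):
 * a normal domain simply takes cbndx + 1, so context bank 3 ends up with
 * ASID 4, while a dynamic domain (which shares its parent's context bank)
 * draws from the IDR in the range [num_context_banks + 2, MAX_ASID], so the
 * two namespaces never collide.
 */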
1753
Will Deacon45ae7cf2013-06-24 18:31:25 +01001754static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Patrick Dalyea63baa2017-02-13 17:11:33 -08001755 struct arm_smmu_device *smmu,
1756 struct device *dev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001757{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001758 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001759 unsigned long ias, oas;
1760 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001761 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001762 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001763 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001764 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyce6786f2016-11-09 14:19:23 -08001765 unsigned long quirks = 0;
Patrick Dalyc190d932016-08-30 17:23:28 -07001766 bool dynamic;
Patrick Dalyda765c62017-09-11 16:31:07 -07001767 const struct iommu_gather_ops *tlb;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001768
Will Deacon518f7132014-11-14 17:17:54 +00001769 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001770 if (smmu_domain->smmu)
1771 goto out_unlock;
1772
Patrick Dalyc190d932016-08-30 17:23:28 -07001773 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1774 smmu_domain->cfg.asid = INVALID_ASID;
1775
Patrick Dalyc190d932016-08-30 17:23:28 -07001776 dynamic = is_dynamic_domain(domain);
1777 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1778 dev_err(smmu->dev, "dynamic domains not supported\n");
1779 ret = -EPERM;
1780 goto out_unlock;
1781 }
1782
Will Deaconc752ce42014-06-25 22:46:31 +01001783 /*
1784 * Mapping the requested stage onto what we support is surprisingly
1785 * complicated, mainly because the spec allows S1+S2 SMMUs without
1786 * support for nested translation. That means we end up with the
1787 * following table:
1788 *
1789 * Requested Supported Actual
1790 * S1 N S1
1791 * S1 S1+S2 S1
1792 * S1 S2 S2
1793 * S1 S1 S1
1794 * N N N
1795 * N S1+S2 S2
1796 * N S2 S2
1797 * N S1 S1
1798 *
1799 * Note that you can't actually request stage-2 mappings.
1800 */
1801 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1802 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1803 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1804 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1805
Robin Murphy7602b872016-04-28 17:12:09 +01001806 /*
1807 * Choosing a suitable context format is even more fiddly. Until we
1808 * grow some way for the caller to express a preference, and/or move
1809 * the decision into the io-pgtable code where it arguably belongs,
1810 * just aim for the closest thing to the rest of the system, and hope
1811 * that the hardware isn't esoteric enough that we can't assume AArch64
1812 * support to be a superset of AArch32 support...
1813 */
1814 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1815 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001816 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
1817 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
1818 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
1819 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
1820 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +01001821 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1822 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1823 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1824 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1825 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1826
1827 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1828 ret = -EINVAL;
1829 goto out_unlock;
1830 }
1831
Will Deaconc752ce42014-06-25 22:46:31 +01001832 switch (smmu_domain->stage) {
1833 case ARM_SMMU_DOMAIN_S1:
1834 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1835 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001836 ias = smmu->va_size;
1837 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001838 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001839 fmt = ARM_64_LPAE_S1;
Patrick Daly4423d3e2017-05-04 18:17:51 -07001840 if (smmu->options & ARM_SMMU_OPT_3LVL_TABLES)
1841 ias = min(ias, 39UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001842 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +00001843 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001844 ias = min(ias, 32UL);
1845 oas = min(oas, 40UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001846 } else {
1847 fmt = ARM_V7S;
1848 ias = min(ias, 32UL);
1849 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +01001850 }
Will Deaconc752ce42014-06-25 22:46:31 +01001851 break;
1852 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001853 /*
1854 * We will likely want to change this if/when KVM gets
1855 * involved.
1856 */
Will Deaconc752ce42014-06-25 22:46:31 +01001857 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001858 cfg->cbar = CBAR_TYPE_S2_TRANS;
1859 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001860 ias = smmu->ipa_size;
1861 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001862 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001863 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001864 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001865 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001866 ias = min(ias, 40UL);
1867 oas = min(oas, 40UL);
1868 }
Will Deaconc752ce42014-06-25 22:46:31 +01001869 break;
1870 default:
1871 ret = -EINVAL;
1872 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001873 }
1874
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001875 if (is_fast)
1876 fmt = ARM_V8L_FAST;
1877
Patrick Dalyce6786f2016-11-09 14:19:23 -08001878 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
1879 quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
Liam Mark53cf2342016-12-20 11:36:07 -08001880 if (is_iommu_pt_coherent(smmu_domain))
1881 quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
Patrick Daly49ccf332017-09-27 15:10:29 -07001882 if ((quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT) &&
1883 (smmu->model == QCOM_SMMUV500))
1884 quirks |= IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001885
Patrick Dalyda765c62017-09-11 16:31:07 -07001886 tlb = &arm_smmu_gather_ops;
Patrick Daly83174c12017-10-26 12:31:15 -07001887 if (smmu->options & ARM_SMMU_OPT_MMU500_ERRATA1)
Patrick Dalyda765c62017-09-11 16:31:07 -07001888 tlb = &qsmmuv500_errata1_smmu_gather_ops;
1889
Patrick Dalyda688822017-05-17 20:12:48 -07001890 ret = arm_smmu_alloc_cb(domain, smmu, dev);
1891 if (ret < 0)
1892 goto out_unlock;
1893 cfg->cbndx = ret;
1894
Robin Murphyb7862e32016-04-13 18:13:03 +01001895 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001896 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1897 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001898 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001899 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001900 }
1901
Charan Teja Reddy35144b02017-09-05 16:20:46 +05301902 if (arm_smmu_is_slave_side_secure(smmu_domain)) {
1903 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
1904 .quirks = quirks,
1905 .pgsize_bitmap = smmu->pgsize_bitmap,
1906 .arm_msm_secure_cfg = {
1907 .sec_id = smmu->sec_id,
1908 .cbndx = cfg->cbndx,
1909 },
1910 .iommu_dev = smmu->dev,
1911 };
1912 fmt = ARM_MSM_SECURE;
1913 } else {
1914 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
1915 .quirks = quirks,
1916 .pgsize_bitmap = smmu->pgsize_bitmap,
1917 .ias = ias,
1918 .oas = oas,
1919 .tlb = tlb,
1920 .iommu_dev = smmu->dev,
1921 };
1922 }
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001923
Will Deacon518f7132014-11-14 17:17:54 +00001924 smmu_domain->smmu = smmu;
Patrick Dalyea63baa2017-02-13 17:11:33 -08001925 smmu_domain->dev = dev;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001926 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1927 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001928 if (!pgtbl_ops) {
1929 ret = -ENOMEM;
1930 goto out_clear_smmu;
1931 }
1932
Patrick Dalyc11d1082016-09-01 15:52:44 -07001933 /*
1934 * assign any page table memory that might have been allocated
1935 * during alloc_io_pgtable_ops
1936 */
Patrick Dalye271f212016-10-04 13:24:49 -07001937 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001938 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001939 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001940
Robin Murphyd5466352016-05-09 17:20:09 +01001941 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001942 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Robin Murphyd7a8d042016-09-12 17:13:58 +01001943 domain->geometry.aperture_end = (1UL << ias) - 1;
1944 domain->geometry.force_aperture = true;
Will Deacon518f7132014-11-14 17:17:54 +00001945
Patrick Dalyc190d932016-08-30 17:23:28 -07001946 /* Assign an asid */
1947 ret = arm_smmu_init_asid(domain, smmu);
1948 if (ret)
1949 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001950
Patrick Dalyc190d932016-08-30 17:23:28 -07001951 if (!dynamic) {
1952 /* Initialise the context bank with our page table cfg */
1953 arm_smmu_init_context_bank(smmu_domain,
1954 &smmu_domain->pgtbl_cfg);
Charan Teja Reddy35144b02017-09-05 16:20:46 +05301955		/* For slave-side secure, we may have to force the page table
 1956		 * format to V8L.
 1957		 */
1958 ret = arm_smmu_set_pt_format(smmu_domain,
1959 &smmu_domain->pgtbl_cfg);
1960 if (ret)
1961 goto out_clear_smmu;
Patrick Dalyc190d932016-08-30 17:23:28 -07001962
Patrick Daly03330cc2017-08-11 14:56:38 -07001963 arm_smmu_arch_init_context_bank(smmu_domain, dev);
1964
Patrick Dalyc190d932016-08-30 17:23:28 -07001965 /*
1966 * Request context fault interrupt. Do this last to avoid the
1967 * handler seeing a half-initialised domain state.
1968 */
1969 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1970 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001971 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1972 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001973 if (ret < 0) {
1974 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1975 cfg->irptndx, irq);
1976 cfg->irptndx = INVALID_IRPTNDX;
1977 goto out_clear_smmu;
1978 }
1979 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001980 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001981 }
Will Deacon518f7132014-11-14 17:17:54 +00001982 mutex_unlock(&smmu_domain->init_mutex);
1983
1984 /* Publish page table ops for map/unmap */
1985 smmu_domain->pgtbl_ops = pgtbl_ops;
Shiraz Hashimeca8c2e2018-01-15 20:08:38 +05301986 if (arm_smmu_is_slave_side_secure(smmu_domain) &&
1987 !arm_smmu_master_attached(smmu, dev->iommu_fwspec))
1988 arm_smmu_restore_sec_cfg(smmu, cfg->cbndx);
1989
Will Deacona9a1b0b2014-05-01 18:05:08 +01001990 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001991
Will Deacon518f7132014-11-14 17:17:54 +00001992out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001993 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001994 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001995out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001996 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001997 return ret;
1998}
1999
Patrick Daly77db4f92016-10-14 15:34:10 -07002000static void arm_smmu_domain_reinit(struct arm_smmu_domain *smmu_domain)
2001{
2002 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
2003 smmu_domain->cfg.cbndx = INVALID_CBNDX;
2004 smmu_domain->secure_vmid = VMID_INVAL;
2005}
2006
Will Deacon45ae7cf2013-06-24 18:31:25 +01002007static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
2008{
Joerg Roedel1d672632015-03-26 13:43:10 +01002009 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01002010 struct arm_smmu_device *smmu = smmu_domain->smmu;
2011 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01002012 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002013 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07002014 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002015 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002016
Robin Murphy7e96c742016-09-14 15:26:46 +01002017 if (!smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002018 return;
2019
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002020 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002021 if (ret) {
2022 WARN_ONCE(ret, "Woops, powering on smmu %p failed. Leaking context bank\n",
2023 smmu);
2024 return;
2025 }
2026
Patrick Dalyc190d932016-08-30 17:23:28 -07002027 dynamic = is_dynamic_domain(domain);
2028 if (dynamic) {
2029 arm_smmu_free_asid(domain);
2030 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002031 arm_smmu_power_off(smmu->pwr);
Patrick Dalye271f212016-10-04 13:24:49 -07002032 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002033 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002034 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002035 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly77db4f92016-10-14 15:34:10 -07002036 arm_smmu_domain_reinit(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07002037 return;
2038 }
2039
Will Deacon518f7132014-11-14 17:17:54 +00002040 /*
2041 * Disable the context bank and free the page tables before freeing
2042 * it.
2043 */
Will Deacon44680ee2014-06-25 11:29:12 +01002044 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01002045 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01002046
Will Deacon44680ee2014-06-25 11:29:12 +01002047 if (cfg->irptndx != INVALID_IRPTNDX) {
2048 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08002049 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002050 }
2051
Markus Elfring44830b02015-11-06 18:32:41 +01002052 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07002053 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002054 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002055 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002056 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01002057 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Charan Teja Reddy4971ca42018-01-23 18:27:08 +05302058	/* The non-secure context bank index is set to zero anyway,
 2059	 * so clear up the secure context bank bitmap directly.
 2060	 */
2061 if (arm_smmu_is_slave_side_secure(smmu_domain))
2062 __arm_smmu_free_bitmap(smmu->secure_context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002063
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002064 arm_smmu_power_off(smmu->pwr);
Patrick Daly77db4f92016-10-14 15:34:10 -07002065 arm_smmu_domain_reinit(smmu_domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002066}
2067
Joerg Roedel1d672632015-03-26 13:43:10 +01002068static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002069{
2070 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002071
Patrick Daly09801312016-08-29 17:02:52 -07002072 /* Do not support DOMAIN_DMA for now */
2073 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01002074 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002075 /*
2076 * Allocate the domain and initialise some of its data structures.
2077 * We can't really do anything meaningful until we've added a
2078 * master.
2079 */
2080 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
2081 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01002082 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002083
Robin Murphy7e96c742016-09-14 15:26:46 +01002084 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
2085 iommu_get_dma_cookie(&smmu_domain->domain))) {
Robin Murphy9adb9592016-01-26 18:06:36 +00002086 kfree(smmu_domain);
2087 return NULL;
2088 }
2089
Will Deacon518f7132014-11-14 17:17:54 +00002090 mutex_init(&smmu_domain->init_mutex);
2091 spin_lock_init(&smmu_domain->pgtbl_lock);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002092 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
2093 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07002094 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002095 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Patrick Daly2d600832018-02-11 15:12:55 -08002096 INIT_LIST_HEAD(&smmu_domain->nonsecure_pool);
Patrick Daly77db4f92016-10-14 15:34:10 -07002097 arm_smmu_domain_reinit(smmu_domain);
Joerg Roedel1d672632015-03-26 13:43:10 +01002098
2099 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002100}
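
/*
 * Typical client flow that reaches the allocator above (illustrative sketch
 * using the generic IOMMU API; client specifics vary):
 *
 *	domain = iommu_domain_alloc(&platform_bus_type);
 *	ret = iommu_attach_device(domain, dev);
 *	ret = iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_WRITE);
 *
 * iommu_domain_alloc() lands in arm_smmu_domain_alloc(), while the attach
 * finalises the context bank via arm_smmu_init_domain_context().
 */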
2101
Joerg Roedel1d672632015-03-26 13:43:10 +01002102static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002103{
Joerg Roedel1d672632015-03-26 13:43:10 +01002104 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01002105
2106 /*
2107 * Free the domain resources. We assume that all devices have
2108 * already been detached.
2109 */
Robin Murphy9adb9592016-01-26 18:06:36 +00002110 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002111 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002112 kfree(smmu_domain);
2113}
2114
Robin Murphy468f4942016-09-12 17:13:49 +01002115static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
2116{
2117 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphyd5b41782016-09-14 15:21:39 +01002118 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
Robin Murphy468f4942016-09-12 17:13:49 +01002119
2120 if (smr->valid)
2121 reg |= SMR_VALID;
2122 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
2123}
2124
Robin Murphya754fd12016-09-12 17:13:50 +01002125static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
2126{
2127 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
2128 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
2129 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
Patrick Daly7f377fe2017-10-06 17:37:10 -07002130 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT |
2131 S2CR_SHCFG_NSH << S2CR_SHCFG_SHIFT;
Robin Murphya754fd12016-09-12 17:13:50 +01002132
2133 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
2134}
2135
2136static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
2137{
2138 arm_smmu_write_s2cr(smmu, idx);
2139 if (smmu->smrs)
2140 arm_smmu_write_smr(smmu, idx);
2141}
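
/*
 * Example SMR value produced by arm_smmu_write_smr() above (illustrative,
 * assuming the architected SMMUv2 SMR layout: VALID at bit 31, MASK at
 * [30:16], ID at [14:0]): id=0x401, mask=0x1, valid=true encodes as
 *
 *	0x401 | (0x1 << 16) | (1 << 31) = 0x80010401
 *
 * which matches stream IDs 0x400 and 0x401, since set mask bits are
 * "don't care" for the comparison.
 */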
2142
Robin Murphy6668f692016-09-12 17:13:54 +01002143static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy468f4942016-09-12 17:13:49 +01002144{
2145 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy6668f692016-09-12 17:13:54 +01002146 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002147
Robin Murphy6668f692016-09-12 17:13:54 +01002148 /* Stream indexing is blissfully easy */
2149 if (!smrs)
2150 return id;
Robin Murphy468f4942016-09-12 17:13:49 +01002151
Robin Murphy6668f692016-09-12 17:13:54 +01002152 /* Validating SMRs is... less so */
2153 for (i = 0; i < smmu->num_mapping_groups; ++i) {
2154 if (!smrs[i].valid) {
2155 /*
2156 * Note the first free entry we come across, which
2157 * we'll claim in the end if nothing else matches.
2158 */
2159 if (free_idx < 0)
2160 free_idx = i;
Robin Murphy468f4942016-09-12 17:13:49 +01002161 continue;
2162 }
Robin Murphy6668f692016-09-12 17:13:54 +01002163 /*
2164 * If the new entry is _entirely_ matched by an existing entry,
2165 * then reuse that, with the guarantee that there also cannot
2166 * be any subsequent conflicting entries. In normal use we'd
2167 * expect simply identical entries for this case, but there's
2168 * no harm in accommodating the generalisation.
2169 */
2170 if ((mask & smrs[i].mask) == mask &&
2171 !((id ^ smrs[i].id) & ~smrs[i].mask))
2172 return i;
2173 /*
2174 * If the new entry has any other overlap with an existing one,
2175 * though, then there always exists at least one stream ID
2176 * which would cause a conflict, and we can't allow that risk.
2177 */
2178 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
2179 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002180 }
2181
Robin Murphy6668f692016-09-12 17:13:54 +01002182 return free_idx;
2183}
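
/*
 * Worked example for the matching rules above (illustrative): with an
 * existing valid SMR of id=0x400, mask=0x3 (covering stream IDs
 * 0x400-0x403), a new request id=0x401, mask=0x1 satisfies both
 *
 *	(mask & smrs[i].mask) == mask         (0x1 & 0x3) == 0x1
 *	!((id ^ smrs[i].id) & ~smrs[i].mask)  (0x001 & ~0x3) == 0
 *
 * so the existing index is reused, whereas id=0x402, mask=0x7 (covering
 * 0x400-0x407) overlaps without being contained and is rejected with
 * -EINVAL.
 */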
2184
2185static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
2186{
2187 if (--smmu->s2crs[idx].count)
2188 return false;
2189
2190 smmu->s2crs[idx] = s2cr_init_val;
2191 if (smmu->smrs)
2192 smmu->smrs[idx].valid = false;
2193
2194 return true;
2195}
2196
2197static int arm_smmu_master_alloc_smes(struct device *dev)
2198{
Robin Murphy06e393e2016-09-12 17:13:55 +01002199 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2200 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy6668f692016-09-12 17:13:54 +01002201 struct arm_smmu_device *smmu = cfg->smmu;
2202 struct arm_smmu_smr *smrs = smmu->smrs;
2203 struct iommu_group *group;
2204 int i, idx, ret;
2205
2206 mutex_lock(&smmu->stream_map_mutex);
2207 /* Figure out a viable stream map entry allocation */
Robin Murphy06e393e2016-09-12 17:13:55 +01002208 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002209 u16 sid = fwspec->ids[i];
2210 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
2211
Robin Murphy6668f692016-09-12 17:13:54 +01002212 if (idx != INVALID_SMENDX) {
2213 ret = -EEXIST;
2214 goto out_err;
2215 }
2216
Robin Murphy7e96c742016-09-14 15:26:46 +01002217 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy6668f692016-09-12 17:13:54 +01002218 if (ret < 0)
2219 goto out_err;
2220
2221 idx = ret;
2222 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002223 smrs[idx].id = sid;
2224 smrs[idx].mask = mask;
Robin Murphy6668f692016-09-12 17:13:54 +01002225 smrs[idx].valid = true;
2226 }
2227 smmu->s2crs[idx].count++;
2228 cfg->smendx[i] = (s16)idx;
2229 }
2230
2231 group = iommu_group_get_for_dev(dev);
2232 if (!group)
2233 group = ERR_PTR(-ENOMEM);
2234 if (IS_ERR(group)) {
2235 ret = PTR_ERR(group);
2236 goto out_err;
2237 }
2238 iommu_group_put(group);
Robin Murphy468f4942016-09-12 17:13:49 +01002239
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002240 /* It worked! Don't poke the actual hardware until we've attached */
2241 for_each_cfg_sme(fwspec, i, idx)
Robin Murphy6668f692016-09-12 17:13:54 +01002242 smmu->s2crs[idx].group = group;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002243
Robin Murphy6668f692016-09-12 17:13:54 +01002244 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002245 return 0;
2246
Robin Murphy6668f692016-09-12 17:13:54 +01002247out_err:
Robin Murphy468f4942016-09-12 17:13:49 +01002248 while (i--) {
Robin Murphy6668f692016-09-12 17:13:54 +01002249 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy468f4942016-09-12 17:13:49 +01002250 cfg->smendx[i] = INVALID_SMENDX;
2251 }
Robin Murphy6668f692016-09-12 17:13:54 +01002252 mutex_unlock(&smmu->stream_map_mutex);
2253 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002254}
2255
Robin Murphy06e393e2016-09-12 17:13:55 +01002256static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002257{
Robin Murphy06e393e2016-09-12 17:13:55 +01002258 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
2259 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy1fb519a2016-09-12 17:13:53 +01002260 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01002261
Robin Murphy6668f692016-09-12 17:13:54 +01002262 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002263 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002264 if (arm_smmu_free_sme(smmu, idx))
2265 arm_smmu_write_sme(smmu, idx);
Robin Murphy468f4942016-09-12 17:13:49 +01002266 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002267 }
Robin Murphy6668f692016-09-12 17:13:54 +01002268 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002269}
2270
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002271static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
2272 struct iommu_fwspec *fwspec)
2273{
2274 struct arm_smmu_device *smmu = smmu_domain->smmu;
2275 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2276 int i, idx;
2277 const struct iommu_gather_ops *tlb;
2278
2279 tlb = smmu_domain->pgtbl_cfg.tlb;
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302280 if (!tlb)
2281 return;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002282
2283 mutex_lock(&smmu->stream_map_mutex);
2284 for_each_cfg_sme(fwspec, i, idx) {
2285 WARN_ON(s2cr[idx].attach_count == 0);
2286 s2cr[idx].attach_count -= 1;
2287
2288 if (s2cr[idx].attach_count > 0)
2289 continue;
2290
2291 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
2292 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
2293 }
2294 mutex_unlock(&smmu->stream_map_mutex);
2295
2296 /* Ensure there are no stale mappings for this context bank */
2297 tlb->tlb_flush_all(smmu_domain);
2298}
2299
Will Deacon45ae7cf2013-06-24 18:31:25 +01002300static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphy06e393e2016-09-12 17:13:55 +01002301 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002302{
Will Deacon44680ee2014-06-25 11:29:12 +01002303 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002304 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2305 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
2306 u8 cbndx = smmu_domain->cfg.cbndx;
Robin Murphy6668f692016-09-12 17:13:54 +01002307 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002308
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002309 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002310 for_each_cfg_sme(fwspec, i, idx) {
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002311 if (s2cr[idx].attach_count++ > 0)
Robin Murphy6668f692016-09-12 17:13:54 +01002312 continue;
Robin Murphya754fd12016-09-12 17:13:50 +01002313
2314 s2cr[idx].type = type;
2315 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
2316 s2cr[idx].cbndx = cbndx;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002317 arm_smmu_write_sme(smmu, idx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002318 }
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002319 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002320
2321 return 0;
2322}
2323
Patrick Daly09801312016-08-29 17:02:52 -07002324static void arm_smmu_detach_dev(struct iommu_domain *domain,
2325 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002326{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002327 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07002328 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002329 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly09801312016-08-29 17:02:52 -07002330 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07002331 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07002332
2333 if (dynamic)
2334 return;
2335
Patrick Daly09801312016-08-29 17:02:52 -07002336 if (!smmu) {
2337 dev_err(dev, "Domain not attached; cannot detach!\n");
2338 return;
2339 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002340
Vijayanand Jitta25cd32c2017-11-16 15:14:36 +05302341 if (atomic_domain)
2342 arm_smmu_power_on_atomic(smmu->pwr);
2343 else
2344 arm_smmu_power_on(smmu->pwr);
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002345
Vijayanand Jitta25cd32c2017-11-16 15:14:36 +05302346 arm_smmu_domain_remove_master(smmu_domain, fwspec);
2347 arm_smmu_power_off(smmu->pwr);
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002348}
2349
Patrick Dalye271f212016-10-04 13:24:49 -07002350static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07002351{
Patrick Dalye271f212016-10-04 13:24:49 -07002352 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002353 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2354 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
2355 int source_vmid = VMID_HLOS;
2356 struct arm_smmu_pte_info *pte_info, *temp;
2357
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302358 if (!arm_smmu_is_master_side_secure(smmu_domain))
Patrick Dalye271f212016-10-04 13:24:49 -07002359 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002360
Patrick Dalye271f212016-10-04 13:24:49 -07002361 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002362 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2363 PAGE_SIZE, &source_vmid, 1,
2364 dest_vmids, dest_perms, 2);
2365 if (WARN_ON(ret))
2366 break;
2367 }
2368
2369 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2370 entry) {
2371 list_del(&pte_info->entry);
2372 kfree(pte_info);
2373 }
Patrick Dalye271f212016-10-04 13:24:49 -07002374 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002375}
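
/*
 * Reading of the hyp_assign_phys() arguments above (illustrative; the call
 * follows the soc/qcom/secure_buffer.h interface):
 *
 *	source vmid      VMID_HLOS                       current owner
 *	dest_vmids[2]    { VMID_HLOS, secure_vmid }      new owner list
 *	dest_perms[2]    { RW, RO }                      matching rights
 *
 * i.e. each queued page-table page stays read/write for HLOS so the tables
 * can still be updated, while the secure VM is granted read access to walk
 * them.
 */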
2376
2377static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2378{
2379 int ret;
2380 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002381 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002382 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2383 struct arm_smmu_pte_info *pte_info, *temp;
2384
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302385 if (!arm_smmu_is_master_side_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002386 return;
2387
2388 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2389 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2390 PAGE_SIZE, source_vmlist, 2,
2391 &dest_vmids, &dest_perms, 1);
2392 if (WARN_ON(ret))
2393 break;
2394 free_pages_exact(pte_info->virt_addr, pte_info->size);
2395 }
2396
2397 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2398 entry) {
2399 list_del(&pte_info->entry);
2400 kfree(pte_info);
2401 }
2402}
2403
2404static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2405{
2406 struct arm_smmu_domain *smmu_domain = cookie;
2407 struct arm_smmu_pte_info *pte_info;
2408
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302409 if (smmu_domain->slave_side_secure ||
2410 !arm_smmu_has_secure_vmid(smmu_domain)) {
2411 if (smmu_domain->slave_side_secure)
2412 WARN(1, "slave side secure is enforced\n");
2413 else
2414 WARN(1, "Invalid VMID is set !!\n");
2415 return;
2416 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002417
2418 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2419 if (!pte_info)
2420 return;
2421
2422 pte_info->virt_addr = addr;
2423 pte_info->size = size;
2424 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2425}
2426
2427static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2428{
2429 struct arm_smmu_domain *smmu_domain = cookie;
2430 struct arm_smmu_pte_info *pte_info;
2431
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302432 if (smmu_domain->slave_side_secure ||
2433 !arm_smmu_has_secure_vmid(smmu_domain)) {
2434 if (smmu_domain->slave_side_secure)
2435 WARN(1, "slave side secure is enforced\n");
2436 else
2437 WARN(1, "Invalid VMID is set !!\n");
2438 return -EINVAL;
2439 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002440
2441 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2442 if (!pte_info)
2443 return -ENOMEM;
2444 pte_info->virt_addr = addr;
2445 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2446 return 0;
2447}
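/*
 * Flow note (summary of the four helpers above, added for clarity): the
 * page-table code calls arm_smmu_prepare_pgtable() and
 * arm_smmu_unprepare_pgtable() through the domain cookie in atomic context
 * (hence the GFP_ATOMIC allocations), so they only queue pages on
 * pte_info_list/unassign_list; the hyp_assign_phys() calls that actually
 * change ownership run later, from arm_smmu_assign_table() and
 * arm_smmu_unassign_table(), once the page-table spinlock has been dropped.
 */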
2448
Patrick Daly2d600832018-02-11 15:12:55 -08002449static void arm_smmu_prealloc_memory(struct arm_smmu_domain *smmu_domain,
2450 struct scatterlist *sgl, int nents,
2451 struct list_head *pool)
2452{
2453 u32 nr = 0;
2454 int i;
2455 size_t size = 0;
2456 struct scatterlist *sg;
2457 struct page *page;
2458
2459 if ((smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC)) ||
2460 arm_smmu_has_secure_vmid(smmu_domain))
2461 return;
2462
2463 for_each_sg(sgl, sg, nents, i)
2464 size += sg->length;
2465
2466 /* number of 2nd level pagetable entries */
2467 nr += round_up(size, SZ_1G) >> 30;
2468	/* number of 3rd level pagetable entries */
2469 nr += round_up(size, SZ_2M) >> 21;
2470
2471 /* Retry later with atomic allocation on error */
2472 for (i = 0; i < nr; i++) {
2473 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
2474 if (!page)
2475 break;
2476 list_add(&page->lru, pool);
2477 }
2478}
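/*
 * Worked example for the preallocation sizing above (illustrative numbers
 * only): for a scatterlist totalling 5 MB, round_up(5M, SZ_1G) >> 30 = 1
 * second-level entry and round_up(5M, SZ_2M) >> 21 = 3 third-level entries,
 * so up to four zeroed pages are preallocated for worst-case table growth;
 * if any allocation fails here, mapping simply falls back to atomic
 * allocation later.
 */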
2479
2480static void arm_smmu_release_prealloc_memory(
2481 struct arm_smmu_domain *smmu_domain, struct list_head *list)
2482{
2483 struct page *page, *tmp;
2484 u32 remaining = 0;
2485
2486 list_for_each_entry_safe(page, tmp, list, lru) {
2487 list_del(&page->lru);
2488 __free_pages(page, 0);
2489 remaining++;
2490 }
2491}
2492
Will Deacon45ae7cf2013-06-24 18:31:25 +01002493static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2494{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002495 int ret;
Robin Murphy06e393e2016-09-12 17:13:55 +01002496 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Will Deacon518f7132014-11-14 17:17:54 +00002497 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +01002498 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly8befb662016-08-17 20:03:28 -07002499 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002500
Robin Murphy06e393e2016-09-12 17:13:55 +01002501 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002502 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2503 return -ENXIO;
2504 }
Robin Murphy06e393e2016-09-12 17:13:55 +01002505
Robin Murphy4f79b142016-10-17 12:06:21 +01002506 /*
2507 * FIXME: The arch/arm DMA API code tries to attach devices to its own
2508 * domains between of_xlate() and add_device() - we have no way to cope
2509 * with that, so until ARM gets converted to rely on groups and default
2510 * domains, just say no (but more politely than by dereferencing NULL).
2511 * This should be at least a WARN_ON once that's sorted.
2512 */
2513 if (!fwspec->iommu_priv)
2514 return -ENODEV;
2515
Robin Murphy06e393e2016-09-12 17:13:55 +01002516 smmu = fwspec_smmu(fwspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002517
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002518 /* Enable Clocks and Power */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002519 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002520 if (ret)
2521 return ret;
2522
Will Deacon518f7132014-11-14 17:17:54 +00002523 /* Ensure that the domain is finalised */
Patrick Dalyea63baa2017-02-13 17:11:33 -08002524 ret = arm_smmu_init_domain_context(domain, smmu, dev);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002525 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002526 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002527
Patrick Dalyc190d932016-08-30 17:23:28 -07002528 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002529 if (is_dynamic_domain(domain)) {
2530 ret = 0;
2531 goto out_power_off;
2532 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002533
Will Deacon45ae7cf2013-06-24 18:31:25 +01002534 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002535 * Sanity check the domain. We don't support domains across
2536 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002537 */
Robin Murphy06e393e2016-09-12 17:13:55 +01002538 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002539 dev_err(dev,
2540 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002541 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002542 ret = -EINVAL;
2543 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002544 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002545
2546 /* Looks ok, so add the device to the domain */
Robin Murphy06e393e2016-09-12 17:13:55 +01002547 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002548
2549out_power_off:
Patrick Daly466237d2016-10-14 15:59:14 -07002550 /*
2551 * Keep an additional vote for non-atomic power until domain is
2552 * detached
2553 */
2554 if (!ret && atomic_domain) {
2555 WARN_ON(arm_smmu_power_on(smmu->pwr));
2556 arm_smmu_power_off_atomic(smmu->pwr);
2557 }
2558
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002559 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002560
Will Deacon45ae7cf2013-06-24 18:31:25 +01002561 return ret;
2562}
2563
Will Deacon45ae7cf2013-06-24 18:31:25 +01002564static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002565 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002566{
Will Deacon518f7132014-11-14 17:17:54 +00002567 int ret;
2568 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002569 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002570	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002571
Will Deacon518f7132014-11-14 17:17:54 +00002572 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002573 return -ENODEV;
2574
Patrick Dalye271f212016-10-04 13:24:49 -07002575 arm_smmu_secure_domain_lock(smmu_domain);
2576
Will Deacon518f7132014-11-14 17:17:54 +00002577 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2578 ret = ops->map(ops, iova, paddr, size, prot);
2579 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002580
2581 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002582 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002583
Will Deacon518f7132014-11-14 17:17:54 +00002584 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002585}
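/*
 * Caller-side sketch for the map path above (addresses are illustrative):
 * iommu_map(domain, 0x10000000, page_to_phys(pg), SZ_4K,
 *	     IOMMU_READ | IOMMU_WRITE) reaches arm_smmu_map() via arm_smmu_ops,
 * performs the mapping under pgtbl_lock, and then hands any newly allocated
 * table pages to the secure VM through arm_smmu_assign_table() when the
 * domain is master-side secure.
 */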
2586
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002587static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
2588 dma_addr_t iova)
2589{
2590 uint64_t ret;
2591 unsigned long flags;
2592 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2593 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2594
2595 if (!ops)
2596 return 0;
2597
2598 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2599 ret = ops->iova_to_pte(ops, iova);
2600 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2601 return ret;
2602}
2603
Will Deacon45ae7cf2013-06-24 18:31:25 +01002604static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2605 size_t size)
2606{
Will Deacon518f7132014-11-14 17:17:54 +00002607 size_t ret;
2608 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002609 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002610	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002611
Will Deacon518f7132014-11-14 17:17:54 +00002612 if (!ops)
2613 return 0;
2614
Patrick Daly8befb662016-08-17 20:03:28 -07002615 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002616 if (ret)
2617 return ret;
2618
Patrick Dalye271f212016-10-04 13:24:49 -07002619 arm_smmu_secure_domain_lock(smmu_domain);
2620
Will Deacon518f7132014-11-14 17:17:54 +00002621 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2622 ret = ops->unmap(ops, iova, size);
2623 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002624
Patrick Daly8befb662016-08-17 20:03:28 -07002625 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002626 /*
2627 * While splitting up block mappings, we might allocate page table
2628	 * memory during unmap, so the vmids need to be assigned to the
2629 * memory here as well.
2630 */
2631 arm_smmu_assign_table(smmu_domain);
2632 /* Also unassign any pages that were free'd during unmap */
2633	/* Also unassign any pages that were freed during unmap */
Patrick Dalye271f212016-10-04 13:24:49 -07002634 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002635 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002636}
2637
Patrick Daly88d321d2017-02-09 18:02:13 -08002638#define MAX_MAP_SG_BATCH_SIZE (SZ_4M)
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002639static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2640 struct scatterlist *sg, unsigned int nents, int prot)
2641{
2642 int ret;
Patrick Daly88d321d2017-02-09 18:02:13 -08002643 size_t size, batch_size, size_to_unmap = 0;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002644 unsigned long flags;
2645 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2646 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Patrick Daly88d321d2017-02-09 18:02:13 -08002647 unsigned int idx_start, idx_end;
2648 struct scatterlist *sg_start, *sg_end;
2649 unsigned long __saved_iova_start;
Patrick Daly2d600832018-02-11 15:12:55 -08002650 LIST_HEAD(nonsecure_pool);
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002651
2652 if (!ops)
2653 return -ENODEV;
2654
Patrick Daly2d600832018-02-11 15:12:55 -08002655 arm_smmu_prealloc_memory(smmu_domain, sg, nents, &nonsecure_pool);
Patrick Daly4b9a7ad2017-09-22 17:31:13 -07002656 arm_smmu_secure_domain_lock(smmu_domain);
2657
Patrick Daly88d321d2017-02-09 18:02:13 -08002658 __saved_iova_start = iova;
2659 idx_start = idx_end = 0;
2660 sg_start = sg_end = sg;
2661 while (idx_end < nents) {
2662 batch_size = sg_end->length;
2663 sg_end = sg_next(sg_end);
2664 idx_end++;
2665 while ((idx_end < nents) &&
2666 (batch_size + sg_end->length < MAX_MAP_SG_BATCH_SIZE)) {
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002667
Patrick Daly88d321d2017-02-09 18:02:13 -08002668 batch_size += sg_end->length;
2669 sg_end = sg_next(sg_end);
2670 idx_end++;
2671 }
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002672
Patrick Daly88d321d2017-02-09 18:02:13 -08002673 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Daly2d600832018-02-11 15:12:55 -08002674 list_splice_init(&nonsecure_pool, &smmu_domain->nonsecure_pool);
Patrick Daly88d321d2017-02-09 18:02:13 -08002675 ret = ops->map_sg(ops, iova, sg_start, idx_end - idx_start,
2676 prot, &size);
Patrick Daly2d600832018-02-11 15:12:55 -08002677 list_splice_init(&smmu_domain->nonsecure_pool, &nonsecure_pool);
Patrick Daly88d321d2017-02-09 18:02:13 -08002678 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2679 /* Returns 0 on error */
2680 if (!ret) {
2681 size_to_unmap = iova + size - __saved_iova_start;
2682 goto out;
2683 }
2684
2685 iova += batch_size;
2686 idx_start = idx_end;
2687 sg_start = sg_end;
2688 }
2689
2690out:
Patrick Dalyc11d1082016-09-01 15:52:44 -07002691 arm_smmu_assign_table(smmu_domain);
2692
Patrick Daly88d321d2017-02-09 18:02:13 -08002693 if (size_to_unmap) {
2694 arm_smmu_unmap(domain, __saved_iova_start, size_to_unmap);
2695 iova = __saved_iova_start;
2696 }
Patrick Daly4b9a7ad2017-09-22 17:31:13 -07002697 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly2d600832018-02-11 15:12:55 -08002698 arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);
Patrick Daly88d321d2017-02-09 18:02:13 -08002699 return iova - __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002700}
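/*
 * Batching example for arm_smmu_map_sg() above (illustrative sizes): with
 * MAX_MAP_SG_BATCH_SIZE at 4 MB, a scatterlist of six 1 MB segments is pushed
 * to ops->map_sg() in two batches (segments 0-2, then 3-5), advancing iova by
 * batch_size in between; if a batch returns 0, everything mapped so far is
 * rolled back through arm_smmu_unmap() before returning.
 */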
2701
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002702static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyad441dd2016-09-15 15:50:46 -07002703 dma_addr_t iova)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002704{
Joerg Roedel1d672632015-03-26 13:43:10 +01002705 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002706 struct arm_smmu_device *smmu = smmu_domain->smmu;
2707 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2708	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2709 struct device *dev = smmu->dev;
2710 void __iomem *cb_base;
2711 u32 tmp;
2712 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002713 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002714
2715 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2716
Robin Murphy661d9622015-05-27 17:09:34 +01002717 /* ATS1 registers can only be written atomically */
2718 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002719 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002720 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2721 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002722 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002723
2724 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2725 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002726 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002727 dev_err(dev,
2728 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2729 &iova, &phys);
2730 phys = 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002731 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002732 }
2733
Robin Murphyf9a05f02016-04-13 18:13:01 +01002734 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002735 if (phys & CB_PAR_F) {
2736 dev_err(dev, "translation fault!\n");
2737 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002738 phys = 0;
2739 } else {
2740 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002741 }
Patrick Dalyad441dd2016-09-15 15:50:46 -07002742
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002743 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002744}
2745
Will Deacon45ae7cf2013-06-24 18:31:25 +01002746static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002747 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002748{
Will Deacon518f7132014-11-14 17:17:54 +00002749 phys_addr_t ret;
2750 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002751 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002752	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002753
Will Deacon518f7132014-11-14 17:17:54 +00002754 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002755 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002756
Will Deacon518f7132014-11-14 17:17:54 +00002757 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002758 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002759 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002760
Will Deacon518f7132014-11-14 17:17:54 +00002761 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002762}
2763
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002764/*
2765 * This function can sleep, and cannot be called from atomic context. Will
2766 * power on register block if required. This restriction does not apply to the
2767 * original iova_to_phys() op.
2768 */
2769static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2770 dma_addr_t iova)
2771{
2772 phys_addr_t ret = 0;
2773 unsigned long flags;
2774 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly62ba1922017-08-30 16:47:18 -07002775 struct arm_smmu_device *smmu = smmu_domain->smmu;
2776
2777 if (smmu->options & ARM_SMMU_OPT_DISABLE_ATOS)
2778 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002779
Patrick Dalyad441dd2016-09-15 15:50:46 -07002780 if (smmu_domain->smmu->arch_ops &&
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002781 smmu_domain->smmu->arch_ops->iova_to_phys_hard) {
2782 ret = smmu_domain->smmu->arch_ops->iova_to_phys_hard(
Patrick Dalyad441dd2016-09-15 15:50:46 -07002783 domain, iova);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002784 return ret;
2785 }
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002786
2787 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2788 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2789 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyad441dd2016-09-15 15:50:46 -07002790 ret = __arm_smmu_iova_to_phys_hard(domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002791
2792 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2793
2794 return ret;
2795}
2796
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002797static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002798{
Will Deacond0948942014-06-24 17:30:10 +01002799 switch (cap) {
2800 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002801 /*
2802 * Return true here as the SMMU can always send out coherent
2803 * requests.
2804 */
2805 return true;
Will Deacond0948942014-06-24 17:30:10 +01002806 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002807 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002808 case IOMMU_CAP_NOEXEC:
2809 return true;
Will Deacond0948942014-06-24 17:30:10 +01002810 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002811 return false;
Will Deacond0948942014-06-24 17:30:10 +01002812 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002813}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002814
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +05302815#ifdef CONFIG_MSM_TZ_SMMU
2816static struct arm_smmu_device *arm_smmu_get_by_addr(void __iomem *addr)
2817{
2818 struct arm_smmu_device *smmu;
2819 unsigned long flags;
2820
2821 spin_lock_irqsave(&arm_smmu_devices_lock, flags);
2822 list_for_each_entry(smmu, &arm_smmu_devices, list) {
2823 unsigned long base = (unsigned long)smmu->base;
2824 unsigned long mask = ~(smmu->size - 1);
2825
2826 if ((base & mask) == ((unsigned long)addr & mask)) {
2827 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2828 return smmu;
2829 }
2830 }
2831 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2832 return NULL;
2833}
2834
2835bool arm_smmu_skip_write(void __iomem *addr)
2836{
2837 struct arm_smmu_device *smmu;
Charan Teja Reddy4971ca42018-01-23 18:27:08 +05302838 int cb;
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +05302839
2840 smmu = arm_smmu_get_by_addr(addr);
Shiraz Hashima28a4792018-01-13 00:39:52 +05302841
2842 /* Skip write if smmu not available by now */
2843 if (!smmu)
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +05302844 return true;
Shiraz Hashima28a4792018-01-13 00:39:52 +05302845
2846 /* Do not write to global space */
2847 if (((unsigned long)addr & (smmu->size - 1)) < (smmu->size >> 1))
2848 return true;
2849
2850 /* Finally skip writing to secure CB */
2851 cb = ((unsigned long)addr & ((smmu->size >> 1) - 1)) >> PAGE_SHIFT;
Charan Teja Reddy4971ca42018-01-23 18:27:08 +05302852 if (test_bit(cb, smmu->secure_context_map))
2853 return true;
Shiraz Hashima28a4792018-01-13 00:39:52 +05302854
2855 return false;
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +05302856}
2857#endif
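/*
 * Address-decode example for arm_smmu_skip_write() above (hypothetical
 * geometry): with smmu->size = 0x20000 and 4 KB pages, the lower half
 * (offsets below 0x10000) is the global register space and is never written;
 * an offset of 0x13000 lands in the context-bank half, and
 * ((0x13000 & 0xffff) >> PAGE_SHIFT) = 3 selects context bank 3, whose bit in
 * secure_context_map decides whether the write is skipped.
 */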
2858
Patrick Daly8e3371a2017-02-13 22:14:53 -08002859static struct arm_smmu_device *arm_smmu_get_by_list(struct device_node *np)
2860{
2861 struct arm_smmu_device *smmu;
2862 unsigned long flags;
2863
2864 spin_lock_irqsave(&arm_smmu_devices_lock, flags);
2865 list_for_each_entry(smmu, &arm_smmu_devices, list) {
2866 if (smmu->dev->of_node == np) {
2867 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2868 return smmu;
2869 }
2870 }
2871 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2872 return NULL;
2873}
2874
Robin Murphy7e96c742016-09-14 15:26:46 +01002875static int arm_smmu_match_node(struct device *dev, void *data)
2876{
2877 return dev->of_node == data;
2878}
2879
2880static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
2881{
2882 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
2883 np, arm_smmu_match_node);
2884 put_device(dev);
Patrick Daly8e3371a2017-02-13 22:14:53 -08002885 return dev ? dev_get_drvdata(dev) : arm_smmu_get_by_list(np);
Robin Murphy7e96c742016-09-14 15:26:46 +01002886}
2887
Will Deacon03edb222015-01-19 14:27:33 +00002888static int arm_smmu_add_device(struct device *dev)
2889{
Robin Murphy06e393e2016-09-12 17:13:55 +01002890 struct arm_smmu_device *smmu;
Robin Murphyd5b41782016-09-14 15:21:39 +01002891 struct arm_smmu_master_cfg *cfg;
Robin Murphy7e96c742016-09-14 15:26:46 +01002892 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyd5b41782016-09-14 15:21:39 +01002893 int i, ret;
2894
Robin Murphy7e96c742016-09-14 15:26:46 +01002895 if (using_legacy_binding) {
2896 ret = arm_smmu_register_legacy_master(dev, &smmu);
2897 fwspec = dev->iommu_fwspec;
2898 if (ret)
2899 goto out_free;
Robin Murphy22e6f6c2016-11-02 17:31:32 +00002900 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002901 smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
2902 if (!smmu)
2903 return -ENODEV;
2904 } else {
2905 return -ENODEV;
2906 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002907
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002908 ret = arm_smmu_power_on(smmu->pwr);
2909 if (ret)
2910 goto out_free;
2911
Robin Murphyd5b41782016-09-14 15:21:39 +01002912 ret = -EINVAL;
Robin Murphy06e393e2016-09-12 17:13:55 +01002913 for (i = 0; i < fwspec->num_ids; i++) {
2914 u16 sid = fwspec->ids[i];
Robin Murphy7e96c742016-09-14 15:26:46 +01002915 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyd5b41782016-09-14 15:21:39 +01002916
Robin Murphy06e393e2016-09-12 17:13:55 +01002917 if (sid & ~smmu->streamid_mask) {
Robin Murphyd5b41782016-09-14 15:21:39 +01002918 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002919 sid, smmu->streamid_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002920 goto out_pwr_off;
Robin Murphyd5b41782016-09-14 15:21:39 +01002921 }
Robin Murphy7e96c742016-09-14 15:26:46 +01002922 if (mask & ~smmu->smr_mask_mask) {
2923 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
2924 sid, smmu->smr_mask_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002925 goto out_pwr_off;
Robin Murphy7e96c742016-09-14 15:26:46 +01002926 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002927 }
Will Deacon03edb222015-01-19 14:27:33 +00002928
Robin Murphy06e393e2016-09-12 17:13:55 +01002929 ret = -ENOMEM;
2930 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
2931 GFP_KERNEL);
2932 if (!cfg)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002933 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002934
2935 cfg->smmu = smmu;
2936 fwspec->iommu_priv = cfg;
2937 while (i--)
2938 cfg->smendx[i] = INVALID_SMENDX;
2939
Robin Murphy6668f692016-09-12 17:13:54 +01002940 ret = arm_smmu_master_alloc_smes(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002941 if (ret)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002942 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002943
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002944 arm_smmu_power_off(smmu->pwr);
Robin Murphy06e393e2016-09-12 17:13:55 +01002945 return 0;
Robin Murphyd5b41782016-09-14 15:21:39 +01002946
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002947out_pwr_off:
2948 arm_smmu_power_off(smmu->pwr);
Robin Murphyd5b41782016-09-14 15:21:39 +01002949out_free:
Robin Murphy06e393e2016-09-12 17:13:55 +01002950 if (fwspec)
2951 kfree(fwspec->iommu_priv);
2952 iommu_fwspec_free(dev);
Robin Murphyd5b41782016-09-14 15:21:39 +01002953 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00002954}
2955
Will Deacon45ae7cf2013-06-24 18:31:25 +01002956static void arm_smmu_remove_device(struct device *dev)
2957{
Robin Murphy06e393e2016-09-12 17:13:55 +01002958 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002959 struct arm_smmu_device *smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002960
Robin Murphy06e393e2016-09-12 17:13:55 +01002961 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyd5b41782016-09-14 15:21:39 +01002962 return;
Robin Murphya754fd12016-09-12 17:13:50 +01002963
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002964 smmu = fwspec_smmu(fwspec);
2965 if (arm_smmu_power_on(smmu->pwr)) {
2966 WARN_ON(1);
2967 return;
2968 }
2969
Robin Murphy06e393e2016-09-12 17:13:55 +01002970 arm_smmu_master_free_smes(fwspec);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002971 iommu_group_remove_device(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002972 kfree(fwspec->iommu_priv);
2973 iommu_fwspec_free(dev);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002974 arm_smmu_power_off(smmu->pwr);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002975}
2976
Joerg Roedelaf659932015-10-21 23:51:41 +02002977static struct iommu_group *arm_smmu_device_group(struct device *dev)
2978{
Robin Murphy06e393e2016-09-12 17:13:55 +01002979 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2980 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy6668f692016-09-12 17:13:54 +01002981 struct iommu_group *group = NULL;
2982 int i, idx;
2983
Robin Murphy06e393e2016-09-12 17:13:55 +01002984 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002985 if (group && smmu->s2crs[idx].group &&
2986 group != smmu->s2crs[idx].group)
2987 return ERR_PTR(-EINVAL);
2988
2989 group = smmu->s2crs[idx].group;
2990 }
2991
Patrick Daly03330cc2017-08-11 14:56:38 -07002992 if (!group) {
2993 if (dev_is_pci(dev))
2994 group = pci_device_group(dev);
2995 else
2996 group = generic_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02002997
Patrick Daly03330cc2017-08-11 14:56:38 -07002998 if (IS_ERR(group))
2999 return NULL;
3000 }
3001
3002 if (arm_smmu_arch_device_group(dev, group)) {
3003 iommu_group_put(group);
3004 return ERR_PTR(-EINVAL);
3005 }
Joerg Roedelaf659932015-10-21 23:51:41 +02003006
Joerg Roedelaf659932015-10-21 23:51:41 +02003007 return group;
3008}
3009
Will Deaconc752ce42014-06-25 22:46:31 +01003010static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
3011 enum iommu_attr attr, void *data)
3012{
Joerg Roedel1d672632015-03-26 13:43:10 +01003013 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06003014 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01003015
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003016 mutex_lock(&smmu_domain->init_mutex);
Will Deaconc752ce42014-06-25 22:46:31 +01003017 switch (attr) {
3018 case DOMAIN_ATTR_NESTING:
3019 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003020 ret = 0;
3021 break;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08003022 case DOMAIN_ATTR_PT_BASE_ADDR:
3023 *((phys_addr_t *)data) =
3024 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003025 ret = 0;
3026 break;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06003027 case DOMAIN_ATTR_CONTEXT_BANK:
3028 /* context bank index isn't valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003029 if (smmu_domain->smmu == NULL) {
3030 ret = -ENODEV;
3031 break;
3032 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06003033 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
3034 ret = 0;
3035 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06003036 case DOMAIN_ATTR_TTBR0: {
3037 u64 val;
3038 struct arm_smmu_device *smmu = smmu_domain->smmu;
3039 /* not valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003040 if (smmu == NULL) {
3041 ret = -ENODEV;
3042 break;
3043 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06003044 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
3045 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
3046 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
3047 << (TTBRn_ASID_SHIFT);
3048 *((u64 *)data) = val;
3049 ret = 0;
3050 break;
3051 }
3052 case DOMAIN_ATTR_CONTEXTIDR:
3053 /* not valid until attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003054 if (smmu_domain->smmu == NULL) {
3055 ret = -ENODEV;
3056 break;
3057 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06003058 *((u32 *)data) = smmu_domain->cfg.procid;
3059 ret = 0;
3060 break;
3061 case DOMAIN_ATTR_PROCID:
3062 *((u32 *)data) = smmu_domain->cfg.procid;
3063 ret = 0;
3064 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07003065 case DOMAIN_ATTR_DYNAMIC:
3066 *((int *)data) = !!(smmu_domain->attributes
3067 & (1 << DOMAIN_ATTR_DYNAMIC));
3068 ret = 0;
3069 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07003070 case DOMAIN_ATTR_NON_FATAL_FAULTS:
3071 *((int *)data) = !!(smmu_domain->attributes
3072 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
3073 ret = 0;
3074 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07003075 case DOMAIN_ATTR_S1_BYPASS:
3076 *((int *)data) = !!(smmu_domain->attributes
3077 & (1 << DOMAIN_ATTR_S1_BYPASS));
3078 ret = 0;
3079 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07003080 case DOMAIN_ATTR_SECURE_VMID:
3081 *((int *)data) = smmu_domain->secure_vmid;
3082 ret = 0;
3083 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08003084 case DOMAIN_ATTR_PGTBL_INFO: {
3085 struct iommu_pgtbl_info *info = data;
3086
3087 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
3088 ret = -ENODEV;
3089 break;
3090 }
3091 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
3092 ret = 0;
3093 break;
3094 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003095 case DOMAIN_ATTR_FAST:
3096 *((int *)data) = !!(smmu_domain->attributes
3097 & (1 << DOMAIN_ATTR_FAST));
3098 ret = 0;
3099 break;
Patrick Daly1e279922017-09-06 15:57:45 -07003100 case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
3101 *((int *)data) = !!(smmu_domain->attributes
3102 & (1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR));
3103 ret = 0;
3104 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08003105 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
3106 *((int *)data) = !!(smmu_domain->attributes &
3107 (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
3108 ret = 0;
3109 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003110 case DOMAIN_ATTR_EARLY_MAP:
3111 *((int *)data) = !!(smmu_domain->attributes
3112 & (1 << DOMAIN_ATTR_EARLY_MAP));
3113 ret = 0;
3114 break;
Mitchel Humpherys05314f32016-06-07 16:04:40 -07003115 case DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003116 if (!smmu_domain->smmu) {
3117 ret = -ENODEV;
3118 break;
3119 }
Liam Mark53cf2342016-12-20 11:36:07 -08003120 *((int *)data) = is_iommu_pt_coherent(smmu_domain);
3121 ret = 0;
3122 break;
3123 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
3124 *((int *)data) = !!(smmu_domain->attributes
3125 & (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
Mitchel Humpherys05314f32016-06-07 16:04:40 -07003126 ret = 0;
3127 break;
Charan Teja Reddyc682e472017-04-20 19:11:20 +05303128 case DOMAIN_ATTR_CB_STALL_DISABLE:
3129 *((int *)data) = !!(smmu_domain->attributes
3130 & (1 << DOMAIN_ATTR_CB_STALL_DISABLE));
3131 ret = 0;
3132 break;
Patrick Daly83174c12017-10-26 12:31:15 -07003133 case DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN:
Patrick Daly23301482017-10-12 16:18:25 -07003134 *((int *)data) = smmu_domain->qsmmuv500_errata2_min_align;
3135 ret = 0;
3136 break;
Will Deaconc752ce42014-06-25 22:46:31 +01003137 default:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003138 ret = -ENODEV;
3139 break;
Will Deaconc752ce42014-06-25 22:46:31 +01003140 }
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003141 mutex_unlock(&smmu_domain->init_mutex);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06003142 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01003143}
3144
3145static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
3146 enum iommu_attr attr, void *data)
3147{
Will Deacon518f7132014-11-14 17:17:54 +00003148 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01003149 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01003150
Will Deacon518f7132014-11-14 17:17:54 +00003151 mutex_lock(&smmu_domain->init_mutex);
3152
Will Deaconc752ce42014-06-25 22:46:31 +01003153 switch (attr) {
3154 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00003155 if (smmu_domain->smmu) {
3156 ret = -EPERM;
3157 goto out_unlock;
3158 }
3159
Will Deaconc752ce42014-06-25 22:46:31 +01003160 if (*(int *)data)
3161 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
3162 else
3163 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
3164
Will Deacon518f7132014-11-14 17:17:54 +00003165 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06003166 case DOMAIN_ATTR_PROCID:
3167 if (smmu_domain->smmu != NULL) {
3168 dev_err(smmu_domain->smmu->dev,
3169 "cannot change procid attribute while attached\n");
3170 ret = -EBUSY;
3171 break;
3172 }
3173 smmu_domain->cfg.procid = *((u32 *)data);
3174 ret = 0;
3175 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07003176 case DOMAIN_ATTR_DYNAMIC: {
3177 int dynamic = *((int *)data);
3178
3179 if (smmu_domain->smmu != NULL) {
3180 dev_err(smmu_domain->smmu->dev,
3181 "cannot change dynamic attribute while attached\n");
3182 ret = -EBUSY;
3183 break;
3184 }
3185
3186 if (dynamic)
3187 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
3188 else
3189 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
3190 ret = 0;
3191 break;
3192 }
3193 case DOMAIN_ATTR_CONTEXT_BANK:
3194 /* context bank can't be set while attached */
3195 if (smmu_domain->smmu != NULL) {
3196 ret = -EBUSY;
3197 break;
3198 }
3199 /* ... and it can only be set for dynamic contexts. */
3200 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
3201 ret = -EINVAL;
3202 break;
3203 }
3204
3205 /* this will be validated during attach */
3206 smmu_domain->cfg.cbndx = *((unsigned int *)data);
3207 ret = 0;
3208 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07003209 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
3210 u32 non_fatal_faults = *((int *)data);
3211
3212 if (non_fatal_faults)
3213 smmu_domain->attributes |=
3214 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
3215 else
3216 smmu_domain->attributes &=
3217 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
3218 ret = 0;
3219 break;
3220 }
Patrick Dalye62d3362016-03-15 18:58:28 -07003221 case DOMAIN_ATTR_S1_BYPASS: {
3222 int bypass = *((int *)data);
3223
3224 /* bypass can't be changed while attached */
3225 if (smmu_domain->smmu != NULL) {
3226 ret = -EBUSY;
3227 break;
3228 }
3229 if (bypass)
3230 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
3231 else
3232 smmu_domain->attributes &=
3233 ~(1 << DOMAIN_ATTR_S1_BYPASS);
3234
3235 ret = 0;
3236 break;
3237 }
Patrick Daly8befb662016-08-17 20:03:28 -07003238 case DOMAIN_ATTR_ATOMIC:
3239 {
3240 int atomic_ctx = *((int *)data);
3241
3242 /* can't be changed while attached */
3243 if (smmu_domain->smmu != NULL) {
3244 ret = -EBUSY;
3245 break;
3246 }
3247 if (atomic_ctx)
3248 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
3249 else
3250 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
3251 break;
3252 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07003253 case DOMAIN_ATTR_SECURE_VMID:
3254 if (smmu_domain->secure_vmid != VMID_INVAL) {
3255 ret = -ENODEV;
3256 WARN(1, "secure vmid already set!");
3257 break;
3258 }
3259 smmu_domain->secure_vmid = *((int *)data);
3260 break;
Patrick Daly1e279922017-09-06 15:57:45 -07003261 case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
3262 if (*((int *)data))
3263 smmu_domain->attributes |=
3264 1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR;
3265 ret = 0;
3266 break;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003267 /*
3268 * fast_smmu_unmap_page() and fast_smmu_alloc_iova() both
3269 * expect that the bus/clock/regulator are already on. Thus also
3270	 * force DOMAIN_ATTR_ATOMIC to be set.
3271 */
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003272 case DOMAIN_ATTR_FAST:
Patrick Daly0df84ac2017-10-11 17:32:41 -07003273 {
3274 int fast = *((int *)data);
3275
3276 if (fast) {
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003277 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003278 smmu_domain->attributes |= 1 << DOMAIN_ATTR_ATOMIC;
3279 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003280 ret = 0;
3281 break;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003282 }
Patrick Dalyce6786f2016-11-09 14:19:23 -08003283 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
3284 /* can't be changed while attached */
3285 if (smmu_domain->smmu != NULL) {
3286 ret = -EBUSY;
3287 break;
3288 }
3289 if (*((int *)data))
3290 smmu_domain->attributes |=
3291 1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
3292 ret = 0;
3293 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003294 case DOMAIN_ATTR_EARLY_MAP: {
3295 int early_map = *((int *)data);
3296
3297 ret = 0;
3298 if (early_map) {
3299 smmu_domain->attributes |=
3300 1 << DOMAIN_ATTR_EARLY_MAP;
3301 } else {
3302 if (smmu_domain->smmu)
3303 ret = arm_smmu_enable_s1_translations(
3304 smmu_domain);
3305
3306 if (!ret)
3307 smmu_domain->attributes &=
3308 ~(1 << DOMAIN_ATTR_EARLY_MAP);
3309 }
3310 break;
3311 }
Liam Mark53cf2342016-12-20 11:36:07 -08003312 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT: {
3313 int force_coherent = *((int *)data);
3314
3315 if (smmu_domain->smmu != NULL) {
3316 dev_err(smmu_domain->smmu->dev,
3317 "cannot change force coherent attribute while attached\n");
3318 ret = -EBUSY;
3319 break;
3320 }
3321
3322 if (force_coherent)
3323 smmu_domain->attributes |=
3324 1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
3325 else
3326 smmu_domain->attributes &=
3327 ~(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT);
3328
3329 ret = 0;
3330 break;
3331 }
3332
Charan Teja Reddyc682e472017-04-20 19:11:20 +05303333 case DOMAIN_ATTR_CB_STALL_DISABLE:
3334 if (*((int *)data))
3335 smmu_domain->attributes |=
3336 1 << DOMAIN_ATTR_CB_STALL_DISABLE;
3337 ret = 0;
3338 break;
Will Deaconc752ce42014-06-25 22:46:31 +01003339 default:
Will Deacon518f7132014-11-14 17:17:54 +00003340 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01003341 }
Will Deacon518f7132014-11-14 17:17:54 +00003342
3343out_unlock:
3344 mutex_unlock(&smmu_domain->init_mutex);
3345 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01003346}
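/*
 * Illustrative client-side use of the attribute handlers above; this is a
 * sketch only, not part of the driver, and the helper name is hypothetical.
 * Attributes that the handlers reject with -EBUSY must be set before the
 * domain is attached.
 */
#if 0	/* example only */
static int example_configure_domain(struct iommu_domain *domain)
{
	int atomic_ctx = 1;
	int fast = 1;
	int ret;

	/* Must happen before iommu_attach_device(), per the -EBUSY checks */
	ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_ATOMIC, &atomic_ctx);
	if (ret)
		return ret;

	/* DOMAIN_ATTR_FAST implicitly forces DOMAIN_ATTR_ATOMIC as well */
	return iommu_domain_set_attr(domain, DOMAIN_ATTR_FAST, &fast);
}
#endif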
3347
Robin Murphy7e96c742016-09-14 15:26:46 +01003348static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
3349{
3350 u32 fwid = 0;
3351
3352 if (args->args_count > 0)
3353 fwid |= (u16)args->args[0];
3354
3355 if (args->args_count > 1)
3356 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
3357
3358 return iommu_fwspec_add_ids(dev, &fwid, 1);
3359}
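/*
 * Encoding example for arm_smmu_of_xlate() above (devicetree values are
 * hypothetical): an "iommus = <&apps_smmu 0x842 0x3e0>;" entry is packed as
 * fwid = 0x842 | (0x3e0 << SMR_MASK_SHIFT), i.e. stream ID 0x842 with SMR
 * mask 0x3e0 in one 32-bit firmware ID handed to iommu_fwspec_add_ids(), and
 * later unpacked the same way in arm_smmu_add_device().
 */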
3360
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003361static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain)
3362{
3363 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3364 struct arm_smmu_device *smmu = smmu_domain->smmu;
3365 void __iomem *cb_base;
3366 u32 reg;
3367 int ret;
3368
3369 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3370 ret = arm_smmu_power_on(smmu->pwr);
3371 if (ret)
3372 return ret;
3373
3374 reg = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3375 reg |= SCTLR_M;
3376
3377 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
3378 arm_smmu_power_off(smmu->pwr);
3379 return ret;
3380}
3381
Liam Mark3ba41cf2016-12-09 14:39:04 -08003382static bool arm_smmu_is_iova_coherent(struct iommu_domain *domain,
3383 dma_addr_t iova)
3384{
3385 bool ret;
3386 unsigned long flags;
3387 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3388 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
3389
3390 if (!ops)
3391 return false;
3392
3393 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
3394 ret = ops->is_iova_coherent(ops, iova);
3395 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
3396 return ret;
3397}
3398
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003399static void arm_smmu_trigger_fault(struct iommu_domain *domain,
3400 unsigned long flags)
3401{
3402 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3403 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3404 struct arm_smmu_device *smmu;
3405 void __iomem *cb_base;
3406
3407 if (!smmu_domain->smmu) {
3408 pr_err("Can't trigger faults on non-attached domains\n");
3409 return;
3410 }
3411
3412 smmu = smmu_domain->smmu;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003413 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003414 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003415
3416 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3417 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
3418 flags, cfg->cbndx);
3419 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07003420 /* give the interrupt time to fire... */
3421 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003422
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003423 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003424}
3425
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003426static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
3427{
Patrick Dalyda765c62017-09-11 16:31:07 -07003428 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3429 const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;
3430
3431 tlb->tlb_flush_all(smmu_domain);
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003432}
3433
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003434static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
3435{
3436 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3437
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003438 return arm_smmu_power_on(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003439}
3440
3441static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
3442{
3443 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3444
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003445 arm_smmu_power_off(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003446}
3447
Will Deacon518f7132014-11-14 17:17:54 +00003448static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01003449 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01003450 .domain_alloc = arm_smmu_domain_alloc,
3451 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01003452 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07003453 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01003454 .map = arm_smmu_map,
3455 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07003456 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01003457 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07003458 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01003459 .add_device = arm_smmu_add_device,
3460 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02003461 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01003462 .domain_get_attr = arm_smmu_domain_get_attr,
3463 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy7e96c742016-09-14 15:26:46 +01003464 .of_xlate = arm_smmu_of_xlate,
Will Deacon518f7132014-11-14 17:17:54 +00003465 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003466 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003467 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003468 .enable_config_clocks = arm_smmu_enable_config_clocks,
3469 .disable_config_clocks = arm_smmu_disable_config_clocks,
Liam Mark3ba41cf2016-12-09 14:39:04 -08003470 .is_iova_coherent = arm_smmu_is_iova_coherent,
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07003471 .iova_to_pte = arm_smmu_iova_to_pte,
Will Deacon45ae7cf2013-06-24 18:31:25 +01003472};
3473
Patrick Dalyad441dd2016-09-15 15:50:46 -07003474#define IMPL_DEF1_MICRO_MMU_CTRL 0
3475#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
3476#define MICRO_MMU_CTRL_IDLE (1 << 3)
3477
3478/* Definitions for implementation-defined registers */
3479#define ACTLR_QCOM_OSH_SHIFT 28
3480#define ACTLR_QCOM_OSH 1
3481
3482#define ACTLR_QCOM_ISH_SHIFT 29
3483#define ACTLR_QCOM_ISH 1
3484
3485#define ACTLR_QCOM_NSH_SHIFT 30
3486#define ACTLR_QCOM_NSH 1
3487
3488static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003489{
3490 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003491 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003492
3493 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
3494 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
3495 0, 30000)) {
3496 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
3497 return -EBUSY;
3498 }
3499
3500 return 0;
3501}
3502
Patrick Dalyad441dd2016-09-15 15:50:46 -07003503static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003504{
3505 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3506 u32 reg;
3507
3508 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3509 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
Charan Teja Reddy35144b02017-09-05 16:20:46 +05303510
3511 if (arm_smmu_is_static_cb(smmu)) {
3512 phys_addr_t impl_def1_base_phys = impl_def1_base - smmu->base +
3513 smmu->phys_addr;
3514
3515 if (scm_io_write(impl_def1_base_phys +
3516 IMPL_DEF1_MICRO_MMU_CTRL, reg)) {
3517 dev_err(smmu->dev,
3518				"scm_io_write failed. SMMU might not be halted");
3519 return -EINVAL;
3520 }
3521 } else {
3522 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3523 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003524
Patrick Dalyad441dd2016-09-15 15:50:46 -07003525 return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003526}
3527
Patrick Dalyad441dd2016-09-15 15:50:46 -07003528static int qsmmuv2_halt(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003529{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003530 return __qsmmuv2_halt(smmu, true);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003531}
3532
Patrick Dalyad441dd2016-09-15 15:50:46 -07003533static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003534{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003535 return __qsmmuv2_halt(smmu, false);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003536}
3537
Patrick Dalyad441dd2016-09-15 15:50:46 -07003538static void qsmmuv2_resume(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003539{
3540 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3541 u32 reg;
3542
3543 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3544 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
Charan Teja Reddy35144b02017-09-05 16:20:46 +05303545
3546 if (arm_smmu_is_static_cb(smmu)) {
3547 phys_addr_t impl_def1_base_phys = impl_def1_base - smmu->base +
3548 smmu->phys_addr;
3549
3550 if (scm_io_write(impl_def1_base_phys +
3551 IMPL_DEF1_MICRO_MMU_CTRL, reg))
3552 dev_err(smmu->dev,
3553				"scm_io_write failed. SMMU might not be resumed");
3554 } else {
3555 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3556 }
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003557}
3558
Patrick Dalyad441dd2016-09-15 15:50:46 -07003559static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003560{
3561 int i;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003562 u32 val;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003563 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003564 void __iomem *cb_base;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003565
Patrick Dalyad441dd2016-09-15 15:50:46 -07003566 /*
3567 * SCTLR.M must be disabled here per ARM SMMUv2 spec
3568 * to prevent table walks with an inconsistent state.
3569 */
3570 for (i = 0; i < smmu->num_context_banks; ++i) {
3571 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3572 val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
3573 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
3574 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
3575 writel_relaxed(val, cb_base + ARM_SMMU_CB_ACTLR);
3576 }
3577
3578 /* Program implementation defined registers */
3579 qsmmuv2_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003580 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
3581 writel_relaxed(regs[i].value,
3582 ARM_SMMU_GR0(smmu) + regs[i].offset);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003583 qsmmuv2_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003584}
3585
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003586static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
3587 dma_addr_t iova)
Patrick Dalyad441dd2016-09-15 15:50:46 -07003588{
3589 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3590 struct arm_smmu_device *smmu = smmu_domain->smmu;
3591 int ret;
3592 phys_addr_t phys = 0;
3593 unsigned long flags;
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003594 u32 sctlr, sctlr_orig, fsr;
3595 void __iomem *cb_base;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003596
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003597 ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003598 if (ret)
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003599 return ret;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003600
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003601 spin_lock_irqsave(&smmu->atos_lock, flags);
3602 cb_base = ARM_SMMU_CB_BASE(smmu) +
3603 ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003604
3605 qsmmuv2_halt_nowait(smmu);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003606 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003607 qsmmuv2_wait_for_halt(smmu);
3608
3609 /* clear FSR to allow ATOS to log any faults */
3610 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
3611 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
3612
3613 /* disable stall mode momentarily */
3614 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3615 sctlr = sctlr_orig & ~SCTLR_CFCFG;
3616 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
3617
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003618 phys = __arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003619
3620 /* restore SCTLR */
3621 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
3622
3623 qsmmuv2_resume(smmu);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003624 spin_unlock_irqrestore(&smmu->atos_lock, flags);
3625
3626 arm_smmu_power_off(smmu_domain->smmu->pwr);
3627 return phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003628}
3629
3630struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
3631 .device_reset = qsmmuv2_device_reset,
3632 .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
Patrick Dalyad441dd2016-09-15 15:50:46 -07003633};
3634
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003635static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003636{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003637 int i;
3638 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003639 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003640 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003641
Peng Fan3ca37122016-05-03 21:50:30 +08003642 /*
3643 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
3644 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
3645 * bit is only present in MMU-500r2 onwards.
3646 */
3647 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
3648 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
3649 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
3650 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
3651 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
3652 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
3653 }
3654
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003655 /* Make sure all context banks are disabled and clear CB_FSR */
3656 for (i = 0; i < smmu->num_context_banks; ++i) {
3657 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3658 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
3659 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003660 /*
3661 * Disable MMU-500's not-particularly-beneficial next-page
3662 * prefetcher for the sake of errata #841119 and #826419.
3663 */
3664 if (smmu->model == ARM_MMU500) {
3665 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
3666 reg &= ~ARM_MMU500_ACTLR_CPRE;
3667 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
3668 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003669 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003670}
3671
3672static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
3673{
3674 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy468f4942016-09-12 17:13:49 +01003675 int i;
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003676 u32 reg;
3677
3678 /* clear global FSR */
3679 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3680 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3681
Robin Murphy468f4942016-09-12 17:13:49 +01003682 /*
3683 * Reset stream mapping groups: Initial values mark all SMRn as
3684 * invalid and all S2CRn as bypass unless overridden.
3685 */
Patrick Daly59b6d202017-06-12 13:12:15 -07003686 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
3687 for (i = 0; i < smmu->num_mapping_groups; ++i)
3688 arm_smmu_write_sme(smmu, i);
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003689
Patrick Daly59b6d202017-06-12 13:12:15 -07003690 arm_smmu_context_bank_reset(smmu);
3691 }
Will Deacon1463fe42013-07-31 19:21:27 +01003692
Will Deacon45ae7cf2013-06-24 18:31:25 +01003693 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003694 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
3695 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
3696
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003697 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003698
Will Deacon45ae7cf2013-06-24 18:31:25 +01003699 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003700 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003701
3702 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003703 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003704
Robin Murphy25a1c962016-02-10 14:25:33 +00003705 /* Enable client access, handling unmatched streams as appropriate */
3706 reg &= ~sCR0_CLIENTPD;
3707 if (disable_bypass)
3708 reg |= sCR0_USFCFG;
3709 else
3710 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003711
3712 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003713 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003714
3715 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003716 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003717
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003718 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3719 reg |= sCR0_VMID16EN;
3720
Patrick Daly7f377fe2017-10-06 17:37:10 -07003721 /* Force bypass transaction to be Non-Shareable & not io-coherent */
3722 reg &= ~(sCR0_SHCFG_MASK << sCR0_SHCFG_SHIFT);
Prakash Gupta673a79f2017-11-16 18:07:00 +05303723 reg |= sCR0_SHCFG_NSH << sCR0_SHCFG_SHIFT;
Patrick Daly7f377fe2017-10-06 17:37:10 -07003724
Will Deacon45ae7cf2013-06-24 18:31:25 +01003725 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003726 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003727 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Dalyd7476202016-09-08 18:23:28 -07003728
3729 /* Manage any implementation defined features */
3730 arm_smmu_arch_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003731}
3732
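/*
 * Translate the 3-bit address-size fields found in the ID registers
 * (the IAS/OAS/UBS fields of ID2) into a bit width; encodings above 5
 * are treated as the 48-bit maximum.
 */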
3733static int arm_smmu_id_size_to_bits(int size)
3734{
3735 switch (size) {
3736 case 0:
3737 return 32;
3738 case 1:
3739 return 36;
3740 case 2:
3741 return 40;
3742 case 3:
3743 return 42;
3744 case 4:
3745 return 44;
3746 case 5:
3747 default:
3748 return 48;
3749 }
3750}
3751
Patrick Dalyda688822017-05-17 20:12:48 -07003752
3753/*
 3754 * Some context banks need to be transferred from the bootloader to HLOS in a way
3755 * that allows ongoing traffic. The current expectation is that these context
3756 * banks operate in bypass mode.
3757 * Additionally, there must be exactly one device in devicetree with stream-ids
3758 * overlapping those used by the bootloader.
3759 */
3760static int arm_smmu_alloc_cb(struct iommu_domain *domain,
3761 struct arm_smmu_device *smmu,
3762 struct device *dev)
3763{
3764 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Dalye72526b2017-07-18 16:21:44 -07003765 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Dalyda688822017-05-17 20:12:48 -07003766 u32 i, idx;
3767 int cb = -EINVAL;
3768 bool dynamic;
3769
Patrick Dalye72526b2017-07-18 16:21:44 -07003770 /*
3771 * Dynamic domains have already set cbndx through domain attribute.
3772 * Verify that they picked a valid value.
3773 */
Patrick Dalyda688822017-05-17 20:12:48 -07003774 dynamic = is_dynamic_domain(domain);
Patrick Dalye72526b2017-07-18 16:21:44 -07003775 if (dynamic) {
3776 cb = smmu_domain->cfg.cbndx;
3777 if (cb < smmu->num_context_banks)
3778 return cb;
3779 else
3780 return -EINVAL;
3781 }
Patrick Dalyda688822017-05-17 20:12:48 -07003782
3783 mutex_lock(&smmu->stream_map_mutex);
3784 for_each_cfg_sme(fwspec, i, idx) {
3785 if (smmu->s2crs[idx].cb_handoff)
3786 cb = smmu->s2crs[idx].cbndx;
3787 }
3788
Shiraz Hashima28a4792018-01-13 00:39:52 +05303789 if (cb >= 0 && arm_smmu_is_static_cb(smmu)) {
Charan Teja Reddy35144b02017-09-05 16:20:46 +05303790 smmu_domain->slave_side_secure = true;
3791
Shiraz Hashima28a4792018-01-13 00:39:52 +05303792 if (arm_smmu_is_slave_side_secure(smmu_domain))
Charan Teja Reddy4971ca42018-01-23 18:27:08 +05303793 bitmap_set(smmu->secure_context_map, cb, 1);
Shiraz Hashima28a4792018-01-13 00:39:52 +05303794 }
3795
Charan Teja Reddyf0758df2017-09-04 18:52:07 +05303796 if (cb < 0 && !arm_smmu_is_static_cb(smmu)) {
Patrick Dalyda688822017-05-17 20:12:48 -07003797 mutex_unlock(&smmu->stream_map_mutex);
3798 return __arm_smmu_alloc_bitmap(smmu->context_map,
3799 smmu->num_s2_context_banks,
3800 smmu->num_context_banks);
3801 }
3802
3803 for (i = 0; i < smmu->num_mapping_groups; i++) {
Patrick Daly2eb31362017-06-14 18:29:36 -07003804 if (smmu->s2crs[i].cb_handoff && smmu->s2crs[i].cbndx == cb) {
Charan Teja Reddy35144b02017-09-05 16:20:46 +05303805 if (!arm_smmu_is_static_cb(smmu))
3806 smmu->s2crs[i].cb_handoff = false;
Patrick Dalyda688822017-05-17 20:12:48 -07003807 smmu->s2crs[i].count -= 1;
3808 }
3809 }
3810 mutex_unlock(&smmu->stream_map_mutex);
3811
3812 return cb;
3813}
3814
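/*
 * Scan the stream mapping registers programmed by the bootloader and note
 * every entry with a valid SMR and an S2CR of type TRANS: the matching
 * context bank is marked for handoff so an HLOS client can adopt it
 * without interrupting ongoing traffic.
 */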
3815static int arm_smmu_handoff_cbs(struct arm_smmu_device *smmu)
3816{
3817 u32 i, raw_smr, raw_s2cr;
3818 struct arm_smmu_smr smr;
3819 struct arm_smmu_s2cr s2cr;
3820
3821 for (i = 0; i < smmu->num_mapping_groups; i++) {
3822 raw_smr = readl_relaxed(ARM_SMMU_GR0(smmu) +
3823 ARM_SMMU_GR0_SMR(i));
3824 if (!(raw_smr & SMR_VALID))
3825 continue;
3826
3827 smr.mask = (raw_smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
3828 smr.id = (u16)raw_smr;
3829 smr.valid = true;
3830
3831 raw_s2cr = readl_relaxed(ARM_SMMU_GR0(smmu) +
3832 ARM_SMMU_GR0_S2CR(i));
Patrick Daly4ea4bdf2017-08-29 19:24:49 -07003833 memset(&s2cr, 0, sizeof(s2cr));
Patrick Dalyda688822017-05-17 20:12:48 -07003834 s2cr.group = NULL;
3835 s2cr.count = 1;
3836 s2cr.type = (raw_s2cr >> S2CR_TYPE_SHIFT) & S2CR_TYPE_MASK;
3837 s2cr.privcfg = (raw_s2cr >> S2CR_PRIVCFG_SHIFT) &
3838 S2CR_PRIVCFG_MASK;
3839 s2cr.cbndx = (u8)raw_s2cr;
3840 s2cr.cb_handoff = true;
3841
3842 if (s2cr.type != S2CR_TYPE_TRANS)
3843 continue;
3844
3845 smmu->smrs[i] = smr;
3846 smmu->s2crs[i] = s2cr;
3847 bitmap_set(smmu->context_map, s2cr.cbndx, 1);
3848 dev_dbg(smmu->dev, "Handoff smr: %x s2cr: %x cb: %d\n",
3849 raw_smr, raw_s2cr, s2cr.cbndx);
3850 }
3851
3852 return 0;
3853}
3854
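/*
 * "attach-impl-defs" is a flat list of <offset value> pairs that get
 * re-applied on attach. Purely illustrative devicetree snippet (offsets
 * and values are made up, not taken from any real SoC):
 *
 *	attach-impl-defs = <0x6000 0x270>,
 *			   <0x6060 0x1055>;
 */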
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003855static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
3856{
3857 struct device *dev = smmu->dev;
3858 int i, ntuples, ret;
3859 u32 *tuples;
3860 struct arm_smmu_impl_def_reg *regs, *regit;
3861
3862 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
3863 return 0;
3864
3865 ntuples /= sizeof(u32);
3866 if (ntuples % 2) {
3867 dev_err(dev,
3868 "Invalid number of attach-impl-defs registers: %d\n",
3869 ntuples);
3870 return -EINVAL;
3871 }
3872
3873 regs = devm_kmalloc(
3874 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
3875 GFP_KERNEL);
3876 if (!regs)
3877 return -ENOMEM;
3878
3879 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
3880 if (!tuples)
3881 return -ENOMEM;
3882
3883 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
3884 tuples, ntuples);
3885 if (ret)
3886 return ret;
3887
3888 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
3889 regit->offset = tuples[i];
3890 regit->value = tuples[i + 1];
3891 }
3892
3893 devm_kfree(dev, tuples);
3894
3895 smmu->impl_def_attach_registers = regs;
3896 smmu->num_impl_def_attach_registers = ntuples / 2;
3897
3898 return 0;
3899}
3900
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003901
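/*
 * Clocks are looked up by the names listed in "clock-names"; any clock
 * reading back 0 Hz is bumped to its lowest usable rate. A hypothetical
 * example (the names are illustrative, nothing here mandates them):
 *
 *	clocks = <&gcc GCC_SMMU_CFG_AHB_CLK>, <&gcc GCC_SMMU_AXI_CLK>;
 *	clock-names = "iface_clk", "bus_clk";
 */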
3902static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003903{
3904 const char *cname;
3905 struct property *prop;
3906 int i;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003907 struct device *dev = pwr->dev;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003908
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003909 pwr->num_clocks =
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003910 of_property_count_strings(dev->of_node, "clock-names");
3911
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003912 if (pwr->num_clocks < 1) {
3913 pwr->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003914 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07003915 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003916
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003917 pwr->clocks = devm_kzalloc(
3918 dev, sizeof(*pwr->clocks) * pwr->num_clocks,
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003919 GFP_KERNEL);
3920
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003921 if (!pwr->clocks)
3922 return -ENOMEM;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003923
3924 i = 0;
3925 of_property_for_each_string(dev->of_node, "clock-names",
3926 prop, cname) {
3927 struct clk *c = devm_clk_get(dev, cname);
3928
3929 if (IS_ERR(c)) {
 3930 dev_err(dev, "Couldn't get clock: %s\n",
 3931 cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07003932 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003933 }
3934
3935 if (clk_get_rate(c) == 0) {
3936 long rate = clk_round_rate(c, 1000);
3937
3938 clk_set_rate(c, rate);
3939 }
3940
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003941 pwr->clocks[i] = c;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003942
3943 ++i;
3944 }
3945 return 0;
3946}
3947
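/*
 * Regulator notifier: halt the SMMU just before its GDSC is disabled, and
 * on re-enable restore the secure configuration before resuming, so no
 * translation runs against lost state.
 */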
Charan Teja Reddyf8464882017-12-05 20:29:05 +05303948static int regulator_notifier(struct notifier_block *nb,
3949 unsigned long event, void *data)
3950{
3951 int ret = 0;
3952 struct arm_smmu_device *smmu = container_of(nb, struct arm_smmu_device,
3953 regulator_nb);
3954
3955 if (event != REGULATOR_EVENT_PRE_DISABLE &&
3956 event != REGULATOR_EVENT_ENABLE)
3957 return NOTIFY_OK;
3958
3959 ret = arm_smmu_prepare_clocks(smmu->pwr);
3960 if (ret)
3961 goto out;
3962
3963 ret = arm_smmu_power_on_atomic(smmu->pwr);
3964 if (ret)
3965 goto unprepare_clock;
3966
3967 if (event == REGULATOR_EVENT_PRE_DISABLE)
3968 qsmmuv2_halt(smmu);
3969 else if (event == REGULATOR_EVENT_ENABLE) {
Charan Teja Reddyec6f7822018-01-10 17:32:52 +05303970 if (arm_smmu_restore_sec_cfg(smmu, 0))
Charan Teja Reddyf8464882017-12-05 20:29:05 +05303971 goto power_off;
3972 qsmmuv2_resume(smmu);
3973 }
3974power_off:
3975 arm_smmu_power_off_atomic(smmu->pwr);
3976unprepare_clock:
3977 arm_smmu_unprepare_clocks(smmu->pwr);
3978out:
3979 return NOTIFY_OK;
3980}
3981
3982static int register_regulator_notifier(struct arm_smmu_device *smmu)
3983{
3984 struct device *dev = smmu->dev;
3985 struct regulator_bulk_data *consumers;
3986 int ret = 0, num_consumers;
3987 struct arm_smmu_power_resources *pwr = smmu->pwr;
3988
3989 if (!(smmu->options & ARM_SMMU_OPT_HALT))
3990 goto out;
3991
3992 num_consumers = pwr->num_gdscs;
3993 consumers = pwr->gdscs;
3994
3995 if (!num_consumers) {
 3996 dev_info(dev, "no regulator info exists for %s\n",
3997 dev_name(dev));
3998 goto out;
3999 }
4000
4001 smmu->regulator_nb.notifier_call = regulator_notifier;
 4002 /* Registering the notifier against one GDSC is sufficient, as we
 4003 * enable/disable the regulators as a group.
 4004 */
4005 ret = regulator_register_notifier(consumers[0].consumer,
4006 &smmu->regulator_nb);
4007 if (ret)
4008 dev_err(dev, "Regulator notifier request failed\n");
4009out:
4010 return ret;
4011}
4012
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004013static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004014{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004015 const char *cname;
4016 struct property *prop;
4017 int i, ret = 0;
4018 struct device *dev = pwr->dev;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004019
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004020 pwr->num_gdscs =
4021 of_property_count_strings(dev->of_node, "qcom,regulator-names");
4022
4023 if (pwr->num_gdscs < 1) {
4024 pwr->num_gdscs = 0;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004025 return 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004026 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004027
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004028 pwr->gdscs = devm_kzalloc(
4029 dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
4030
4031 if (!pwr->gdscs)
4032 return -ENOMEM;
4033
Prakash Guptafad87ca2017-05-16 12:13:02 +05304034 if (!of_property_read_u32(dev->of_node,
4035 "qcom,deferred-regulator-disable-delay",
4036 &(pwr->regulator_defer)))
4037 dev_info(dev, "regulator defer delay %d\n",
4038 pwr->regulator_defer);
4039
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004040 i = 0;
4041 of_property_for_each_string(dev->of_node, "qcom,regulator-names",
4042 prop, cname)
Patrick Daly86396be2017-04-17 18:08:45 -07004043 pwr->gdscs[i++].supply = cname;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004044
4045 ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
4046 return ret;
4047}
4048
4049static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
4050{
4051 struct device *dev = pwr->dev;
4052
4053 /* We don't want the bus APIs to print an error message */
4054 if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
4055 dev_dbg(dev, "No bus scaling info\n");
4056 return 0;
4057 }
4058
4059 pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
4060 if (!pwr->bus_dt_data) {
4061 dev_err(dev, "Unable to read bus-scaling from devicetree\n");
4062 return -EINVAL;
4063 }
4064
4065 pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
4066 if (!pwr->bus_client) {
4067 dev_err(dev, "Bus client registration failed\n");
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004068 return -EINVAL;
4069 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004070
4071 return 0;
4072}
4073
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004074/*
4075 * Cleanup done by devm. Any non-devm resources must clean up themselves.
4076 */
4077static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
4078 struct platform_device *pdev)
Patrick Daly2764f952016-09-06 19:22:44 -07004079{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004080 struct arm_smmu_power_resources *pwr;
4081 int ret;
Patrick Daly2764f952016-09-06 19:22:44 -07004082
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004083 pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
4084 if (!pwr)
4085 return ERR_PTR(-ENOMEM);
Patrick Daly2764f952016-09-06 19:22:44 -07004086
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004087 pwr->dev = &pdev->dev;
4088 pwr->pdev = pdev;
4089 mutex_init(&pwr->power_lock);
4090 spin_lock_init(&pwr->clock_refs_lock);
Patrick Daly2764f952016-09-06 19:22:44 -07004091
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004092 ret = arm_smmu_init_clocks(pwr);
4093 if (ret)
4094 return ERR_PTR(ret);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004095
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004096 ret = arm_smmu_init_regulators(pwr);
4097 if (ret)
4098 return ERR_PTR(ret);
4099
4100 ret = arm_smmu_init_bus_scaling(pwr);
4101 if (ret)
4102 return ERR_PTR(ret);
4103
4104 return pwr;
Patrick Daly2764f952016-09-06 19:22:44 -07004105}
4106
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004107/*
Patrick Dalyabeee952017-04-13 18:14:59 -07004108 * Bus APIs are not devm-safe.
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004109 */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004110static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004111{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004112 msm_bus_scale_unregister_client(pwr->bus_client);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004113}
4114
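/*
 * Probe the global ID registers: ID0 for translation stages and stream
 * matching/indexing, ID1 for the page/context-bank geometry, ID2 for the
 * supported input/output address sizes and page-table formats.
 */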
Will Deacon45ae7cf2013-06-24 18:31:25 +01004115static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
4116{
4117 unsigned long size;
4118 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
4119 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01004120 bool cttw_dt, cttw_reg;
Robin Murphya754fd12016-09-12 17:13:50 +01004121 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004122
Charan Teja Reddyec6f7822018-01-10 17:32:52 +05304123 if (arm_smmu_restore_sec_cfg(smmu, 0))
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304124 return -ENODEV;
4125
Mitchel Humpherysba822582015-10-20 11:37:41 -07004126 dev_dbg(smmu->dev, "probing hardware configuration...\n");
4127 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01004128 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004129
4130 /* ID0 */
4131 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01004132
4133 /* Restrict available stages based on module parameter */
4134 if (force_stage == 1)
4135 id &= ~(ID0_S2TS | ID0_NTS);
4136 else if (force_stage == 2)
4137 id &= ~(ID0_S1TS | ID0_NTS);
4138
Will Deacon45ae7cf2013-06-24 18:31:25 +01004139 if (id & ID0_S1TS) {
4140 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004141 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004142 }
4143
4144 if (id & ID0_S2TS) {
4145 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004146 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004147 }
4148
4149 if (id & ID0_NTS) {
4150 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004151 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004152 }
4153
4154 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01004155 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004156 dev_err(smmu->dev, "\tno translation support!\n");
4157 return -ENODEV;
4158 }
4159
Robin Murphyb7862e32016-04-13 18:13:03 +01004160 if ((id & ID0_S1TS) &&
4161 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00004162 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004163 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00004164 }
4165
Robin Murphybae2c2d2015-07-29 19:46:05 +01004166 /*
4167 * In order for DMA API calls to work properly, we must defer to what
4168 * the DT says about coherency, regardless of what the hardware claims.
4169 * Fortunately, this also opens up a workaround for systems where the
4170 * ID register value has ended up configured incorrectly.
4171 */
4172 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
4173 cttw_reg = !!(id & ID0_CTTW);
4174 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01004175 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01004176 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004177 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01004178 cttw_dt ? "" : "non-");
4179 if (cttw_dt != cttw_reg)
4180 dev_notice(smmu->dev,
4181 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004182
Robin Murphy53867802016-09-12 17:13:48 +01004183 /* Max. number of entries we have for stream matching/indexing */
4184 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
4185 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004186 if (id & ID0_SMS) {
Robin Murphy53867802016-09-12 17:13:48 +01004187 u32 smr;
Patrick Daly937de532016-12-12 18:44:09 -08004188 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004189
4190 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy53867802016-09-12 17:13:48 +01004191 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
4192 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004193 dev_err(smmu->dev,
4194 "stream-matching supported, but no SMRs present!\n");
4195 return -ENODEV;
4196 }
4197
Robin Murphy53867802016-09-12 17:13:48 +01004198 /*
4199 * SMR.ID bits may not be preserved if the corresponding MASK
4200 * bits are set, so check each one separately. We can reject
4201 * masters later if they try to claim IDs outside these masks.
4202 */
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304203 if (!arm_smmu_is_static_cb(smmu)) {
4204 for (i = 0; i < size; i++) {
4205 smr = readl_relaxed(
4206 gr0_base + ARM_SMMU_GR0_SMR(i));
4207 if (!(smr & SMR_VALID))
4208 break;
4209 }
4210 if (i == size) {
4211 dev_err(smmu->dev,
4212 "Unable to compute streamid_masks\n");
4213 return -ENODEV;
4214 }
4215
4216 smr = smmu->streamid_mask << SMR_ID_SHIFT;
4217 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
Patrick Daly937de532016-12-12 18:44:09 -08004218 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304219 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
Patrick Daly937de532016-12-12 18:44:09 -08004220
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304221 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
4222 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
4223 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
4224 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
4225 } else {
4226 smmu->smr_mask_mask = SMR_MASK_MASK;
4227 smmu->streamid_mask = SID_MASK;
4228 }
Dhaval Patel031d7462015-05-09 14:47:29 -07004229
Robin Murphy468f4942016-09-12 17:13:49 +01004230 /* Zero-initialised to mark as invalid */
4231 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
4232 GFP_KERNEL);
4233 if (!smmu->smrs)
4234 return -ENOMEM;
4235
Robin Murphy53867802016-09-12 17:13:48 +01004236 dev_notice(smmu->dev,
4237 "\tstream matching with %lu register groups, mask 0x%x",
4238 size, smmu->smr_mask_mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004239 }
Robin Murphya754fd12016-09-12 17:13:50 +01004240 /* s2cr->type == 0 means translation, so initialise explicitly */
4241 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
4242 GFP_KERNEL);
4243 if (!smmu->s2crs)
4244 return -ENOMEM;
4245 for (i = 0; i < size; i++)
4246 smmu->s2crs[i] = s2cr_init_val;
4247
Robin Murphy53867802016-09-12 17:13:48 +01004248 smmu->num_mapping_groups = size;
Robin Murphy6668f692016-09-12 17:13:54 +01004249 mutex_init(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004250
Robin Murphy7602b872016-04-28 17:12:09 +01004251 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
4252 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
4253 if (!(id & ID0_PTFS_NO_AARCH32S))
4254 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
4255 }
4256
Will Deacon45ae7cf2013-06-24 18:31:25 +01004257 /* ID1 */
4258 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01004259 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004260
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01004261 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00004262 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01004263 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01004264 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07004265 dev_warn(smmu->dev,
4266 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
4267 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004268
Will Deacon518f7132014-11-14 17:17:54 +00004269 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004270 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
4271 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
4272 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
4273 return -ENODEV;
4274 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07004275 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01004276 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01004277 /*
4278 * Cavium CN88xx erratum #27704.
4279 * Ensure ASID and VMID allocation is unique across all SMMUs in
4280 * the system.
4281 */
4282 if (smmu->model == CAVIUM_SMMUV2) {
4283 smmu->cavium_id_base =
4284 atomic_add_return(smmu->num_context_banks,
4285 &cavium_smmu_context_count);
4286 smmu->cavium_id_base -= smmu->num_context_banks;
4287 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004288
4289 /* ID2 */
4290 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
4291 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00004292 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004293
Will Deacon518f7132014-11-14 17:17:54 +00004294 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01004295 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00004296 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004297
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08004298 if (id & ID2_VMID16)
4299 smmu->features |= ARM_SMMU_FEAT_VMID16;
4300
Robin Murphyf1d84542015-03-04 16:41:05 +00004301 /*
4302 * What the page table walker can address actually depends on which
4303 * descriptor format is in use, but since a) we don't know that yet,
4304 * and b) it can vary per context bank, this will have to do...
4305 */
4306 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
4307 dev_warn(smmu->dev,
4308 "failed to set DMA mask for table walker\n");
4309
Robin Murphyb7862e32016-04-13 18:13:03 +01004310 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00004311 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01004312 if (smmu->version == ARM_SMMU_V1_64K)
4313 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004314 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004315 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00004316 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00004317 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01004318 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00004319 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01004320 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00004321 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01004322 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004323 }
4324
Robin Murphy7602b872016-04-28 17:12:09 +01004325 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01004326 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01004327 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01004328 if (smmu->features &
4329 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01004330 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01004331 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01004332 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01004333 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01004334 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01004335
Robin Murphyd5466352016-05-09 17:20:09 +01004336 if (arm_smmu_ops.pgsize_bitmap == -1UL)
4337 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
4338 else
4339 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004340 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01004341 smmu->pgsize_bitmap);
4342
Will Deacon518f7132014-11-14 17:17:54 +00004343
Will Deacon28d60072014-09-01 16:24:48 +01004344 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004345 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
4346 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01004347
4348 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004349 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
4350 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01004351
Will Deacon45ae7cf2013-06-24 18:31:25 +01004352 return 0;
4353}
4354
Robin Murphy67b65a32016-04-13 18:12:57 +01004355struct arm_smmu_match_data {
4356 enum arm_smmu_arch_version version;
4357 enum arm_smmu_implementation model;
Patrick Dalyd7476202016-09-08 18:23:28 -07004358 struct arm_smmu_arch_ops *arch_ops;
Robin Murphy67b65a32016-04-13 18:12:57 +01004359};
4360
Patrick Dalyd7476202016-09-08 18:23:28 -07004361#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops) \
4362static struct arm_smmu_match_data name = { \
4363.version = ver, \
4364.model = imp, \
4365.arch_ops = ops, \
4366} \
Robin Murphy67b65a32016-04-13 18:12:57 +01004367
Patrick Daly1f8a2882016-09-12 17:32:05 -07004368struct arm_smmu_arch_ops qsmmuv500_arch_ops;
4369
Patrick Dalyd7476202016-09-08 18:23:28 -07004370ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
4371ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
4372ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
4373ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
4374ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
Patrick Dalyad441dd2016-09-15 15:50:46 -07004375ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004376ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
4377 &qsmmuv500_arch_ops);
Robin Murphy67b65a32016-04-13 18:12:57 +01004378
Joerg Roedel09b52692014-10-02 12:24:45 +02004379static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01004380 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
4381 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
4382 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01004383 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01004384 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01004385 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07004386 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Patrick Daly1f8a2882016-09-12 17:32:05 -07004387 { .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
Robin Murphy09360402014-08-28 17:51:59 +01004388 { },
4389};
4390MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
4391
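/*
 * When TZ-managed SMMUs are supported, initialise the secure pagetable
 * pool as soon as at least one enabled SMMU node advertises a
 * "qcom,tz-device-id".
 */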
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304392#ifdef CONFIG_MSM_TZ_SMMU
4393int register_iommu_sec_ptbl(void)
4394{
4395 struct device_node *np;
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004396
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304397 for_each_matching_node(np, arm_smmu_of_match)
4398 if (of_find_property(np, "qcom,tz-device-id", NULL) &&
4399 of_device_is_available(np))
4400 break;
4401 if (!np)
4402 return -ENODEV;
4403
4404 of_node_put(np);
4405
4406 return msm_iommu_sec_pgtbl_init();
4407}
4408#endif
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004409static int arm_smmu_of_iommu_configure_fixup(struct device *dev, void *data)
4410{
4411 if (!dev->iommu_fwspec)
4412 of_iommu_configure(dev, dev->of_node);
4413 return 0;
4414}
4415
Patrick Daly000a2f22017-02-13 22:18:12 -08004416static int arm_smmu_add_device_fixup(struct device *dev, void *data)
4417{
4418 struct iommu_ops *ops = data;
4419
4420 ops->add_device(dev);
4421 return 0;
4422}
4423
Patrick Daly1f8a2882016-09-12 17:32:05 -07004424static int qsmmuv500_tbu_register(struct device *dev, void *data);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004425static int arm_smmu_device_dt_probe(struct platform_device *pdev)
4426{
Robin Murphy67b65a32016-04-13 18:12:57 +01004427 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004428 struct resource *res;
4429 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004430 struct device *dev = &pdev->dev;
Robin Murphyd5b41782016-09-14 15:21:39 +01004431 int num_irqs, i, err;
Robin Murphy7e96c742016-09-14 15:26:46 +01004432 bool legacy_binding;
4433
4434 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
4435 if (legacy_binding && !using_generic_binding) {
4436 if (!using_legacy_binding)
4437 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
4438 using_legacy_binding = true;
4439 } else if (!legacy_binding && !using_legacy_binding) {
4440 using_generic_binding = true;
4441 } else {
4442 dev_err(dev, "not probing due to mismatched DT properties\n");
4443 return -ENODEV;
4444 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004445
4446 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
4447 if (!smmu) {
4448 dev_err(dev, "failed to allocate arm_smmu_device\n");
4449 return -ENOMEM;
4450 }
4451 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08004452 spin_lock_init(&smmu->atos_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07004453 idr_init(&smmu->asid_idr);
4454 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004455
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004456 data = of_device_get_match_data(dev);
Robin Murphy67b65a32016-04-13 18:12:57 +01004457 smmu->version = data->version;
4458 smmu->model = data->model;
Patrick Dalyd7476202016-09-08 18:23:28 -07004459 smmu->arch_ops = data->arch_ops;
Robin Murphy09360402014-08-28 17:51:59 +01004460
Will Deacon45ae7cf2013-06-24 18:31:25 +01004461 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304462 if (res)
4463 smmu->phys_addr = res->start;
Julia Lawall8a7f4312013-08-19 12:20:37 +01004464 smmu->base = devm_ioremap_resource(dev, res);
4465 if (IS_ERR(smmu->base))
4466 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004467 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004468
4469 if (of_property_read_u32(dev->of_node, "#global-interrupts",
4470 &smmu->num_global_irqs)) {
4471 dev_err(dev, "missing #global-interrupts property\n");
4472 return -ENODEV;
4473 }
4474
4475 num_irqs = 0;
4476 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
4477 num_irqs++;
4478 if (num_irqs > smmu->num_global_irqs)
4479 smmu->num_context_irqs++;
4480 }
4481
Andreas Herrmann44a08de2013-10-01 13:39:07 +01004482 if (!smmu->num_context_irqs) {
4483 dev_err(dev, "found %d interrupts but expected at least %d\n",
4484 num_irqs, smmu->num_global_irqs + 1);
4485 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004486 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004487
4488 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
4489 GFP_KERNEL);
4490 if (!smmu->irqs) {
4491 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
4492 return -ENOMEM;
4493 }
4494
4495 for (i = 0; i < num_irqs; ++i) {
4496 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07004497
Will Deacon45ae7cf2013-06-24 18:31:25 +01004498 if (irq < 0) {
4499 dev_err(dev, "failed to get irq index %d\n", i);
4500 return -ENODEV;
4501 }
4502 smmu->irqs[i] = irq;
4503 }
4504
Dhaval Patel031d7462015-05-09 14:47:29 -07004505 parse_driver_options(smmu);
4506
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004507 smmu->pwr = arm_smmu_init_power_resources(pdev);
4508 if (IS_ERR(smmu->pwr))
4509 return PTR_ERR(smmu->pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004510
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004511 err = arm_smmu_power_on(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004512 if (err)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004513 goto out_exit_power_resources;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004514
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304515 smmu->sec_id = msm_dev_to_device_id(dev);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004516 err = arm_smmu_device_cfg_probe(smmu);
4517 if (err)
4518 goto out_power_off;
4519
Patrick Dalyda688822017-05-17 20:12:48 -07004520 err = arm_smmu_handoff_cbs(smmu);
4521 if (err)
4522 goto out_power_off;
4523
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004524 err = arm_smmu_parse_impl_def_registers(smmu);
4525 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004526 goto out_power_off;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004527
Robin Murphyb7862e32016-04-13 18:13:03 +01004528 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01004529 smmu->num_context_banks != smmu->num_context_irqs) {
4530 dev_err(dev,
Mitchel Humpherys73230ce2014-12-11 17:18:02 -08004531 "found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
4532 smmu->num_context_irqs, smmu->num_context_banks,
4533 smmu->num_context_banks);
4534 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004535 }
4536
Will Deacon45ae7cf2013-06-24 18:31:25 +01004537 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08004538 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
4539 NULL, arm_smmu_global_fault,
4540 IRQF_ONESHOT | IRQF_SHARED,
4541 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004542 if (err) {
4543 dev_err(dev, "failed to request global IRQ %d (%u)\n",
4544 i, smmu->irqs[i]);
Robin Murphyd5b41782016-09-14 15:21:39 +01004545 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004546 }
4547 }
4548
Patrick Dalyd7476202016-09-08 18:23:28 -07004549 err = arm_smmu_arch_init(smmu);
4550 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004551 goto out_power_off;
Patrick Dalyd7476202016-09-08 18:23:28 -07004552
Robin Murphy06e393e2016-09-12 17:13:55 +01004553 of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004554 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01004555 arm_smmu_device_reset(smmu);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004556 arm_smmu_power_off(smmu->pwr);
Patrick Dalyd7476202016-09-08 18:23:28 -07004557
Patrick Daly8e3371a2017-02-13 22:14:53 -08004558 INIT_LIST_HEAD(&smmu->list);
4559 spin_lock(&arm_smmu_devices_lock);
4560 list_add(&smmu->list, &arm_smmu_devices);
4561 spin_unlock(&arm_smmu_devices_lock);
4562
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004563 /* bus_set_iommu depends on this. */
4564 bus_for_each_dev(&platform_bus_type, NULL, NULL,
4565 arm_smmu_of_iommu_configure_fixup);
4566
Robin Murphy7e96c742016-09-14 15:26:46 +01004567 /* Oh, for a proper bus abstraction */
4568 if (!iommu_present(&platform_bus_type))
4569 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
Patrick Daly000a2f22017-02-13 22:18:12 -08004570 else
4571 bus_for_each_dev(&platform_bus_type, NULL, &arm_smmu_ops,
4572 arm_smmu_add_device_fixup);
Charan Teja Reddyf8464882017-12-05 20:29:05 +05304573
4574 err = register_regulator_notifier(smmu);
4575 if (err)
 4576 goto out_exit_power_resources;
4577
Robin Murphy7e96c742016-09-14 15:26:46 +01004578#ifdef CONFIG_ARM_AMBA
4579 if (!iommu_present(&amba_bustype))
4580 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
4581#endif
4582#ifdef CONFIG_PCI
4583 if (!iommu_present(&pci_bus_type)) {
4584 pci_request_acs();
4585 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
4586 }
4587#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01004588 return 0;
4589
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004590out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004591 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004592
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004593out_exit_power_resources:
4594 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004595
Will Deacon45ae7cf2013-06-24 18:31:25 +01004596 return err;
4597}
4598
4599static int arm_smmu_device_remove(struct platform_device *pdev)
4600{
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004601 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004602
4603 if (!smmu)
4604 return -ENODEV;
4605
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004606 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07004607 return -EINVAL;
4608
Charan Teja Reddy4971ca42018-01-23 18:27:08 +05304609 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS) ||
4610 !bitmap_empty(smmu->secure_context_map, ARM_SMMU_MAX_CBS))
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004611 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004612
Patrick Dalyc190d932016-08-30 17:23:28 -07004613 idr_destroy(&smmu->asid_idr);
4614
Will Deacon45ae7cf2013-06-24 18:31:25 +01004615 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07004616 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004617 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004618
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004619 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004620
Will Deacon45ae7cf2013-06-24 18:31:25 +01004621 return 0;
4622}
4623
Will Deacon45ae7cf2013-06-24 18:31:25 +01004624static struct platform_driver arm_smmu_driver = {
4625 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004626 .name = "arm-smmu",
4627 .of_match_table = of_match_ptr(arm_smmu_of_match),
4628 },
4629 .probe = arm_smmu_device_dt_probe,
4630 .remove = arm_smmu_device_remove,
4631};
4632
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004633static struct platform_driver qsmmuv500_tbu_driver;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004634static int __init arm_smmu_init(void)
4635{
Robin Murphy7e96c742016-09-14 15:26:46 +01004636 static bool registered;
4637 int ret = 0;
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004638 ktime_t cur;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004639
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004640 if (registered)
4641 return 0;
4642
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004643 cur = ktime_get();
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004644 ret = platform_driver_register(&qsmmuv500_tbu_driver);
4645 if (ret)
4646 return ret;
4647
4648 ret = platform_driver_register(&arm_smmu_driver);
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304649#ifdef CONFIG_MSM_TZ_SMMU
 4650 if (!ret)
 4651 ret = register_iommu_sec_ptbl();
4651#endif
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004652 registered = !ret;
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004653 trace_smmu_init(ktime_us_delta(ktime_get(), cur));
4654
Robin Murphy7e96c742016-09-14 15:26:46 +01004655 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004656}
4657
4658static void __exit arm_smmu_exit(void)
4659{
4660 return platform_driver_unregister(&arm_smmu_driver);
4661}
4662
Andreas Herrmannb1950b22013-10-01 13:39:05 +01004663subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004664module_exit(arm_smmu_exit);
4665
Robin Murphy7e96c742016-09-14 15:26:46 +01004666static int __init arm_smmu_of_init(struct device_node *np)
4667{
4668 int ret = arm_smmu_init();
4669
4670 if (ret)
4671 return ret;
4672
4673 if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
4674 return -ENODEV;
4675
4676 return 0;
4677}
4678IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
4679IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
4680IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
4681IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
4682IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
4683IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
Robin Murphy7e96c742016-09-14 15:26:46 +01004684
Patrick Dalya0fddb62017-03-27 19:26:59 -07004685#define TCU_HW_VERSION_HLOS1 (0x18)
4686
Patrick Daly1f8a2882016-09-12 17:32:05 -07004687#define DEBUG_SID_HALT_REG 0x0
4688#define DEBUG_SID_HALT_VAL (0x1 << 16)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004689#define DEBUG_SID_HALT_SID_MASK 0x3ff
4690
4691#define DEBUG_VA_ADDR_REG 0x8
4692
4693#define DEBUG_TXN_TRIGG_REG 0x18
4694#define DEBUG_TXN_AXPROT_SHIFT 6
4695#define DEBUG_TXN_AXCACHE_SHIFT 2
4696#define DEBUG_TRX_WRITE (0x1 << 1)
4697#define DEBUG_TXN_READ (0x0 << 1)
4698#define DEBUG_TXN_TRIGGER 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004699
4700#define DEBUG_SR_HALT_ACK_REG 0x20
4701#define DEBUG_SR_HALT_ACK_VAL (0x1 << 1)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004702#define DEBUG_SR_ECATS_RUNNING_VAL (0x1 << 0)
4703
4704#define DEBUG_PAR_REG 0x28
4705#define DEBUG_PAR_PA_MASK ((0x1ULL << 36) - 1)
4706#define DEBUG_PAR_PA_SHIFT 12
4707#define DEBUG_PAR_FAULT_VAL 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004708
Patrick Daly8c1202b2017-05-10 15:42:30 -07004709#define TBU_DBG_TIMEOUT_US 100
Patrick Daly1f8a2882016-09-12 17:32:05 -07004710
Patrick Daly23301482017-10-12 16:18:25 -07004711#define QSMMUV500_ACTLR_DEEP_PREFETCH_MASK 0x3
4712#define QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT 0x8
4713
Patrick Daly03330cc2017-08-11 14:56:38 -07004714
4715struct actlr_setting {
4716 struct arm_smmu_smr smr;
4717 u32 actlr;
4718};
4719
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004720struct qsmmuv500_archdata {
4721 struct list_head tbus;
Patrick Dalya0fddb62017-03-27 19:26:59 -07004722 void __iomem *tcu_base;
4723 u32 version;
Patrick Dalyda765c62017-09-11 16:31:07 -07004724
4725 struct actlr_setting *actlrs;
4726 u32 actlr_tbl_size;
4727
4728 struct arm_smmu_smr *errata1_clients;
4729 u32 num_errata1_clients;
4730 remote_spinlock_t errata1_lock;
4731 ktime_t last_tlbi_ktime;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004732};
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004733#define get_qsmmuv500_archdata(smmu) \
4734 ((struct qsmmuv500_archdata *)(smmu->archdata))
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004735
Patrick Daly1f8a2882016-09-12 17:32:05 -07004736struct qsmmuv500_tbu_device {
4737 struct list_head list;
4738 struct device *dev;
4739 struct arm_smmu_device *smmu;
4740 void __iomem *base;
4741 void __iomem *status_reg;
4742
4743 struct arm_smmu_power_resources *pwr;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004744 u32 sid_start;
4745 u32 num_sids;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004746
4747 /* Protects halt count */
4748 spinlock_t halt_lock;
4749 u32 halt_count;
4750};
4751
Patrick Daly03330cc2017-08-11 14:56:38 -07004752struct qsmmuv500_group_iommudata {
4753 bool has_actlr;
4754 u32 actlr;
4755};
4756#define to_qsmmuv500_group_iommudata(group) \
4757 ((struct qsmmuv500_group_iommudata *) \
4758 (iommu_group_get_iommudata(group)))
4759
4760
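/*
 * Returns true if any of the device's stream IDs could alias the given
 * SMR entry, ignoring ID bits that are masked out on either side.
 */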
4761static bool arm_smmu_fwspec_match_smr(struct iommu_fwspec *fwspec,
Patrick Dalyda765c62017-09-11 16:31:07 -07004762 struct arm_smmu_smr *smr)
4763{
4764 struct arm_smmu_smr *smr2;
Patrick Daly03330cc2017-08-11 14:56:38 -07004765 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Patrick Dalyda765c62017-09-11 16:31:07 -07004766 int i, idx;
4767
Patrick Daly03330cc2017-08-11 14:56:38 -07004768 for_each_cfg_sme(fwspec, i, idx) {
4769 smr2 = &smmu->smrs[idx];
Patrick Dalyda765c62017-09-11 16:31:07 -07004770 /* Continue if table entry does not match */
4771 if ((smr->id ^ smr2->id) & ~(smr->mask | smr2->mask))
4772 continue;
4773 return true;
4774 }
4775 return false;
4776}
4777
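/*
 * Errata-1 handling: affected clients must space TLB invalidations at
 * least ERRATA1_TLBI_INTERVAL_US apart, and each invalidation is
 * serialised under a remote spinlock. If the TLBSYNC does not complete
 * promptly, SCM calls toggle the errata configuration and the NoC is
 * throttled until it drains.
 */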
4778#define ERRATA1_REMOTE_SPINLOCK "S:6"
4779#define ERRATA1_TLBI_INTERVAL_US 10
4780static bool
4781qsmmuv500_errata1_required(struct arm_smmu_domain *smmu_domain,
4782 struct qsmmuv500_archdata *data)
4783{
4784 bool ret = false;
4785 int j;
4786 struct arm_smmu_smr *smr;
Patrick Daly03330cc2017-08-11 14:56:38 -07004787 struct iommu_fwspec *fwspec;
Patrick Dalyda765c62017-09-11 16:31:07 -07004788
4789 if (smmu_domain->qsmmuv500_errata1_init)
4790 return smmu_domain->qsmmuv500_errata1_client;
4791
Patrick Daly03330cc2017-08-11 14:56:38 -07004792 fwspec = smmu_domain->dev->iommu_fwspec;
Patrick Dalyda765c62017-09-11 16:31:07 -07004793 for (j = 0; j < data->num_errata1_clients; j++) {
4794 smr = &data->errata1_clients[j];
Patrick Daly03330cc2017-08-11 14:56:38 -07004795 if (arm_smmu_fwspec_match_smr(fwspec, smr)) {
Patrick Dalyda765c62017-09-11 16:31:07 -07004796 ret = true;
4797 break;
4798 }
4799 }
4800
4801 smmu_domain->qsmmuv500_errata1_init = true;
4802 smmu_domain->qsmmuv500_errata1_client = ret;
4803 return ret;
4804}
4805
Patrick Daly86960052017-12-04 18:53:13 -08004806#define SCM_CONFIG_ERRATA1_CLIENT_ALL 0x2
4807#define SCM_CONFIG_ERRATA1 0x3
Patrick Dalyda765c62017-09-11 16:31:07 -07004808static void __qsmmuv500_errata1_tlbiall(struct arm_smmu_domain *smmu_domain)
4809{
4810 struct arm_smmu_device *smmu = smmu_domain->smmu;
4811 struct device *dev = smmu_domain->dev;
4812 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
4813 void __iomem *base;
Patrick Daly86960052017-12-04 18:53:13 -08004814 int ret;
Patrick Dalyda765c62017-09-11 16:31:07 -07004815 ktime_t cur;
4816 u32 val;
Patrick Daly86960052017-12-04 18:53:13 -08004817 struct scm_desc desc = {
4818 .args[0] = SCM_CONFIG_ERRATA1_CLIENT_ALL,
4819 .args[1] = false,
4820 .arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
4821 };
Patrick Dalyda765c62017-09-11 16:31:07 -07004822
4823 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
4824 writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
4825 writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
Patrick Daly86960052017-12-04 18:53:13 -08004826 if (!readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
4827 !(val & TLBSTATUS_SACTIVE), 0, 100))
4828 return;
4829
4830 ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_SMMU_PROGRAM,
4831 SCM_CONFIG_ERRATA1),
4832 &desc);
4833 if (ret) {
4834 dev_err(smmu->dev, "Calling into TZ to disable ERRATA1 failed - IOMMU hardware in bad state\n");
4835 BUG();
4836 return;
4837 }
4838
4839 cur = ktime_get();
4840 trace_tlbi_throttle_start(dev, 0);
4841 msm_bus_noc_throttle_wa(true);
4842
Patrick Dalyda765c62017-09-11 16:31:07 -07004843 if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
Patrick Daly86960052017-12-04 18:53:13 -08004844 !(val & TLBSTATUS_SACTIVE), 0, 10000)) {
 4845 dev_err(smmu->dev, "ERRATA1 TLBSYNC timeout - IOMMU hardware in bad state\n");
4846 trace_tlbsync_timeout(dev, 0);
4847 BUG();
4848 }
Patrick Dalyda765c62017-09-11 16:31:07 -07004849
Patrick Daly86960052017-12-04 18:53:13 -08004850 msm_bus_noc_throttle_wa(false);
4851 trace_tlbi_throttle_end(dev, ktime_us_delta(ktime_get(), cur));
Patrick Dalyda765c62017-09-11 16:31:07 -07004852
Patrick Daly86960052017-12-04 18:53:13 -08004853 desc.args[1] = true;
4854 ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_SMMU_PROGRAM,
4855 SCM_CONFIG_ERRATA1),
4856 &desc);
4857 if (ret) {
4858 dev_err(smmu->dev, "Calling into TZ to reenable ERRATA1 failed - IOMMU hardware in bad state\n");
4859 BUG();
Patrick Dalyda765c62017-09-11 16:31:07 -07004860 }
4861}
4862
4863/* Must be called with clocks/regulators enabled */
4864static void qsmmuv500_errata1_tlb_inv_context(void *cookie)
4865{
4866 struct arm_smmu_domain *smmu_domain = cookie;
4867 struct device *dev = smmu_domain->dev;
4868 struct qsmmuv500_archdata *data =
4869 get_qsmmuv500_archdata(smmu_domain->smmu);
4870 ktime_t cur;
Patrick Daly1faa3112017-10-31 16:40:40 -07004871 unsigned long flags;
Patrick Dalyda765c62017-09-11 16:31:07 -07004872 bool errata;
4873
4874 cur = ktime_get();
Prakash Gupta25f90512017-11-20 14:56:54 +05304875 trace_tlbi_start(dev, 0);
Patrick Dalyda765c62017-09-11 16:31:07 -07004876
4877 errata = qsmmuv500_errata1_required(smmu_domain, data);
Patrick Daly1faa3112017-10-31 16:40:40 -07004878 remote_spin_lock_irqsave(&data->errata1_lock, flags);
Patrick Dalyda765c62017-09-11 16:31:07 -07004879 if (errata) {
4880 s64 delta;
4881
4882 delta = ktime_us_delta(ktime_get(), data->last_tlbi_ktime);
4883 if (delta < ERRATA1_TLBI_INTERVAL_US)
4884 udelay(ERRATA1_TLBI_INTERVAL_US - delta);
4885
4886 __qsmmuv500_errata1_tlbiall(smmu_domain);
4887
4888 data->last_tlbi_ktime = ktime_get();
4889 } else {
4890 __qsmmuv500_errata1_tlbiall(smmu_domain);
4891 }
Patrick Daly1faa3112017-10-31 16:40:40 -07004892 remote_spin_unlock_irqrestore(&data->errata1_lock, flags);
Patrick Dalyda765c62017-09-11 16:31:07 -07004893
Prakash Gupta25f90512017-11-20 14:56:54 +05304894 trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
Patrick Dalyda765c62017-09-11 16:31:07 -07004895}
4896
4897static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops = {
4898 .tlb_flush_all = qsmmuv500_errata1_tlb_inv_context,
4899 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
4900 .free_pages_exact = arm_smmu_free_pages_exact,
4901};
4902
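/*
 * Halt a TBU so ECATS transactions can be issued safely. If a context
 * fault is already pending the halt cannot complete, so faults are
 * temporarily masked and the stalled transaction terminated before
 * waiting for the acknowledgement again.
 */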
Patrick Daly8c1202b2017-05-10 15:42:30 -07004903static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu,
4904 struct arm_smmu_domain *smmu_domain)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004905{
4906 unsigned long flags;
Patrick Daly8c1202b2017-05-10 15:42:30 -07004907 u32 halt, fsr, sctlr_orig, sctlr, status;
4908 void __iomem *base, *cb_base;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004909
4910 spin_lock_irqsave(&tbu->halt_lock, flags);
4911 if (tbu->halt_count) {
4912 tbu->halt_count++;
4913 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4914 return 0;
4915 }
4916
Patrick Daly8c1202b2017-05-10 15:42:30 -07004917 cb_base = ARM_SMMU_CB_BASE(smmu_domain->smmu) +
4918 ARM_SMMU_CB(smmu_domain->smmu, smmu_domain->cfg.cbndx);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004919 base = tbu->base;
Patrick Daly8c1202b2017-05-10 15:42:30 -07004920 halt = readl_relaxed(base + DEBUG_SID_HALT_REG);
4921 halt |= DEBUG_SID_HALT_VAL;
4922 writel_relaxed(halt, base + DEBUG_SID_HALT_REG);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004923
Patrick Daly8c1202b2017-05-10 15:42:30 -07004924 if (!readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
4925 (status & DEBUG_SR_HALT_ACK_VAL),
4926 0, TBU_DBG_TIMEOUT_US))
4927 goto out;
4928
4929 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4930 if (!(fsr & FSR_FAULT)) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004931 dev_err(tbu->dev, "Couldn't halt TBU!\n");
4932 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4933 return -ETIMEDOUT;
4934 }
4935
Patrick Daly8c1202b2017-05-10 15:42:30 -07004936 /*
 4937 * We are in a fault; our request to halt the bus will not complete
 4938 * until transactions in front of us (such as the fault itself) have
 4939 * completed. Disable IOMMU faults and terminate any existing
4940 * transactions.
4941 */
4942 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
4943 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
4944 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
4945
4946 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
4947 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4948
4949 if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
4950 (status & DEBUG_SR_HALT_ACK_VAL),
4951 0, TBU_DBG_TIMEOUT_US)) {
4952 dev_err(tbu->dev, "Couldn't halt TBU from fault context!\n");
4953 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
4954 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4955 return -ETIMEDOUT;
4956 }
4957
4958 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
4959out:
Patrick Daly1f8a2882016-09-12 17:32:05 -07004960 tbu->halt_count = 1;
4961 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4962 return 0;
4963}
4964
4965static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
4966{
4967 unsigned long flags;
4968 u32 val;
4969 void __iomem *base;
4970
4971 spin_lock_irqsave(&tbu->halt_lock, flags);
4972 if (!tbu->halt_count) {
4973 WARN(1, "%s: bad tbu->halt_count", dev_name(tbu->dev));
4974 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4975 return;
4976
4977 } else if (tbu->halt_count > 1) {
4978 tbu->halt_count--;
4979 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4980 return;
4981 }
4982
4983 base = tbu->base;
4984 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
4985 val &= ~DEBUG_SID_HALT_VAL;
4986 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
4987
4988 tbu->halt_count = 0;
4989 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4990}
4991
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004992static struct qsmmuv500_tbu_device *qsmmuv500_find_tbu(
4993 struct arm_smmu_device *smmu, u32 sid)
4994{
4995 struct qsmmuv500_tbu_device *tbu = NULL;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004996 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004997
4998 list_for_each_entry(tbu, &data->tbus, list) {
4999 if (tbu->sid_start <= sid &&
5000 sid < tbu->sid_start + tbu->num_sids)
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07005001 return tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005002 }
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07005003 return NULL;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005004}
5005
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005006static int qsmmuv500_ecats_lock(struct arm_smmu_domain *smmu_domain,
5007 struct qsmmuv500_tbu_device *tbu,
5008 unsigned long *flags)
5009{
5010 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005011 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005012 u32 val;
5013
5014 spin_lock_irqsave(&smmu->atos_lock, *flags);
5015 /* The status register is not accessible on version 1.0 */
5016 if (data->version == 0x01000000)
5017 return 0;
5018
5019 if (readl_poll_timeout_atomic(tbu->status_reg,
5020 val, (val == 0x1), 0,
5021 TBU_DBG_TIMEOUT_US)) {
5022 dev_err(tbu->dev, "ECATS hw busy!\n");
5023 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
5024 return -ETIMEDOUT;
5025 }
5026
5027 return 0;
5028}
5029
5030static void qsmmuv500_ecats_unlock(struct arm_smmu_domain *smmu_domain,
5031 struct qsmmuv500_tbu_device *tbu,
5032 unsigned long *flags)
5033{
5034 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005035 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005036
5037 /* The status register is not accessible on version 1.0 */
5038 if (data->version != 0x01000000)
5039 writel_relaxed(0, tbu->status_reg);
5040 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
5041}
5042
5043/*
5044 * Zero means failure.
5045 */
static phys_addr_t qsmmuv500_iova_to_phys(
	struct iommu_domain *domain, dma_addr_t iova, u32 sid)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct qsmmuv500_tbu_device *tbu;
	int ret;
	phys_addr_t phys = 0;
	u64 val, fsr;
	unsigned long flags;
	void __iomem *cb_base;
	u32 sctlr_orig, sctlr;
	int needs_redo = 0;
	ktime_t timeout;

	/* Only 36-bit IOVAs are supported */
	if (iova >= (1ULL << 36)) {
		dev_err_ratelimited(smmu->dev, "ECATS: address too large: %pad\n",
				    &iova);
		return 0;
	}

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	tbu = qsmmuv500_find_tbu(smmu, sid);
	if (!tbu)
		return 0;

	ret = arm_smmu_power_on(tbu->pwr);
	if (ret)
		return 0;

	ret = qsmmuv500_tbu_halt(tbu, smmu_domain);
	if (ret)
		goto out_power_off;

	/*
	 * ECATS can trigger the fault interrupt, so disable it temporarily
	 * and check for an interrupt manually.
	 */
	sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
	sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
	writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);

	/* Only one concurrent atos operation */
	ret = qsmmuv500_ecats_lock(smmu_domain, tbu, &flags);
	if (ret)
		goto out_resume;

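	/*
	 * Each attempt programs the stream ID and the target IOVA into the
	 * TBU debug registers, triggers a single synthetic read transaction
	 * and then polls for completion, checking the context bank FSR in
	 * case the walk faulted.  Up to three attempts are made (see the
	 * needs_redo handling below) because a failed translation can make
	 * the immediately following one report a spurious failure.
	 */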
redo:
	/* Set address and stream-id */
	val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
	val |= sid & DEBUG_SID_HALT_SID_MASK;
	writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
	writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);

	/*
	 * Write-back Read and Write-Allocate
	 * Privileged, nonsecure, data transaction
	 * Read operation.
	 */
	val = 0xF << DEBUG_TXN_AXCACHE_SHIFT;
	val |= 0x3 << DEBUG_TXN_AXPROT_SHIFT;
	val |= DEBUG_TXN_TRIGGER;
	writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);

	ret = 0;
	/* Open-coded variant of readx_poll_timeout_atomic() */
	timeout = ktime_add_us(ktime_get(), TBU_DBG_TIMEOUT_US);
	for (;;) {
		val = readl_relaxed(tbu->base + DEBUG_SR_HALT_ACK_REG);
		if (!(val & DEBUG_SR_ECATS_RUNNING_VAL))
			break;
		val = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
		if (val & FSR_FAULT)
			break;
		if (ktime_compare(ktime_get(), timeout) > 0) {
			dev_err(tbu->dev, "ECATS translation timed out!\n");
			ret = -ETIMEDOUT;
			break;
		}
	}

	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
	if (fsr & FSR_FAULT) {
		dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx\n",
			fsr);
		ret = -EINVAL;

		/*
		 * Clear pending interrupts.
		 * Barrier required to ensure that the FSR is cleared
		 * before resuming SMMU operation.
		 */
		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
		wmb();
		writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
	}

	val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
	if (val & DEBUG_PAR_FAULT_VAL) {
		dev_err(tbu->dev, "ECATS translation failed! PAR = %llx\n",
			val);
		ret = -EINVAL;
	}

	phys = (val >> DEBUG_PAR_PA_SHIFT) & DEBUG_PAR_PA_MASK;
	if (ret < 0)
		phys = 0;

	/* Reset hardware */
	writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
	writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);

	/*
	 * After a failed translation, the next successful translation will
	 * incorrectly be reported as a failure.
	 */
	if (!phys && needs_redo++ < 2)
		goto redo;

	writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
	qsmmuv500_ecats_unlock(smmu_domain, tbu, &flags);

out_resume:
	qsmmuv500_tbu_resume(tbu);

out_power_off:
	arm_smmu_power_off(tbu->pwr);

	return phys;
}

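/*
 * arm_smmu_arch_ops::iova_to_phys_hard hook: pick the first stream ID
 * attached to the domain's device and run an ECATS translation on the
 * TBU that owns it.
 */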
static phys_addr_t qsmmuv500_iova_to_phys_hard(
	struct iommu_domain *domain, dma_addr_t iova)
{
	u16 sid;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct iommu_fwspec *fwspec;

	/* Select a sid */
	fwspec = smmu_domain->dev->iommu_fwspec;
	sid = (u16)fwspec->ids[0];

	return qsmmuv500_iova_to_phys(domain, iova, sid);
}

static void qsmmuv500_release_group_iommudata(void *data)
{
	kfree(data);
}

/*
 * Apply any ACTLR setting that matches this device's stream IDs to its
 * group.  All devices in a group must resolve to the same ACTLR value;
 * a conflicting match is treated as an error.
 */
static int qsmmuv500_device_group(struct device *dev,
				  struct iommu_group *group)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
	struct qsmmuv500_group_iommudata *iommudata;
	u32 actlr, i;
	struct arm_smmu_smr *smr;

	iommudata = to_qsmmuv500_group_iommudata(group);
	if (!iommudata) {
		iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
		if (!iommudata)
			return -ENOMEM;

		iommu_group_set_iommudata(group, iommudata,
					  qsmmuv500_release_group_iommudata);
	}

	for (i = 0; i < data->actlr_tbl_size; i++) {
		smr = &data->actlrs[i].smr;
		actlr = data->actlrs[i].actlr;

		if (!arm_smmu_fwspec_match_smr(fwspec, smr))
			continue;

		if (!iommudata->has_actlr) {
			iommudata->actlr = actlr;
			iommudata->has_actlr = true;
		} else if (iommudata->actlr != actlr) {
			return -EINVAL;
		}
	}

	return 0;
}

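/*
 * Per-context-bank initialisation hook.  If the device's group carries an
 * ACTLR override, program it into the context bank, note whether deep
 * prefetch is enabled (which requires the 16 KB alignment workaround in
 * the mapping path), and flush the TLB so no walks use stale attributes.
 */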
static void qsmmuv500_init_cb(struct arm_smmu_domain *smmu_domain,
			      struct device *dev)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct qsmmuv500_group_iommudata *iommudata =
		to_qsmmuv500_group_iommudata(dev->iommu_group);
	void __iomem *cb_base;
	const struct iommu_gather_ops *tlb;

	if (!iommudata->has_actlr)
		return;

	tlb = smmu_domain->pgtbl_cfg.tlb;
	cb_base = ARM_SMMU_CB_BASE(smmu) +
		  ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);

	writel_relaxed(iommudata->actlr, cb_base + ARM_SMMU_CB_ACTLR);

	/*
	 * Prefetch only works properly if the start and end of all
	 * buffers in the page table are aligned to 16 KB.
	 */
	if ((iommudata->actlr >> QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT) &
			QSMMUV500_ACTLR_DEEP_PREFETCH_MASK)
		smmu_domain->qsmmuv500_errata2_min_align = true;

	/*
	 * Flush the context bank after modifying ACTLR to ensure there
	 * are no cache entries with stale state.
	 */
	tlb->tlb_flush_all(smmu_domain);
}

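/*
 * device_for_each_child() callback: every TBU child must have completed
 * its own probe before the SMMU can be used, so a missing driver here
 * aborts (and ultimately defers) SMMU initialisation.  Probed TBUs are
 * linked into the architecture data's tbus list for stream-ID lookups.
 */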
static int qsmmuv500_tbu_register(struct device *dev, void *cookie)
{
	struct arm_smmu_device *smmu = cookie;
	struct qsmmuv500_tbu_device *tbu;
	struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);

	if (!dev->driver) {
		dev_err(dev, "TBU failed probe, QSMMUV500 cannot continue!\n");
		return -EINVAL;
	}

	tbu = dev_get_drvdata(dev);

	INIT_LIST_HEAD(&tbu->list);
	tbu->smmu = smmu;
	list_add(&tbu->list, &data->tbus);
	return 0;
}

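/*
 * Parse the optional "qcom,mmu500-errata-1" property, a flat list of
 * <sid mask> pairs naming the stream-match entries whose clients need the
 * errata-1 workaround.  A purely illustrative fragment (the values below
 * are hypothetical, not taken from any real board file):
 *
 *	qcom,mmu500-errata-1 = <0x800 0x7c0>,
 *			       <0xc00 0x7c0>;
 */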
static int qsmmuv500_parse_errata1(struct arm_smmu_device *smmu)
{
	int len, i;
	struct device *dev = smmu->dev;
	struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
	struct arm_smmu_smr *smrs;
	const __be32 *cell;

	cell = of_get_property(dev->of_node, "qcom,mmu500-errata-1", NULL);
	if (!cell)
		return 0;

	remote_spin_lock_init(&data->errata1_lock, ERRATA1_REMOTE_SPINLOCK);
	len = of_property_count_elems_of_size(
			dev->of_node, "qcom,mmu500-errata-1", sizeof(u32) * 2);
	if (len < 0)
		return 0;

	smrs = devm_kzalloc(dev, sizeof(*smrs) * len, GFP_KERNEL);
	if (!smrs)
		return -ENOMEM;

	for (i = 0; i < len; i++) {
		smrs[i].id = of_read_number(cell++, 1);
		smrs[i].mask = of_read_number(cell++, 1);
	}

	data->errata1_clients = smrs;
	data->num_errata1_clients = len;
	return 0;
}

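/*
 * Parse the optional "qcom,actlr" property into the ACTLR table.  Each
 * entry is a <sid mask actlr> triple: stream IDs matching the SMR are
 * given the corresponding ACTLR value by qsmmuv500_device_group().  An
 * illustrative (hypothetical) entry:
 *
 *	qcom,actlr = <0x880 0x8 0x303>;
 */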
static int qsmmuv500_read_actlr_tbl(struct arm_smmu_device *smmu)
{
	int len, i;
	struct device *dev = smmu->dev;
	struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
	struct actlr_setting *actlrs;
	const __be32 *cell;

	cell = of_get_property(dev->of_node, "qcom,actlr", NULL);
	if (!cell)
		return 0;

	len = of_property_count_elems_of_size(dev->of_node, "qcom,actlr",
					      sizeof(u32) * 3);
	if (len < 0)
		return 0;

	actlrs = devm_kzalloc(dev, sizeof(*actlrs) * len, GFP_KERNEL);
	if (!actlrs)
		return -ENOMEM;

	for (i = 0; i < len; i++) {
		actlrs[i].smr.id = of_read_number(cell++, 1);
		actlrs[i].smr.mask = of_read_number(cell++, 1);
		actlrs[i].actlr = of_read_number(cell++, 1);
	}

	data->actlrs = actlrs;
	data->actlr_tbl_size = len;
	return 0;
}

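/*
 * QSMMUV500 architecture-specific initialisation: allocate the archdata,
 * map the TCU register space and read the hardware version, parse the
 * errata and ACTLR tables, drop the MMU-500 cache-lock bit (if the secure
 * world permits it), then populate and register the TBU child devices.
 * For static context-bank configurations the device-tree parsing and
 * register writes are skipped entirely.
 */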
static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
{
	struct resource *res;
	struct device *dev = smmu->dev;
	struct qsmmuv500_archdata *data;
	struct platform_device *pdev;
	int ret;
	u32 val;
	void __iomem *reg;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->tbus);

	pdev = container_of(dev, struct platform_device, dev);
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcu-base");
	data->tcu_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->tcu_base))
		return PTR_ERR(data->tcu_base);

	data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
	smmu->archdata = data;

	if (arm_smmu_is_static_cb(smmu))
		return 0;

	ret = qsmmuv500_parse_errata1(smmu);
	if (ret)
		return ret;

	ret = qsmmuv500_read_actlr_tbl(smmu);
	if (ret)
		return ret;

	reg = ARM_SMMU_GR0(smmu);
	val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
	val &= ~ARM_MMU500_ACR_CACHE_LOCK;
	writel_relaxed(val, reg + ARM_SMMU_GR0_sACR);
	val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
	/*
	 * Modifying the nonsecure copy of the sACR register is only
	 * allowed if permission is given in the secure sACR register.
	 * Attempt to detect if we were able to update the value.
	 */
	WARN_ON(val & ARM_MMU500_ACR_CACHE_LOCK);

	ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
	if (ret)
		return ret;

	/* Attempt to register child devices */
	ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
	if (ret)
		return -EPROBE_DEFER;

	return 0;
}

struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
	.init = qsmmuv500_arch_init,
	.iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
	.init_context_bank = qsmmuv500_init_cb,
	.device_group = qsmmuv500_device_group,
};

static const struct of_device_id qsmmuv500_tbu_of_match[] = {
	{ .compatible = "qcom,qsmmuv500-tbu" },
	{}
};

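/*
 * Probe one TBU child node.  Required resources: the "base" and
 * "status-reg" register regions plus a two-cell "qcom,stream-id-range"
 * property giving the first stream ID and the number of stream IDs the
 * TBU services, e.g. (illustrative values only):
 *
 *	qcom,stream-id-range = <0x800 0x400>;
 */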
static int qsmmuv500_tbu_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct qsmmuv500_tbu_device *tbu;
	const __be32 *cell;
	int len;

	tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
	if (!tbu)
		return -ENOMEM;

	INIT_LIST_HEAD(&tbu->list);
	tbu->dev = dev;
	spin_lock_init(&tbu->halt_lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	tbu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(tbu->base))
		return PTR_ERR(tbu->base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "status-reg");
	tbu->status_reg = devm_ioremap_resource(dev, res);
	if (IS_ERR(tbu->status_reg))
		return PTR_ERR(tbu->status_reg);

	cell = of_get_property(dev->of_node, "qcom,stream-id-range", &len);
	if (!cell || len < 8)
		return -EINVAL;

	tbu->sid_start = of_read_number(cell, 1);
	tbu->num_sids = of_read_number(cell + 1, 1);

	tbu->pwr = arm_smmu_init_power_resources(pdev);
	if (IS_ERR(tbu->pwr))
		return PTR_ERR(tbu->pwr);

	dev_set_drvdata(dev, tbu);
	return 0;
}

static struct platform_driver qsmmuv500_tbu_driver = {
	.driver = {
		.name = "qsmmuv500-tbu",
		.of_match_table = of_match_ptr(qsmmuv500_tbu_of_match),
	},
	.probe = qsmmuv500_tbu_probe,
};

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");