Will Deacon45ae7cf2013-06-24 18:31:25 +01001/*
2 * IOMMU API for ARM architected SMMU implementations.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
16 *
17 * Copyright (C) 2013 ARM Limited
18 *
19 * Author: Will Deacon <will.deacon@arm.com>
20 *
21 * This driver currently supports:
22 * - SMMUv1 and v2 implementations
23 * - Stream-matching and stream-indexing
24 * - v7/v8 long-descriptor format
25 * - Non-secure access to the SMMU
Will Deacon45ae7cf2013-06-24 18:31:25 +010026 * - Context fault reporting
27 */
28
29#define pr_fmt(fmt) "arm-smmu: " fmt
30
Robin Murphy468f4942016-09-12 17:13:49 +010031#include <linux/atomic.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010032#include <linux/delay.h>
Robin Murphy9adb9592016-01-26 18:06:36 +000033#include <linux/dma-iommu.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010034#include <linux/dma-mapping.h>
35#include <linux/err.h>
36#include <linux/interrupt.h>
37#include <linux/io.h>
Robin Murphyf9a05f02016-04-13 18:13:01 +010038#include <linux/io-64-nonatomic-hi-lo.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010039#include <linux/iommu.h>
Mitchel Humpherys859a7322014-10-29 21:13:40 +000040#include <linux/iopoll.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010041#include <linux/module.h>
42#include <linux/of.h>
Robin Murphybae2c2d2015-07-29 19:46:05 +010043#include <linux/of_address.h>
Robin Murphyfe52d4f2016-09-12 17:13:52 +010044#include <linux/of_device.h>
Robin Murphy06e393e2016-09-12 17:13:55 +010045#include <linux/of_iommu.h>
Will Deacona9a1b0b2014-05-01 18:05:08 +010046#include <linux/pci.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010047#include <linux/platform_device.h>
48#include <linux/slab.h>
49#include <linux/spinlock.h>
Patrick Daly86960052017-12-04 18:53:13 -080050#include <soc/qcom/scm.h>
Patrick Dalyc11d1082016-09-01 15:52:44 -070051#include <soc/qcom/secure_buffer.h>
Patrick Daly1f8a2882016-09-12 17:32:05 -070052#include <linux/of_platform.h>
Patrick Daly2764f952016-09-06 19:22:44 -070053#include <linux/msm-bus.h>
54#include <dt-bindings/msm/msm-bus-ids.h>
Patrick Dalyda765c62017-09-11 16:31:07 -070055#include <linux/remote_spinlock.h>
56#include <linux/ktime.h>
57#include <trace/events/iommu.h>
Charan Teja Reddyf8464882017-12-05 20:29:05 +053058#include <linux/notifier.h>
Prakash Gupta5b8eb322018-01-09 15:16:39 +053059#include <dt-bindings/arm/arm-smmu.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010060
61#include <linux/amba/bus.h>
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +053062#include <soc/qcom/msm_tz_smmu.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010063
Will Deacon518f7132014-11-14 17:17:54 +000064#include "io-pgtable.h"
Will Deacon45ae7cf2013-06-24 18:31:25 +010065
Will Deacon45ae7cf2013-06-24 18:31:25 +010066/* Maximum number of context banks per SMMU */
67#define ARM_SMMU_MAX_CBS 128
68
Will Deacon45ae7cf2013-06-24 18:31:25 +010069/* SMMU global address space */
70#define ARM_SMMU_GR0(smmu) ((smmu)->base)
Will Deaconc757e852014-07-30 11:33:25 +010071#define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))
Will Deacon45ae7cf2013-06-24 18:31:25 +010072
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +000073/*
74 * SMMU global address space with conditional offset to access secure
75 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
76 * nsGFSYNR0: 0x450)
77 */
78#define ARM_SMMU_GR0_NS(smmu) \
79 ((smmu)->base + \
80 ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
81 ? 0x400 : 0))
82
Robin Murphyf9a05f02016-04-13 18:13:01 +010083/*
84 * Some 64-bit registers only make sense to write atomically, but in such
85 * cases all the data relevant to AArch32 formats lies within the lower word,
86 * therefore this actually makes more sense than it might first appear.
87 */
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010088#ifdef CONFIG_64BIT
Robin Murphyf9a05f02016-04-13 18:13:01 +010089#define smmu_write_atomic_lq writeq_relaxed
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010090#else
Robin Murphyf9a05f02016-04-13 18:13:01 +010091#define smmu_write_atomic_lq writel_relaxed
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010092#endif
93
Will Deacon45ae7cf2013-06-24 18:31:25 +010094/* Configuration registers */
95#define ARM_SMMU_GR0_sCR0 0x0
96#define sCR0_CLIENTPD (1 << 0)
97#define sCR0_GFRE (1 << 1)
98#define sCR0_GFIE (1 << 2)
99#define sCR0_GCFGFRE (1 << 4)
100#define sCR0_GCFGFIE (1 << 5)
101#define sCR0_USFCFG (1 << 10)
102#define sCR0_VMIDPNE (1 << 11)
103#define sCR0_PTM (1 << 12)
104#define sCR0_FB (1 << 13)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800105#define sCR0_VMID16EN (1 << 31)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100106#define sCR0_BSU_SHIFT 14
107#define sCR0_BSU_MASK 0x3
Patrick Daly7f377fe2017-10-06 17:37:10 -0700108#define sCR0_SHCFG_SHIFT 22
109#define sCR0_SHCFG_MASK 0x3
110#define sCR0_SHCFG_NSH 3
Will Deacon45ae7cf2013-06-24 18:31:25 +0100111
Peng Fan3ca37122016-05-03 21:50:30 +0800112/* Auxiliary Configuration register */
113#define ARM_SMMU_GR0_sACR 0x10
114
Will Deacon45ae7cf2013-06-24 18:31:25 +0100115/* Identification registers */
116#define ARM_SMMU_GR0_ID0 0x20
117#define ARM_SMMU_GR0_ID1 0x24
118#define ARM_SMMU_GR0_ID2 0x28
119#define ARM_SMMU_GR0_ID3 0x2c
120#define ARM_SMMU_GR0_ID4 0x30
121#define ARM_SMMU_GR0_ID5 0x34
122#define ARM_SMMU_GR0_ID6 0x38
123#define ARM_SMMU_GR0_ID7 0x3c
124#define ARM_SMMU_GR0_sGFSR 0x48
125#define ARM_SMMU_GR0_sGFSYNR0 0x50
126#define ARM_SMMU_GR0_sGFSYNR1 0x54
127#define ARM_SMMU_GR0_sGFSYNR2 0x58
Will Deacon45ae7cf2013-06-24 18:31:25 +0100128
129#define ID0_S1TS (1 << 30)
130#define ID0_S2TS (1 << 29)
131#define ID0_NTS (1 << 28)
132#define ID0_SMS (1 << 27)
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000133#define ID0_ATOSNS (1 << 26)
Robin Murphy7602b872016-04-28 17:12:09 +0100134#define ID0_PTFS_NO_AARCH32 (1 << 25)
135#define ID0_PTFS_NO_AARCH32S (1 << 24)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100136#define ID0_CTTW (1 << 14)
137#define ID0_NUMIRPT_SHIFT 16
138#define ID0_NUMIRPT_MASK 0xff
Olav Haugan3c8766d2014-08-22 17:12:32 -0700139#define ID0_NUMSIDB_SHIFT 9
140#define ID0_NUMSIDB_MASK 0xf
Will Deacon45ae7cf2013-06-24 18:31:25 +0100141#define ID0_NUMSMRG_SHIFT 0
142#define ID0_NUMSMRG_MASK 0xff
143
144#define ID1_PAGESIZE (1 << 31)
145#define ID1_NUMPAGENDXB_SHIFT 28
146#define ID1_NUMPAGENDXB_MASK 7
147#define ID1_NUMS2CB_SHIFT 16
148#define ID1_NUMS2CB_MASK 0xff
149#define ID1_NUMCB_SHIFT 0
150#define ID1_NUMCB_MASK 0xff
151
152#define ID2_OAS_SHIFT 4
153#define ID2_OAS_MASK 0xf
154#define ID2_IAS_SHIFT 0
155#define ID2_IAS_MASK 0xf
156#define ID2_UBS_SHIFT 8
157#define ID2_UBS_MASK 0xf
158#define ID2_PTFS_4K (1 << 12)
159#define ID2_PTFS_16K (1 << 13)
160#define ID2_PTFS_64K (1 << 14)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800161#define ID2_VMID16 (1 << 15)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100162
Peng Fan3ca37122016-05-03 21:50:30 +0800163#define ID7_MAJOR_SHIFT 4
164#define ID7_MAJOR_MASK 0xf
Will Deacon45ae7cf2013-06-24 18:31:25 +0100165
Will Deacon45ae7cf2013-06-24 18:31:25 +0100166/* Global TLB invalidation */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100167#define ARM_SMMU_GR0_TLBIVMID 0x64
168#define ARM_SMMU_GR0_TLBIALLNSNH 0x68
169#define ARM_SMMU_GR0_TLBIALLH 0x6c
170#define ARM_SMMU_GR0_sTLBGSYNC 0x70
171#define ARM_SMMU_GR0_sTLBGSTATUS 0x74
172#define sTLBGSTATUS_GSACTIVE (1 << 0)
Mitchel Humpherys849aa502015-11-09 11:50:58 -0800173#define TLB_LOOP_TIMEOUT 500000 /* 500ms */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100174
175/* Stream mapping registers */
176#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
177#define SMR_VALID (1 << 31)
178#define SMR_MASK_SHIFT 16
Patrick Dalyda688822017-05-17 20:12:48 -0700179#define SMR_MASK_MASK 0x7FFF
Charan Teja Reddy35144b02017-09-05 16:20:46 +0530180#define SID_MASK 0x7FFF
Will Deacon45ae7cf2013-06-24 18:31:25 +0100181#define SMR_ID_SHIFT 0
Will Deacon45ae7cf2013-06-24 18:31:25 +0100182
183#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
184#define S2CR_CBNDX_SHIFT 0
185#define S2CR_CBNDX_MASK 0xff
186#define S2CR_TYPE_SHIFT 16
187#define S2CR_TYPE_MASK 0x3
Patrick Daly7f377fe2017-10-06 17:37:10 -0700188#define S2CR_SHCFG_SHIFT 8
189#define S2CR_SHCFG_MASK 0x3
190#define S2CR_SHCFG_NSH 0x3
Robin Murphya754fd12016-09-12 17:13:50 +0100191enum arm_smmu_s2cr_type {
192 S2CR_TYPE_TRANS,
193 S2CR_TYPE_BYPASS,
194 S2CR_TYPE_FAULT,
195};
196
197#define S2CR_PRIVCFG_SHIFT 24
198#define S2CR_PRIVCFG_MASK 0x3
199enum arm_smmu_s2cr_privcfg {
200 S2CR_PRIVCFG_DEFAULT,
201 S2CR_PRIVCFG_DIPAN,
202 S2CR_PRIVCFG_UNPRIV,
203 S2CR_PRIVCFG_PRIV,
204};
Will Deacon45ae7cf2013-06-24 18:31:25 +0100205
206/* Context bank attribute registers */
207#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
208#define CBAR_VMID_SHIFT 0
209#define CBAR_VMID_MASK 0xff
Will Deacon57ca90f2014-02-06 14:59:05 +0000210#define CBAR_S1_BPSHCFG_SHIFT 8
211#define CBAR_S1_BPSHCFG_MASK 3
212#define CBAR_S1_BPSHCFG_NSH 3
Will Deacon45ae7cf2013-06-24 18:31:25 +0100213#define CBAR_S1_MEMATTR_SHIFT 12
214#define CBAR_S1_MEMATTR_MASK 0xf
215#define CBAR_S1_MEMATTR_WB 0xf
216#define CBAR_TYPE_SHIFT 16
217#define CBAR_TYPE_MASK 0x3
218#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
219#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
220#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
221#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
222#define CBAR_IRPTNDX_SHIFT 24
223#define CBAR_IRPTNDX_MASK 0xff
224
Shalaj Jain04059c52015-03-03 13:34:59 -0800225#define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2))
226#define CBFRSYNRA_SID_MASK (0xffff)
227
Will Deacon45ae7cf2013-06-24 18:31:25 +0100228#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
229#define CBA2R_RW64_32BIT (0 << 0)
230#define CBA2R_RW64_64BIT (1 << 0)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800231#define CBA2R_VMID_SHIFT 16
232#define CBA2R_VMID_MASK 0xffff
Will Deacon45ae7cf2013-06-24 18:31:25 +0100233
234/* Translation context bank */
235#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
Will Deaconc757e852014-07-30 11:33:25 +0100236#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))
Will Deacon45ae7cf2013-06-24 18:31:25 +0100237
238#define ARM_SMMU_CB_SCTLR 0x0
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100239#define ARM_SMMU_CB_ACTLR 0x4
Will Deacon45ae7cf2013-06-24 18:31:25 +0100240#define ARM_SMMU_CB_RESUME 0x8
241#define ARM_SMMU_CB_TTBCR2 0x10
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +0100242#define ARM_SMMU_CB_TTBR0 0x20
243#define ARM_SMMU_CB_TTBR1 0x28
Will Deacon45ae7cf2013-06-24 18:31:25 +0100244#define ARM_SMMU_CB_TTBCR 0x30
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600245#define ARM_SMMU_CB_CONTEXTIDR 0x34
Will Deacon45ae7cf2013-06-24 18:31:25 +0100246#define ARM_SMMU_CB_S1_MAIR0 0x38
Will Deacon518f7132014-11-14 17:17:54 +0000247#define ARM_SMMU_CB_S1_MAIR1 0x3c
Robin Murphyf9a05f02016-04-13 18:13:01 +0100248#define ARM_SMMU_CB_PAR 0x50
Will Deacon45ae7cf2013-06-24 18:31:25 +0100249#define ARM_SMMU_CB_FSR 0x58
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -0700250#define ARM_SMMU_CB_FSRRESTORE 0x5c
Robin Murphyf9a05f02016-04-13 18:13:01 +0100251#define ARM_SMMU_CB_FAR 0x60
Will Deacon45ae7cf2013-06-24 18:31:25 +0100252#define ARM_SMMU_CB_FSYNR0 0x68
Will Deacon518f7132014-11-14 17:17:54 +0000253#define ARM_SMMU_CB_S1_TLBIVA 0x600
Will Deacon1463fe42013-07-31 19:21:27 +0100254#define ARM_SMMU_CB_S1_TLBIASID 0x610
Patrick Dalye7069342017-07-11 12:35:55 -0700255#define ARM_SMMU_CB_S1_TLBIALL 0x618
Will Deacon518f7132014-11-14 17:17:54 +0000256#define ARM_SMMU_CB_S1_TLBIVAL 0x620
257#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
258#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
Mitchel Humpherysf3007992015-06-19 15:00:14 -0700259#define ARM_SMMU_CB_TLBSYNC 0x7f0
260#define ARM_SMMU_CB_TLBSTATUS 0x7f4
261#define TLBSTATUS_SACTIVE (1 << 0)
Robin Murphy661d9622015-05-27 17:09:34 +0100262#define ARM_SMMU_CB_ATS1PR 0x800
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000263#define ARM_SMMU_CB_ATSR 0x8f0
Will Deacon45ae7cf2013-06-24 18:31:25 +0100264
Patrick Daly7f377fe2017-10-06 17:37:10 -0700265#define SCTLR_SHCFG_SHIFT 22
266#define SCTLR_SHCFG_MASK 0x3
267#define SCTLR_SHCFG_NSH 0x3
Will Deacon45ae7cf2013-06-24 18:31:25 +0100268#define SCTLR_S1_ASIDPNE (1 << 12)
269#define SCTLR_CFCFG (1 << 7)
Charan Teja Reddyc682e472017-04-20 19:11:20 +0530270#define SCTLR_HUPCF (1 << 8)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100271#define SCTLR_CFIE (1 << 6)
272#define SCTLR_CFRE (1 << 5)
273#define SCTLR_E (1 << 4)
274#define SCTLR_AFE (1 << 2)
275#define SCTLR_TRE (1 << 1)
276#define SCTLR_M (1 << 0)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100277
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100278#define ARM_MMU500_ACTLR_CPRE (1 << 1)
279
Peng Fan3ca37122016-05-03 21:50:30 +0800280#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
281
Mitchel Humpherys952f40a2015-08-19 12:13:28 -0700282#define ARM_SMMU_IMPL_DEF0(smmu) \
283 ((smmu)->base + (2 * (1 << (smmu)->pgshift)))
284#define ARM_SMMU_IMPL_DEF1(smmu) \
285 ((smmu)->base + (6 * (1 << (smmu)->pgshift)))
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000286#define CB_PAR_F (1 << 0)
287
288#define ATSR_ACTIVE (1 << 0)
289
Will Deacon45ae7cf2013-06-24 18:31:25 +0100290#define RESUME_RETRY (0 << 0)
291#define RESUME_TERMINATE (1 << 0)
292
Will Deacon45ae7cf2013-06-24 18:31:25 +0100293#define TTBCR2_SEP_SHIFT 15
Will Deacon5dc56162015-05-08 17:44:22 +0100294#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
Tomasz Nowicki681e6612017-01-16 08:16:07 +0100295#define TTBCR2_AS (1 << 4)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100296
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +0100297#define TTBRn_ASID_SHIFT 48
Will Deacon45ae7cf2013-06-24 18:31:25 +0100298
299#define FSR_MULTI (1 << 31)
300#define FSR_SS (1 << 30)
301#define FSR_UUT (1 << 8)
302#define FSR_ASF (1 << 7)
303#define FSR_TLBLKF (1 << 6)
304#define FSR_TLBMCF (1 << 5)
305#define FSR_EF (1 << 4)
306#define FSR_PF (1 << 3)
307#define FSR_AFF (1 << 2)
308#define FSR_TF (1 << 1)
309
Mitchel Humpherys29073202014-07-08 09:52:18 -0700310#define FSR_IGN (FSR_AFF | FSR_ASF | \
311 FSR_TLBMCF | FSR_TLBLKF)
312#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
Will Deaconadaba322013-07-31 19:21:26 +0100313 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100314
315#define FSYNR0_WNR (1 << 4)
316
Will Deacon4cf740b2014-07-14 19:47:39 +0100317static int force_stage;
Robin Murphy25a1c962016-02-10 14:25:33 +0000318module_param(force_stage, int, S_IRUGO);
Will Deacon4cf740b2014-07-14 19:47:39 +0100319MODULE_PARM_DESC(force_stage,
320 "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
Patrick Dalya728cfd2016-11-15 17:49:29 -0800321static bool disable_bypass;
Robin Murphy25a1c962016-02-10 14:25:33 +0000322module_param(disable_bypass, bool, S_IRUGO);
323MODULE_PARM_DESC(disable_bypass,
324 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
Will Deacon4cf740b2014-07-14 19:47:39 +0100325
Robin Murphy09360402014-08-28 17:51:59 +0100326enum arm_smmu_arch_version {
Robin Murphyb7862e32016-04-13 18:13:03 +0100327 ARM_SMMU_V1,
328 ARM_SMMU_V1_64K,
Robin Murphy09360402014-08-28 17:51:59 +0100329 ARM_SMMU_V2,
330};
331
Robin Murphy67b65a32016-04-13 18:12:57 +0100332enum arm_smmu_implementation {
333 GENERIC_SMMU,
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100334 ARM_MMU500,
Robin Murphye086d912016-04-13 18:12:58 +0100335 CAVIUM_SMMUV2,
Patrick Dalyf0d4e212016-06-20 15:50:14 -0700336 QCOM_SMMUV2,
Patrick Daly1f8a2882016-09-12 17:32:05 -0700337 QCOM_SMMUV500,
Robin Murphy67b65a32016-04-13 18:12:57 +0100338};
339
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -0700340struct arm_smmu_impl_def_reg {
341 u32 offset;
342 u32 value;
343};
344
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -0700345/*
346 * attach_count
347 * The SMR and S2CR registers are only programmed when the number of
348 * devices attached to the iommu using these registers is > 0. This
349 * is required for the "SID switch" use case for secure display.
350 * Protected by stream_map_mutex.
351 */
Robin Murphya754fd12016-09-12 17:13:50 +0100352struct arm_smmu_s2cr {
Robin Murphy6668f692016-09-12 17:13:54 +0100353 struct iommu_group *group;
354 int count;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -0700355 int attach_count;
Robin Murphya754fd12016-09-12 17:13:50 +0100356 enum arm_smmu_s2cr_type type;
357 enum arm_smmu_s2cr_privcfg privcfg;
358 u8 cbndx;
Patrick Dalyda688822017-05-17 20:12:48 -0700359 bool cb_handoff;
Robin Murphya754fd12016-09-12 17:13:50 +0100360};
361
362#define s2cr_init_val (struct arm_smmu_s2cr){ \
363 .type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \
Patrick Dalyda688822017-05-17 20:12:48 -0700364 .cb_handoff = false, \
Robin Murphya754fd12016-09-12 17:13:50 +0100365}
366
Will Deacon45ae7cf2013-06-24 18:31:25 +0100367struct arm_smmu_smr {
Will Deacon45ae7cf2013-06-24 18:31:25 +0100368 u16 mask;
369 u16 id;
Robin Murphy468f4942016-09-12 17:13:49 +0100370 bool valid;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100371};
372
Robin Murphy6549a1f2017-08-08 14:56:14 +0100373struct arm_smmu_cb {
374 u64 ttbr[2];
375 u32 tcr[2];
376 u32 mair[2];
377 struct arm_smmu_cfg *cfg;
Patrick Dalyad521082018-04-06 18:07:13 -0700378 u32 actlr;
Patrick Daly25317e82018-05-07 12:35:29 -0700379 bool has_actlr;
Robin Murphy6549a1f2017-08-08 14:56:14 +0100380 u32 attributes;
381};
382
Will Deacona9a1b0b2014-05-01 18:05:08 +0100383struct arm_smmu_master_cfg {
Robin Murphyd5b41782016-09-14 15:21:39 +0100384 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +0100385 s16 smendx[];
Will Deacon45ae7cf2013-06-24 18:31:25 +0100386};
Robin Murphy468f4942016-09-12 17:13:49 +0100387#define INVALID_SMENDX -1
Robin Murphy06e393e2016-09-12 17:13:55 +0100388#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
389#define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu)
Robin Murphye7595e52016-11-07 18:25:09 +0000390#define fwspec_smendx(fw, i) \
391 (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
Robin Murphy06e393e2016-09-12 17:13:55 +0100392#define for_each_cfg_sme(fw, i, idx) \
Robin Murphye7595e52016-11-07 18:25:09 +0000393 for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100394
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700395/*
396 * Describes resources required for on/off power operation.
397 * Separate reference counts are provided for atomic and
398 * non-atomic operations.
399 */
400struct arm_smmu_power_resources {
401 struct platform_device *pdev;
402 struct device *dev;
403
404 struct clk **clocks;
405 int num_clocks;
406
407 struct regulator_bulk_data *gdscs;
408 int num_gdscs;
409
410 uint32_t bus_client;
411 struct msm_bus_scale_pdata *bus_dt_data;
412
413 /* Protects power_count */
414 struct mutex power_lock;
415 int power_count;
416
417 /* Protects clock_refs_count */
418 spinlock_t clock_refs_lock;
419 int clock_refs_count;
Prakash Guptafad87ca2017-05-16 12:13:02 +0530420 int regulator_defer;
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700421};
422
Patrick Daly03330cc2017-08-11 14:56:38 -0700423struct arm_smmu_arch_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100424struct arm_smmu_device {
425 struct device *dev;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100426
427 void __iomem *base;
428 unsigned long size;
Charan Teja Reddy35144b02017-09-05 16:20:46 +0530429 phys_addr_t phys_addr;
Will Deaconc757e852014-07-30 11:33:25 +0100430 unsigned long pgshift;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100431
432#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
433#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1)
434#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
435#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
436#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000437#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800438#define ARM_SMMU_FEAT_VMID16 (1 << 6)
Robin Murphy7602b872016-04-28 17:12:09 +0100439#define ARM_SMMU_FEAT_FMT_AARCH64_4K (1 << 7)
440#define ARM_SMMU_FEAT_FMT_AARCH64_16K (1 << 8)
441#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
442#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
443#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100444 u32 features;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000445
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000446 u32 options;
Robin Murphy09360402014-08-28 17:51:59 +0100447 enum arm_smmu_arch_version version;
Robin Murphy67b65a32016-04-13 18:12:57 +0100448 enum arm_smmu_implementation model;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100449
450 u32 num_context_banks;
451 u32 num_s2_context_banks;
452 DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
Charan Teja Reddy4971ca42018-01-23 18:27:08 +0530453 DECLARE_BITMAP(secure_context_map, ARM_SMMU_MAX_CBS);
Robin Murphy6549a1f2017-08-08 14:56:14 +0100454 struct arm_smmu_cb *cbs;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100455 atomic_t irptndx;
456
457 u32 num_mapping_groups;
Robin Murphy53867802016-09-12 17:13:48 +0100458 u16 streamid_mask;
459 u16 smr_mask_mask;
Robin Murphy468f4942016-09-12 17:13:49 +0100460 struct arm_smmu_smr *smrs;
Robin Murphya754fd12016-09-12 17:13:50 +0100461 struct arm_smmu_s2cr *s2crs;
Robin Murphy6668f692016-09-12 17:13:54 +0100462 struct mutex stream_map_mutex;
Swathi Sridharfa26bd52018-04-25 18:26:14 -0700463 struct mutex iommu_group_mutex;
Will Deacon518f7132014-11-14 17:17:54 +0000464 unsigned long va_size;
465 unsigned long ipa_size;
466 unsigned long pa_size;
Robin Murphyd5466352016-05-09 17:20:09 +0100467 unsigned long pgsize_bitmap;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100468
469 u32 num_global_irqs;
470 u32 num_context_irqs;
471 unsigned int *irqs;
472
Patrick Daly8e3371a2017-02-13 22:14:53 -0800473 struct list_head list;
474
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800475 u32 cavium_id_base; /* Specific to Cavium */
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -0700476 /* Specific to QCOM */
477 struct arm_smmu_impl_def_reg *impl_def_attach_registers;
478 unsigned int num_impl_def_attach_registers;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -0800479
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700480 struct arm_smmu_power_resources *pwr;
Charan Teja Reddyf8464882017-12-05 20:29:05 +0530481 struct notifier_block regulator_nb;
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700482
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -0800483 spinlock_t atos_lock;
Patrick Dalyc190d932016-08-30 17:23:28 -0700484
485 /* protects idr */
486 struct mutex idr_mutex;
487 struct idr asid_idr;
Patrick Dalyd7476202016-09-08 18:23:28 -0700488
489 struct arm_smmu_arch_ops *arch_ops;
490 void *archdata;
Charan Teja Reddy35144b02017-09-05 16:20:46 +0530491
492 enum tz_smmu_device_id sec_id;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100493};
494
Robin Murphy7602b872016-04-28 17:12:09 +0100495enum arm_smmu_context_fmt {
496 ARM_SMMU_CTX_FMT_NONE,
497 ARM_SMMU_CTX_FMT_AARCH64,
498 ARM_SMMU_CTX_FMT_AARCH32_L,
499 ARM_SMMU_CTX_FMT_AARCH32_S,
Will Deacon45ae7cf2013-06-24 18:31:25 +0100500};
501
502struct arm_smmu_cfg {
Will Deacon45ae7cf2013-06-24 18:31:25 +0100503 u8 cbndx;
504 u8 irptndx;
505 u32 cbar;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600506 u32 procid;
507 u16 asid;
Robin Murphy7602b872016-04-28 17:12:09 +0100508 enum arm_smmu_context_fmt fmt;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100509};
Dan Carpenterfaea13b72013-08-21 09:33:30 +0100510#define INVALID_IRPTNDX 0xff
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600511#define INVALID_CBNDX 0xff
512#define INVALID_ASID 0xffff
Patrick Dalyc190d932016-08-30 17:23:28 -0700513/*
514 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
515 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
516 */
517#define MAX_ASID 0xff
Will Deacon45ae7cf2013-06-24 18:31:25 +0100518
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600519#define ARM_SMMU_CB_ASID(smmu, cfg) ((cfg)->asid)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800520#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
Will Deaconecfadb62013-07-31 19:21:28 +0100521
Will Deaconc752ce42014-06-25 22:46:31 +0100522enum arm_smmu_domain_stage {
523 ARM_SMMU_DOMAIN_S1 = 0,
524 ARM_SMMU_DOMAIN_S2,
525 ARM_SMMU_DOMAIN_NESTED,
526};
527
Patrick Dalyc11d1082016-09-01 15:52:44 -0700528struct arm_smmu_pte_info {
529 void *virt_addr;
530 size_t size;
531 struct list_head entry;
532};
533
Will Deacon45ae7cf2013-06-24 18:31:25 +0100534struct arm_smmu_domain {
Will Deacon44680ee2014-06-25 11:29:12 +0100535 struct arm_smmu_device *smmu;
Patrick Dalyea63baa2017-02-13 17:11:33 -0800536 struct device *dev;
Will Deacon518f7132014-11-14 17:17:54 +0000537 struct io_pgtable_ops *pgtbl_ops;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -0700538 struct io_pgtable_cfg pgtbl_cfg;
Will Deacon518f7132014-11-14 17:17:54 +0000539 spinlock_t pgtbl_lock;
Will Deacon44680ee2014-06-25 11:29:12 +0100540 struct arm_smmu_cfg cfg;
Will Deaconc752ce42014-06-25 22:46:31 +0100541 enum arm_smmu_domain_stage stage;
Will Deacon518f7132014-11-14 17:17:54 +0000542 struct mutex init_mutex; /* Protects smmu pointer */
Patrick Dalyc190d932016-08-30 17:23:28 -0700543 u32 attributes;
Charan Teja Reddy35144b02017-09-05 16:20:46 +0530544 bool slave_side_secure;
Patrick Dalyc11d1082016-09-01 15:52:44 -0700545 u32 secure_vmid;
546 struct list_head pte_info_list;
547 struct list_head unassign_list;
Patrick Dalye271f212016-10-04 13:24:49 -0700548 struct mutex assign_lock;
Patrick Dalyb7dfda72016-10-04 14:42:58 -0700549 struct list_head secure_pool_list;
Patrick Daly2d600832018-02-11 15:12:55 -0800550 /* nonsecure pool protected by pgtbl_lock */
551 struct list_head nonsecure_pool;
Joerg Roedel1d672632015-03-26 13:43:10 +0100552 struct iommu_domain domain;
Patrick Dalyda765c62017-09-11 16:31:07 -0700553
554 bool qsmmuv500_errata1_init;
555 bool qsmmuv500_errata1_client;
Patrick Daly23301482017-10-12 16:18:25 -0700556 bool qsmmuv500_errata2_min_align;
Prakash Guptac2e909a2018-03-29 11:23:06 +0530557 bool is_force_guard_page;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100558};
559
Patrick Daly8e3371a2017-02-13 22:14:53 -0800560static DEFINE_SPINLOCK(arm_smmu_devices_lock);
561static LIST_HEAD(arm_smmu_devices);
562
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000563struct arm_smmu_option_prop {
564 u32 opt;
565 const char *prop;
566};
567
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800568static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
569
Robin Murphy7e96c742016-09-14 15:26:46 +0100570static bool using_legacy_binding, using_generic_binding;
571
Mitchel Humpherys29073202014-07-08 09:52:18 -0700572static struct arm_smmu_option_prop arm_smmu_options[] = {
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000573 { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -0800574 { ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
Patrick Daly59b6d202017-06-12 13:12:15 -0700575 { ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
Patrick Dalyc190d932016-08-30 17:23:28 -0700576 { ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
Patrick Daly4423d3e2017-05-04 18:17:51 -0700577 { ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
Patrick Dalye7069342017-07-11 12:35:55 -0700578 { ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
Patrick Daly62ba1922017-08-30 16:47:18 -0700579 { ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" },
Patrick Daly83174c12017-10-26 12:31:15 -0700580 { ARM_SMMU_OPT_MMU500_ERRATA1, "qcom,mmu500-errata-1" },
Charan Teja Reddyf0758df2017-09-04 18:52:07 +0530581 { ARM_SMMU_OPT_STATIC_CB, "qcom,enable-static-cb"},
Charan Teja Reddyf8464882017-12-05 20:29:05 +0530582 { ARM_SMMU_OPT_HALT, "qcom,enable-smmu-halt"},
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000583 { 0, NULL},
584};
585
Mitchel Humpherysb8be4132015-02-06 14:25:10 -0800586static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
587 dma_addr_t iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -0700588static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
589 dma_addr_t iova);
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -0600590static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -0800591
Patrick Dalyc11d1082016-09-01 15:52:44 -0700592static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
593static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
Patrick Dalye271f212016-10-04 13:24:49 -0700594static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -0700595static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);
596
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -0700597static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
598 dma_addr_t iova);
599
Patrick Dalyef6c1dc2016-11-16 14:35:23 -0800600static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);
601
Patrick Dalyda688822017-05-17 20:12:48 -0700602static int arm_smmu_alloc_cb(struct iommu_domain *domain,
603 struct arm_smmu_device *smmu,
604 struct device *dev);
Patrick Dalyda765c62017-09-11 16:31:07 -0700605static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops;
Patrick Dalyda688822017-05-17 20:12:48 -0700606
Charan Teja Reddyf0758df2017-09-04 18:52:07 +0530607static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu);
Charan Teja Reddy35144b02017-09-05 16:20:46 +0530608static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain);
609static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain);
Patrick Dalycf93cac2018-05-16 20:51:04 -0700610static bool arm_smmu_opt_hibernation(struct arm_smmu_device *smmu);
Charan Teja Reddyf0758df2017-09-04 18:52:07 +0530611
Charan Teja Reddy313991e2018-03-12 12:19:31 +0530612static int msm_secure_smmu_map(struct iommu_domain *domain, unsigned long iova,
613 phys_addr_t paddr, size_t size, int prot);
614static size_t msm_secure_smmu_unmap(struct iommu_domain *domain,
615 unsigned long iova,
616 size_t size);
617static size_t msm_secure_smmu_map_sg(struct iommu_domain *domain,
618 unsigned long iova,
619 struct scatterlist *sg,
620 unsigned int nents, int prot);
621
Joerg Roedel1d672632015-03-26 13:43:10 +0100622static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
623{
624 return container_of(dom, struct arm_smmu_domain, domain);
625}
626
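/*
 * Latch devicetree option properties into smmu->options; skip-init is
 * dropped when hibernation support makes it incompatible.
 */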
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000627static void parse_driver_options(struct arm_smmu_device *smmu)
628{
629 int i = 0;
Mitchel Humpherys29073202014-07-08 09:52:18 -0700630
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000631 do {
632 if (of_property_read_bool(smmu->dev->of_node,
633 arm_smmu_options[i].prop)) {
634 smmu->options |= arm_smmu_options[i].opt;
Mitchel Humpherysba822582015-10-20 11:37:41 -0700635 dev_dbg(smmu->dev, "option %s\n",
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000636 arm_smmu_options[i].prop);
637 }
638 } while (arm_smmu_options[++i].opt);
Patrick Dalycf93cac2018-05-16 20:51:04 -0700639
640 if (arm_smmu_opt_hibernation(smmu) &&
Vijayanand Jittaad9f9a52019-03-06 10:10:45 +0530641 (smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
Patrick Dalycf93cac2018-05-16 20:51:04 -0700642 dev_info(smmu->dev,
643 "Disabling incompatible option: skip-init\n");
644 smmu->options &= ~ARM_SMMU_OPT_SKIP_INIT;
645 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000646}
647
Patrick Dalyc190d932016-08-30 17:23:28 -0700648static bool is_dynamic_domain(struct iommu_domain *domain)
649{
650 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
651
652 return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
653}
654
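/*
 * Ask the secure world (via SCM) to restore the secure configuration of
 * context bank 'cb'; a no-op unless static context banks are in use.
 */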
Charan Teja Reddyec6f7822018-01-10 17:32:52 +0530655static int arm_smmu_restore_sec_cfg(struct arm_smmu_device *smmu, u32 cb)
Charan Teja Reddy35144b02017-09-05 16:20:46 +0530656{
657 int ret;
658 int scm_ret = 0;
659
660 if (!arm_smmu_is_static_cb(smmu))
661 return 0;
662
Charan Teja Reddyec6f7822018-01-10 17:32:52 +0530663 ret = scm_restore_sec_cfg(smmu->sec_id, cb, &scm_ret);
Charan Teja Reddy35144b02017-09-05 16:20:46 +0530664 if (ret || scm_ret) {
665 pr_err("scm call IOMMU_SECURE_CFG failed\n");
666 return -EINVAL;
667 }
668
669 return 0;
670}
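/*
 * Page tables are coherent if forced via the domain attribute or if the
 * SMMU device itself is DMA-coherent.
 */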
Liam Mark53cf2342016-12-20 11:36:07 -0800671static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
672{
673 if (smmu_domain->attributes &
674 (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT))
675 return true;
676 else if (smmu_domain->smmu && smmu_domain->smmu->dev)
677 return smmu_domain->smmu->dev->archdata.dma_coherent;
678 else
679 return false;
680}
681
Charan Teja Reddyf0758df2017-09-04 18:52:07 +0530682static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu)
683{
684 return smmu->options & ARM_SMMU_OPT_STATIC_CB;
685}
686
Charan Teja Reddy35144b02017-09-05 16:20:46 +0530687static bool arm_smmu_has_secure_vmid(struct arm_smmu_domain *smmu_domain)
Patrick Dalye271f212016-10-04 13:24:49 -0700688{
689 return (smmu_domain->secure_vmid != VMID_INVAL);
690}
691
Charan Teja Reddy35144b02017-09-05 16:20:46 +0530692static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain)
693{
694 return arm_smmu_has_secure_vmid(smmu_domain) &&
695 smmu_domain->slave_side_secure;
696}
697
698static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain)
699{
700 return arm_smmu_has_secure_vmid(smmu_domain)
701 && !smmu_domain->slave_side_secure;
702}
703
Patrick Dalye271f212016-10-04 13:24:49 -0700704static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
705{
Charan Teja Reddy35144b02017-09-05 16:20:46 +0530706 if (arm_smmu_is_master_side_secure(smmu_domain))
Patrick Dalye271f212016-10-04 13:24:49 -0700707 mutex_lock(&smmu_domain->assign_lock);
708}
709
710static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
711{
Charan Teja Reddy35144b02017-09-05 16:20:46 +0530712 if (arm_smmu_is_master_side_secure(smmu_domain))
Patrick Dalye271f212016-10-04 13:24:49 -0700713 mutex_unlock(&smmu_domain->assign_lock);
714}
715
Patrick Dalyaddf1f82018-04-23 14:39:19 -0700716static bool arm_smmu_opt_hibernation(struct arm_smmu_device *smmu)
717{
Patrick Dalycf93cac2018-05-16 20:51:04 -0700718 return IS_ENABLED(CONFIG_HIBERNATION);
Patrick Dalyaddf1f82018-04-23 14:39:19 -0700719}
720
Patrick Daly03330cc2017-08-11 14:56:38 -0700721/*
722 * init()
723 * Hook for additional device tree parsing at probe time.
724 *
725 * device_reset()
726 * Hook for one-time architecture-specific register settings.
727 *
728 * iova_to_phys_hard()
729 * Provides debug information. May be called from the context fault irq handler.
730 *
731 * init_context_bank()
732 * Hook for architecture-specific settings which require knowledge of the
733 * dynamically allocated context bank number.
734 *
735 * device_group()
736 * Hook for checking whether a device is compatible with a given group.
737 */
738struct arm_smmu_arch_ops {
739 int (*init)(struct arm_smmu_device *smmu);
740 void (*device_reset)(struct arm_smmu_device *smmu);
741 phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
742 dma_addr_t iova);
743 void (*init_context_bank)(struct arm_smmu_domain *smmu_domain,
744 struct device *dev);
745 int (*device_group)(struct device *dev, struct iommu_group *group);
746};
747
748static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
749{
750 if (!smmu->arch_ops)
751 return 0;
752 if (!smmu->arch_ops->init)
753 return 0;
754 return smmu->arch_ops->init(smmu);
755}
756
757static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
758{
759 if (!smmu->arch_ops)
760 return;
761 if (!smmu->arch_ops->device_reset)
762 return;
763 return smmu->arch_ops->device_reset(smmu);
764}
765
766static void arm_smmu_arch_init_context_bank(
767 struct arm_smmu_domain *smmu_domain, struct device *dev)
768{
769 struct arm_smmu_device *smmu = smmu_domain->smmu;
770
771 if (!smmu->arch_ops)
772 return;
773 if (!smmu->arch_ops->init_context_bank)
774 return;
775 return smmu->arch_ops->init_context_bank(smmu_domain, dev);
776}
777
778static int arm_smmu_arch_device_group(struct device *dev,
779 struct iommu_group *group)
780{
781 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
782 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
783
784 if (!smmu->arch_ops)
785 return 0;
786 if (!smmu->arch_ops->device_group)
787 return 0;
788 return smmu->arch_ops->device_group(dev, group);
789}
790
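/*
 * Return (with a reference held) the OF node used for the legacy binding:
 * the host bridge's parent node for PCI devices, otherwise the device's
 * own node.
 */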
Will Deacon8f68f8e2014-07-15 11:27:08 +0100791static struct device_node *dev_get_dev_node(struct device *dev)
Will Deacona9a1b0b2014-05-01 18:05:08 +0100792{
793 if (dev_is_pci(dev)) {
794 struct pci_bus *bus = to_pci_dev(dev)->bus;
Mitchel Humpherys29073202014-07-08 09:52:18 -0700795
Will Deacona9a1b0b2014-05-01 18:05:08 +0100796 while (!pci_is_root_bus(bus))
797 bus = bus->parent;
Robin Murphyd5b41782016-09-14 15:21:39 +0100798 return of_node_get(bus->bridge->parent->of_node);
Will Deacona9a1b0b2014-05-01 18:05:08 +0100799 }
800
Robin Murphyd5b41782016-09-14 15:21:39 +0100801 return of_node_get(dev->of_node);
Will Deacona9a1b0b2014-05-01 18:05:08 +0100802}
803
Robin Murphyd5b41782016-09-14 15:21:39 +0100804static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100805{
Robin Murphyd5b41782016-09-14 15:21:39 +0100806 *((__be32 *)data) = cpu_to_be32(alias);
807 return 0; /* Continue walking */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100808}
809
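/*
 * driver_for_each_device() callback: returns 1 once an SMMU whose
 * "mmu-masters" list references the master node passed via 'data' is found,
 * handing that SMMU's struct device back through the same pointer.
 */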
Robin Murphyd5b41782016-09-14 15:21:39 +0100810static int __find_legacy_master_phandle(struct device *dev, void *data)
Will Deacona9a1b0b2014-05-01 18:05:08 +0100811{
Robin Murphyd5b41782016-09-14 15:21:39 +0100812 struct of_phandle_iterator *it = *(void **)data;
813 struct device_node *np = it->node;
814 int err;
Will Deacona9a1b0b2014-05-01 18:05:08 +0100815
Robin Murphyd5b41782016-09-14 15:21:39 +0100816 of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
817 "#stream-id-cells", 0)
818 if (it->node == np) {
819 *(void **)data = dev;
820 return 1;
Olav Haugan3c8766d2014-08-22 17:12:32 -0700821 }
Robin Murphyd5b41782016-09-14 15:21:39 +0100822 it->node = np;
823 return err == -ENOENT ? 0 : err;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100824}
825
Robin Murphyfe52d4f2016-09-12 17:13:52 +0100826static struct platform_driver arm_smmu_driver;
Robin Murphy06e393e2016-09-12 17:13:55 +0100827static struct iommu_ops arm_smmu_ops;
Robin Murphyfe52d4f2016-09-12 17:13:52 +0100828
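/*
 * Resolve a master's stream IDs from the legacy "mmu-masters" binding and
 * record them in the device's iommu_fwspec.
 */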
Robin Murphy06e393e2016-09-12 17:13:55 +0100829static int arm_smmu_register_legacy_master(struct device *dev,
830 struct arm_smmu_device **smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100831{
Robin Murphy06e393e2016-09-12 17:13:55 +0100832 struct device *smmu_dev;
Robin Murphyd5b41782016-09-14 15:21:39 +0100833 struct device_node *np;
834 struct of_phandle_iterator it;
835 void *data = &it;
Robin Murphy06e393e2016-09-12 17:13:55 +0100836 u32 *sids;
Robin Murphyd5b41782016-09-14 15:21:39 +0100837 __be32 pci_sid;
838 int err = 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100839
Stephen Boydfecdeef2017-03-01 16:53:19 -0800840 memset(&it, 0, sizeof(it));
Robin Murphyd5b41782016-09-14 15:21:39 +0100841 np = dev_get_dev_node(dev);
842 if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
843 of_node_put(np);
844 return -ENODEV;
845 }
846
847 it.node = np;
Robin Murphyfe52d4f2016-09-12 17:13:52 +0100848 err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
849 __find_legacy_master_phandle);
Robin Murphy06e393e2016-09-12 17:13:55 +0100850 smmu_dev = data;
Robin Murphyd5b41782016-09-14 15:21:39 +0100851 of_node_put(np);
852 if (err == 0)
853 return -ENODEV;
854 if (err < 0)
855 return err;
Will Deacon44680ee2014-06-25 11:29:12 +0100856
Robin Murphyd5b41782016-09-14 15:21:39 +0100857 if (dev_is_pci(dev)) {
858 /* "mmu-masters" assumes Stream ID == Requester ID */
859 pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
860 &pci_sid);
861 it.cur = &pci_sid;
862 it.cur_count = 1;
863 }
864
Robin Murphy06e393e2016-09-12 17:13:55 +0100865 err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
866 &arm_smmu_ops);
867 if (err)
868 return err;
869
870 sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
871 if (!sids)
Robin Murphyd5b41782016-09-14 15:21:39 +0100872 return -ENOMEM;
873
Robin Murphy06e393e2016-09-12 17:13:55 +0100874 *smmu = dev_get_drvdata(smmu_dev);
875 of_phandle_iterator_args(&it, sids, it.cur_count);
876 err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
877 kfree(sids);
878 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100879}
880
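/* Atomically claim the first clear bit in [start, end); returns -ENOSPC if none is free. */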
881static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
882{
883 int idx;
884
885 do {
886 idx = find_next_zero_bit(map, end, start);
887 if (idx == end)
888 return -ENOSPC;
889 } while (test_and_set_bit(idx, map));
890
891 return idx;
892}
893
894static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
895{
896 clear_bit(idx, map);
897}
898
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700899static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700900{
901 int i, ret = 0;
902
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700903 for (i = 0; i < pwr->num_clocks; ++i) {
904 ret = clk_prepare(pwr->clocks[i]);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700905 if (ret) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700906 dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700907 while (i--)
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700908 clk_unprepare(pwr->clocks[i]);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700909 break;
910 }
911 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700912 return ret;
913}
914
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700915static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700916{
917 int i;
918
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700919 for (i = pwr->num_clocks; i; --i)
920 clk_unprepare(pwr->clocks[i - 1]);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700921}
922
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700923static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -0700924{
925 int i, ret = 0;
Patrick Daly8befb662016-08-17 20:03:28 -0700926
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700927 for (i = 0; i < pwr->num_clocks; ++i) {
928 ret = clk_enable(pwr->clocks[i]);
Patrick Daly8befb662016-08-17 20:03:28 -0700929 if (ret) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700930 dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
Patrick Daly8befb662016-08-17 20:03:28 -0700931 while (i--)
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700932 clk_disable(pwr->clocks[i]);
Patrick Daly8befb662016-08-17 20:03:28 -0700933 break;
934 }
935 }
936
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700937 return ret;
938}
Patrick Daly8befb662016-08-17 20:03:28 -0700939
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700940static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
941{
942 int i;
943
944 for (i = pwr->num_clocks; i; --i)
945 clk_disable(pwr->clocks[i - 1]);
946}
947
948static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
949{
950 if (!pwr->bus_client)
951 return 0;
952 return msm_bus_scale_client_update_request(pwr->bus_client, 1);
953}
954
955static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
956{
957 if (!pwr->bus_client)
958 return;
959 WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
960}
961
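/* Enable every GDSC regulator, rolling back those already enabled on failure. */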
Patrick Dalyb26f97c2017-08-11 15:24:20 -0700962static int arm_smmu_enable_regulators(struct arm_smmu_power_resources *pwr)
963{
964 struct regulator_bulk_data *consumers;
965 int num_consumers, ret;
966 int i;
967
968 num_consumers = pwr->num_gdscs;
969 consumers = pwr->gdscs;
970 for (i = 0; i < num_consumers; i++) {
971 ret = regulator_enable(consumers[i].consumer);
972 if (ret)
973 goto out;
974 }
975 return 0;
976
977out:
978 i -= 1;
979 for (; i >= 0; i--)
980 regulator_disable(consumers[i].consumer);
981 return ret;
982}
983
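/*
 * Disable every GDSC regulator (honouring the configured defer time); on
 * failure, re-enable the ones already turned off.
 */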
Prakash Guptafad87ca2017-05-16 12:13:02 +0530984static int arm_smmu_disable_regulators(struct arm_smmu_power_resources *pwr)
985{
986 struct regulator_bulk_data *consumers;
987 int i;
988 int num_consumers, ret, r;
989
990 num_consumers = pwr->num_gdscs;
991 consumers = pwr->gdscs;
992 for (i = num_consumers - 1; i >= 0; --i) {
993 ret = regulator_disable_deferred(consumers[i].consumer,
994 pwr->regulator_defer);
995 if (ret != 0)
996 goto err;
997 }
998
999 return 0;
1000
1001err:
1002 pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret);
1003 for (++i; i < num_consumers; ++i) {
1004 r = regulator_enable(consumers[i].consumer);
1005 if (r != 0)
1006 pr_err("Failed to reenable %s: %d\n",
1007 consumers[i].supply, r);
1008 }
1009
1010 return ret;
1011}
1012
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001013/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
1014static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
1015{
1016 int ret = 0;
1017 unsigned long flags;
1018
1019 spin_lock_irqsave(&pwr->clock_refs_lock, flags);
1020 if (pwr->clock_refs_count > 0) {
1021 pwr->clock_refs_count++;
1022 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
1023 return 0;
1024 }
1025
1026 ret = arm_smmu_enable_clocks(pwr);
1027 if (!ret)
1028 pwr->clock_refs_count = 1;
1029
1030 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
Patrick Daly8befb662016-08-17 20:03:28 -07001031 return ret;
1032}
1033
1034/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001035static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -07001036{
Patrick Daly8befb662016-08-17 20:03:28 -07001037 unsigned long flags;
1038
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001039 spin_lock_irqsave(&pwr->clock_refs_lock, flags);
1040 if (pwr->clock_refs_count == 0) {
1041 WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
1042 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
1043 return;
1044
1045 } else if (pwr->clock_refs_count > 1) {
1046 pwr->clock_refs_count--;
1047 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
Patrick Daly8befb662016-08-17 20:03:28 -07001048 return;
1049 }
1050
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001051 arm_smmu_disable_clocks(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001052
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001053 pwr->clock_refs_count = 0;
1054 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
Patrick Daly8befb662016-08-17 20:03:28 -07001055}
1056
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001057static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001058{
1059 int ret;
1060
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001061 mutex_lock(&pwr->power_lock);
1062 if (pwr->power_count > 0) {
1063 pwr->power_count += 1;
1064 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001065 return 0;
1066 }
1067
Patrick Daly8e2aa1a2017-04-13 17:09:43 -07001068 ret = arm_smmu_request_bus(pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07001069 if (ret)
1070 goto out_unlock;
1071
Patrick Dalyb26f97c2017-08-11 15:24:20 -07001072 ret = arm_smmu_enable_regulators(pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07001073 if (ret)
Patrick Daly8e2aa1a2017-04-13 17:09:43 -07001074 goto out_disable_bus;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001075
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001076 ret = arm_smmu_prepare_clocks(pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07001077 if (ret)
Patrick Daly8e2aa1a2017-04-13 17:09:43 -07001078 goto out_disable_regulators;
Patrick Daly2764f952016-09-06 19:22:44 -07001079
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001080 pwr->power_count = 1;
1081 mutex_unlock(&pwr->power_lock);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07001082 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001083
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07001084out_disable_regulators:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001085 regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
Patrick Daly8e2aa1a2017-04-13 17:09:43 -07001086out_disable_bus:
1087 arm_smmu_unrequest_bus(pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07001088out_unlock:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001089 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001090 return ret;
1091}
1092
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001093static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001094{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001095 mutex_lock(&pwr->power_lock);
1096 if (pwr->power_count == 0) {
1097 WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
1098 mutex_unlock(&pwr->power_lock);
1099 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001100
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001101 } else if (pwr->power_count > 1) {
1102 pwr->power_count--;
1103 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001104 return;
1105 }
1106
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001107 arm_smmu_unprepare_clocks(pwr);
Prakash Guptafad87ca2017-05-16 12:13:02 +05301108 arm_smmu_disable_regulators(pwr);
Patrick Daly8e2aa1a2017-04-13 17:09:43 -07001109 arm_smmu_unrequest_bus(pwr);
Patrick Daly2e3471e2017-04-13 16:24:33 -07001110 pwr->power_count = 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001111 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001112}
1113
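/*
 * Full (sleepable) power-on: bus vote, regulators and clock prepare first,
 * then the atomic clock-enable reference.
 */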
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001114static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -07001115{
1116 int ret;
1117
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001118 ret = arm_smmu_power_on_slow(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001119 if (ret)
1120 return ret;
1121
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001122 ret = arm_smmu_power_on_atomic(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001123 if (ret)
1124 goto out_disable;
1125
1126 return 0;
1127
1128out_disable:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001129 arm_smmu_power_off_slow(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001130 return ret;
1131}
1132
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001133static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -07001134{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001135 arm_smmu_power_off_atomic(pwr);
1136 arm_smmu_power_off_slow(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001137}
1138
1139/*
1140 * Must be used instead of arm_smmu_power_on if it may be called from
1141 * atomic context
1142 */
1143static int arm_smmu_domain_power_on(struct iommu_domain *domain,
1144 struct arm_smmu_device *smmu)
1145{
1146 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1147 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
1148
1149 if (atomic_domain)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001150 return arm_smmu_power_on_atomic(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001151
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001152 return arm_smmu_power_on(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001153}
1154
1155/*
1156 * Must be used instead of arm_smmu_power_off if it may be called from
1157 * atomic context
1158 */
1159static void arm_smmu_domain_power_off(struct iommu_domain *domain,
1160 struct arm_smmu_device *smmu)
1161{
1162 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1163 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
1164
1165 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001166 arm_smmu_power_off_atomic(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001167 return;
1168 }
1169
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001170 arm_smmu_power_off(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001171}
1172
Will Deacon45ae7cf2013-06-24 18:31:25 +01001173/* Wait for any pending TLB invalidations to complete */
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001174static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
1175 int cbndx)
1176{
1177 void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
1178 u32 val;
1179
1180 writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
1181 if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
1182 !(val & TLBSTATUS_SACTIVE),
Prakash Gupta515d9bc2017-11-20 15:00:39 +05301183 0, TLB_LOOP_TIMEOUT)) {
1184 trace_tlbsync_timeout(smmu->dev, 0);
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001185 dev_err(smmu->dev, "TLBSYNC timeout!\n");
Prakash Gupta515d9bc2017-11-20 15:00:39 +05301186 }
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001187}
1188
Will Deacon518f7132014-11-14 17:17:54 +00001189static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001190{
1191 int count = 0;
1192 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1193
1194 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
1195 while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
1196 & sTLBGSTATUS_GSACTIVE) {
1197 cpu_relax();
1198 if (++count == TLB_LOOP_TIMEOUT) {
1199 dev_err_ratelimited(smmu->dev,
1200 "TLB sync timed out -- SMMU may be deadlocked\n");
1201 return;
1202 }
1203 udelay(1);
1204 }
1205}
1206
Will Deacon518f7132014-11-14 17:17:54 +00001207static void arm_smmu_tlb_sync(void *cookie)
Will Deacon1463fe42013-07-31 19:21:27 +01001208{
Will Deacon518f7132014-11-14 17:17:54 +00001209 struct arm_smmu_domain *smmu_domain = cookie;
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001210 arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
Will Deacon518f7132014-11-14 17:17:54 +00001211}
1212
Patrick Daly8befb662016-08-17 20:03:28 -07001213/* Must be called with clocks/regulators enabled */
Will Deacon518f7132014-11-14 17:17:54 +00001214static void arm_smmu_tlb_inv_context(void *cookie)
1215{
1216 struct arm_smmu_domain *smmu_domain = cookie;
Prakash Gupta515d9bc2017-11-20 15:00:39 +05301217 struct device *dev = smmu_domain->dev;
Will Deacon44680ee2014-06-25 11:29:12 +01001218 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1219 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon1463fe42013-07-31 19:21:27 +01001220 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
Will Deacon518f7132014-11-14 17:17:54 +00001221 void __iomem *base;
Patrick Dalye7069342017-07-11 12:35:55 -07001222 bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
Prakash Gupta515d9bc2017-11-20 15:00:39 +05301223 ktime_t cur = ktime_get();
1224
1225 trace_tlbi_start(dev, 0);
Will Deacon1463fe42013-07-31 19:21:27 +01001226
Patrick Dalye7069342017-07-11 12:35:55 -07001227 if (stage1 && !use_tlbiall) {
Will Deacon1463fe42013-07-31 19:21:27 +01001228 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001229 writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +01001230 base + ARM_SMMU_CB_S1_TLBIASID);
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001231 arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
Patrick Dalye7069342017-07-11 12:35:55 -07001232 } else if (stage1 && use_tlbiall) {
1233 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1234 writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
1235 arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001236 } else {
1237 base = ARM_SMMU_GR0(smmu);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001238 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +01001239 base + ARM_SMMU_GR0_TLBIVMID);
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001240 __arm_smmu_tlb_sync(smmu);
Will Deacon1463fe42013-07-31 19:21:27 +01001241 }
Prakash Gupta515d9bc2017-11-20 15:00:39 +05301242
1243 trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
Will Deacon1463fe42013-07-31 19:21:27 +01001244}
1245
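/*
 * Invalidate by address. Stage 1 writes the page-aligned VA with the
 * ASID folded into the low bits (AArch32 formats) or VA >> 12 with the
 * ASID in bits [63:48] (AArch64); contexts relying on
 * ARM_SMMU_OPT_NO_ASID_RETENTION simply use TLBIALL. Stage 2 uses
 * TLBIIPAS2(L) with IPA >> 12 on SMMUv2 and falls back to invalidating
 * the whole VMID on older SMMUs.
 */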
Will Deacon518f7132014-11-14 17:17:54 +00001246static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
Robin Murphy06c610e2015-12-07 18:18:53 +00001247 size_t granule, bool leaf, void *cookie)
Will Deacon518f7132014-11-14 17:17:54 +00001248{
1249 struct arm_smmu_domain *smmu_domain = cookie;
1250 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1251 struct arm_smmu_device *smmu = smmu_domain->smmu;
1252 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1253 void __iomem *reg;
Patrick Dalye7069342017-07-11 12:35:55 -07001254 bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
Will Deacon518f7132014-11-14 17:17:54 +00001255
Patrick Dalye7069342017-07-11 12:35:55 -07001256 if (stage1 && !use_tlbiall) {
Will Deacon518f7132014-11-14 17:17:54 +00001257 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1258 reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
1259
Robin Murphy7602b872016-04-28 17:12:09 +01001260 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001261 iova &= ~12UL;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001262 iova |= ARM_SMMU_CB_ASID(smmu, cfg);
Robin Murphy75df1382015-12-07 18:18:52 +00001263 do {
1264 writel_relaxed(iova, reg);
1265 iova += granule;
1266 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001267 } else {
1268 iova >>= 12;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001269 iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
Robin Murphy75df1382015-12-07 18:18:52 +00001270 do {
1271 writeq_relaxed(iova, reg);
1272 iova += granule >> 12;
1273 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001274 }
Patrick Dalye7069342017-07-11 12:35:55 -07001275 } else if (stage1 && use_tlbiall) {
1276 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1277 reg += ARM_SMMU_CB_S1_TLBIALL;
1278 writel_relaxed(0, reg);
Will Deacon518f7132014-11-14 17:17:54 +00001279 } else if (smmu->version == ARM_SMMU_V2) {
1280 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1281 reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
1282 ARM_SMMU_CB_S2_TLBIIPAS2;
Robin Murphy75df1382015-12-07 18:18:52 +00001283 iova >>= 12;
1284 do {
Robin Murphyf9a05f02016-04-13 18:13:01 +01001285 smmu_write_atomic_lq(iova, reg);
Robin Murphy75df1382015-12-07 18:18:52 +00001286 iova += granule >> 12;
1287 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001288 } else {
1289 reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001290 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
Will Deacon518f7132014-11-14 17:17:54 +00001291 }
1292}
1293
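/*
 * Cache of page-table pages that have already been assigned to a
 * secure (master-side) domain. Instead of unassigning and freeing a
 * page on every unmap, freed pages are parked here so that
 * arm_smmu_alloc_pages_exact() can hand back a page of matching size
 * without it having to be prepared and assigned again.
 */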
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001294struct arm_smmu_secure_pool_chunk {
1295 void *addr;
1296 size_t size;
1297 struct list_head list;
1298};
1299
1300static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
1301 size_t size)
1302{
1303 struct arm_smmu_secure_pool_chunk *it;
1304
1305 list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
1306 if (it->size == size) {
1307 void *addr = it->addr;
1308
1309 list_del(&it->list);
1310 kfree(it);
1311 return addr;
1312 }
1313 }
1314
1315 return NULL;
1316}
1317
1318static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
1319 void *addr, size_t size)
1320{
1321 struct arm_smmu_secure_pool_chunk *chunk;
1322
1323 chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
1324 if (!chunk)
1325 return -ENOMEM;
1326
1327 chunk->addr = addr;
1328 chunk->size = size;
1329 memset(addr, 0, size);
1330 list_add(&chunk->list, &smmu_domain->secure_pool_list);
1331
1332 return 0;
1333}
1334
1335static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
1336{
1337 struct arm_smmu_secure_pool_chunk *it, *i;
1338
1339 list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
1340 arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
1341 /* pages will be freed later (after being unassigned) */
Prakash Gupta8e827be2017-10-04 12:37:11 +05301342 list_del(&it->list);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001343 kfree(it);
1344 }
1345}
1346
Patrick Dalyc11d1082016-09-01 15:52:44 -07001347static void *arm_smmu_alloc_pages_exact(void *cookie,
1348 size_t size, gfp_t gfp_mask)
1349{
1350 int ret;
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001351 void *page;
1352 struct arm_smmu_domain *smmu_domain = cookie;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001353
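	/*
	 * Non-secure domains first try the domain's nonsecure_pool
	 * (populated elsewhere with single 4K pages) and fall back to
	 * alloc_pages_exact(); secure domains prefer recycling a
	 * previously assigned page from the secure pool.
	 */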
Patrick Daly2d600832018-02-11 15:12:55 -08001354 if (!arm_smmu_is_master_side_secure(smmu_domain)) {
1355 struct page *pg;
1356 /* size is expected to be 4K with current configuration */
1357 if (size == PAGE_SIZE) {
1358 pg = list_first_entry_or_null(
1359 &smmu_domain->nonsecure_pool, struct page, lru);
1360 if (pg) {
1361 list_del_init(&pg->lru);
1362 return page_address(pg);
1363 }
1364 }
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001365 return alloc_pages_exact(size, gfp_mask);
Patrick Daly2d600832018-02-11 15:12:55 -08001366 }
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001367
1368 page = arm_smmu_secure_pool_remove(smmu_domain, size);
1369 if (page)
1370 return page;
1371
1372 page = alloc_pages_exact(size, gfp_mask);
1373 if (page) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07001374 ret = arm_smmu_prepare_pgtable(page, cookie);
1375 if (ret) {
1376 free_pages_exact(page, size);
1377 return NULL;
1378 }
1379 }
1380
1381 return page;
1382}
1383
1384static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
1385{
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001386 struct arm_smmu_domain *smmu_domain = cookie;
1387
Charan Teja Reddy35144b02017-09-05 16:20:46 +05301388 if (!arm_smmu_is_master_side_secure(smmu_domain)) {
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001389 free_pages_exact(virt, size);
1390 return;
1391 }
1392
1393 if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
1394 arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001395}
1396
Will Deacon518f7132014-11-14 17:17:54 +00001397static struct iommu_gather_ops arm_smmu_gather_ops = {
1398 .tlb_flush_all = arm_smmu_tlb_inv_context,
1399 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
1400 .tlb_sync = arm_smmu_tlb_sync,
Patrick Dalyc11d1082016-09-01 15:52:44 -07001401 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
1402 .free_pages_exact = arm_smmu_free_pages_exact,
Will Deacon518f7132014-11-14 17:17:54 +00001403};
1404
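/*
 * TLB callbacks used for slave-side secure domains. Their page tables
 * are owned and maintained through the secure world (ARM_MSM_SECURE
 * format), so TLB maintenance from the non-secure driver is expected to
 * be handled there; these hooks are deliberately left empty.
 */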
Charan Teja Reddy8e4c3bdc2018-03-02 14:15:21 +05301405static void msm_smmu_tlb_inv_context(void *cookie)
1406{
1407}
1408
1409static void msm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
1410 size_t granule, bool leaf,
1411 void *cookie)
1412{
1413}
1414
1415static void msm_smmu_tlb_sync(void *cookie)
1416{
1417}
1418
1419static struct iommu_gather_ops msm_smmu_gather_ops = {
1420 .tlb_flush_all = msm_smmu_tlb_inv_context,
1421 .tlb_add_flush = msm_smmu_tlb_inv_range_nosync,
1422 .tlb_sync = msm_smmu_tlb_sync,
1423 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
1424 .free_pages_exact = arm_smmu_free_pages_exact,
1425};
1426
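/*
 * Cross-check an unhandled fault: translate the faulting IOVA via the
 * hardware ATOS path, invalidate the TLB, then translate again. If the
 * two results differ, the fault was likely caused by a stale TLB entry
 * rather than by the current page tables.
 */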
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001427static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
1428 dma_addr_t iova, u32 fsr)
1429{
1430 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001431 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyda765c62017-09-11 16:31:07 -07001432 const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001433 phys_addr_t phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07001434 phys_addr_t phys_post_tlbiall;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001435
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001436 phys = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyda765c62017-09-11 16:31:07 -07001437 tlb->tlb_flush_all(smmu_domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001438 phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001439
Patrick Dalyad441dd2016-09-15 15:50:46 -07001440 if (phys != phys_post_tlbiall) {
1441 dev_err(smmu->dev,
1442 "ATOS results differed across TLBIALL...\n"
1443 "Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
1444 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001445
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001446 return (phys == 0 ? phys_post_tlbiall : phys);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001447}
1448
Will Deacon45ae7cf2013-06-24 18:31:25 +01001449static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
1450{
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001451 int flags, ret, tmp;
Patrick Daly5ba28112016-08-30 19:18:52 -07001452 u32 fsr, fsynr, resume;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001453 unsigned long iova;
1454 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +01001455 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001456 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1457 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001458 void __iomem *cb_base;
Shalaj Jain04059c52015-03-03 13:34:59 -08001459 void __iomem *gr1_base;
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001460 bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001461 phys_addr_t phys_soft;
Shalaj Jain04059c52015-03-03 13:34:59 -08001462 u32 frsynra;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001463 bool non_fatal_fault = !!(smmu_domain->attributes &
Sudarshan Rajagopalanf4464e02017-08-10 14:30:39 -07001464 (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001465
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001466 static DEFINE_RATELIMIT_STATE(_rs,
1467 DEFAULT_RATELIMIT_INTERVAL,
1468 DEFAULT_RATELIMIT_BURST);
1469
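	/*
	 * The SMMU may be clock/power gated when the interrupt fires;
	 * power it on before touching any registers, and bail out if
	 * that fails.
	 */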
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001470 ret = arm_smmu_power_on(smmu->pwr);
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001471 if (ret)
1472 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001473
Shalaj Jain04059c52015-03-03 13:34:59 -08001474 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001475 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001476 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
1477
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001478 if (!(fsr & FSR_FAULT)) {
1479 ret = IRQ_NONE;
1480 goto out_power_off;
1481 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001482
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001483 if (fatal_asf && (fsr & FSR_ASF)) {
1484 dev_err(smmu->dev,
1485 "Took an address size fault. Refusing to recover.\n");
1486 BUG();
1487 }
1488
Will Deacon45ae7cf2013-06-24 18:31:25 +01001489 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
Patrick Daly5ba28112016-08-30 19:18:52 -07001490 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001491 if (fsr & FSR_TF)
1492 flags |= IOMMU_FAULT_TRANSLATION;
1493 if (fsr & FSR_PF)
1494 flags |= IOMMU_FAULT_PERMISSION;
Mitchel Humpherysdd75e332015-08-19 11:02:33 -07001495 if (fsr & FSR_EF)
1496 flags |= IOMMU_FAULT_EXTERNAL;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001497 if (fsr & FSR_SS)
1498 flags |= IOMMU_FAULT_TRANSACTION_STALLED;
Patrick Daly5ba28112016-08-30 19:18:52 -07001499
Robin Murphyf9a05f02016-04-13 18:13:01 +01001500 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001501 phys_soft = arm_smmu_iova_to_phys(domain, iova);
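	/* CBFRSYNRA reports the stream ID of the faulting transaction */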
Shalaj Jain04059c52015-03-03 13:34:59 -08001502 frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
1503 frsynra &= CBFRSYNRA_SID_MASK;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001504 tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
1505 if (!tmp || (tmp == -EBUSY)) {
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001506 dev_dbg(smmu->dev,
1507 "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1508 iova, fsr, fsynr, cfg->cbndx);
1509 dev_dbg(smmu->dev,
1510 "soft iova-to-phys=%pa\n", &phys_soft);
Patrick Daly5ba28112016-08-30 19:18:52 -07001511 ret = IRQ_HANDLED;
Shrenuj Bansald5083c02015-09-18 14:59:09 -07001512 resume = RESUME_TERMINATE;
Patrick Daly5ba28112016-08-30 19:18:52 -07001513 } else {
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001514 phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
1515 fsr);
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001516 if (__ratelimit(&_rs)) {
1517 dev_err(smmu->dev,
1518 "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1519 iova, fsr, fsynr, cfg->cbndx);
1520 dev_err(smmu->dev, "FAR = %016lx\n",
1521 (unsigned long)iova);
1522 dev_err(smmu->dev,
1523 "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
1524 fsr,
1525 (fsr & 0x02) ? "TF " : "",
1526 (fsr & 0x04) ? "AFF " : "",
1527 (fsr & 0x08) ? "PF " : "",
1528 (fsr & 0x10) ? "EF " : "",
1529 (fsr & 0x20) ? "TLBMCF " : "",
1530 (fsr & 0x40) ? "TLBLKF " : "",
1531 (fsr & 0x80) ? "MHF " : "",
1532 (fsr & 0x40000000) ? "SS " : "",
1533 (fsr & 0x80000000) ? "MULTI " : "");
1534 dev_err(smmu->dev,
1535 "soft iova-to-phys=%pa\n", &phys_soft);
Mitchel Humpherysd03b65d2015-11-05 11:50:29 -08001536 if (!phys_soft)
1537 dev_err(smmu->dev,
1538 "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
1539 dev_name(smmu->dev));
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001540 if (phys_atos)
1541 dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
1542 &phys_atos);
1543 else
1544 dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001545 dev_err(smmu->dev, "SID=0x%x\n", frsynra);
1546 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001547 ret = IRQ_NONE;
1548 resume = RESUME_TERMINATE;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001549 if (!non_fatal_fault) {
1550 dev_err(smmu->dev,
1551 "Unhandled arm-smmu context fault!\n");
1552 BUG();
1553 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001554 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001555
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001556 /*
1557 * If the client returns -EBUSY, do not clear FSR and do not RESUME
1558 * if stalled. This is required to keep the IOMMU client stalled on
1559 * the outstanding fault. This gives the client a chance to take any
1560 * debug action and then terminate the stalled transaction.
1561 * So, the sequence in case of stall on fault should be:
1562 * 1) Do not clear FSR or write to RESUME here
1563 * 2) Client takes any debug action
1564 * 3) Client terminates the stalled transaction and resumes the IOMMU
1565 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1566 * not before so that the fault remains outstanding. This ensures
1567 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1568 * need to be terminated.
1569 */
1570 if (tmp != -EBUSY) {
1571 /* Clear the faulting FSR */
1572 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001573
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001574 /*
1575 * Barrier required to ensure that the FSR is cleared
1576 * before resuming SMMU operation
1577 */
1578 wmb();
1579
1580 /* Retry or terminate any stalled transactions */
1581 if (fsr & FSR_SS)
1582 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1583 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001584
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001585out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001586 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001587
Patrick Daly5ba28112016-08-30 19:18:52 -07001588 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001589}
1590
1591static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1592{
1593 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1594 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001595 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001596
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001597 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001598 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001599
Will Deacon45ae7cf2013-06-24 18:31:25 +01001600 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1601 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1602 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1603 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1604
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001605 if (!gfsr) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001606 arm_smmu_power_off(smmu->pwr);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001607 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001608 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001609
Will Deacon45ae7cf2013-06-24 18:31:25 +01001610 dev_err_ratelimited(smmu->dev,
1611 "Unexpected global fault, this could be serious\n");
1612 dev_err_ratelimited(smmu->dev,
1613 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1614 gfsr, gfsynr0, gfsynr1, gfsynr2);
1615
1616 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001617 arm_smmu_power_off(smmu->pwr);
Will Deaconadaba322013-07-31 19:21:26 +01001618 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001619}
1620
Shiraz Hashimeca8c2e2018-01-15 20:08:38 +05301621static bool arm_smmu_master_attached(struct arm_smmu_device *smmu,
1622 struct iommu_fwspec *fwspec)
1623{
1624 int i, idx;
1625
1626 for_each_cfg_sme(fwspec, i, idx) {
1627 if (smmu->s2crs[idx].attach_count)
1628 return true;
1629 }
1630
1631 return false;
1632}
1633
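/*
 * On SMMUs whose context banks are statically configured by the secure
 * world, a non-secure AArch64 domain has its page table format
 * requested via msm_tz_set_cb_format() rather than being programmed
 * directly; in all other cases this is a no-op.
 */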
Charan Teja Reddy35144b02017-09-05 16:20:46 +05301634static int arm_smmu_set_pt_format(struct arm_smmu_domain *smmu_domain,
1635 struct io_pgtable_cfg *pgtbl_cfg)
1636{
1637 struct arm_smmu_device *smmu = smmu_domain->smmu;
1638 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1639 int ret = 0;
1640
1641 if ((smmu->version > ARM_SMMU_V1) &&
1642 (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) &&
1643 !arm_smmu_has_secure_vmid(smmu_domain) &&
1644 arm_smmu_is_static_cb(smmu)) {
1645 ret = msm_tz_set_cb_format(smmu->sec_id, cfg->cbndx);
1646 }
1647 return ret;
1648}
1649
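/*
 * Capture the page table configuration in the software copy of the
 * context bank (smmu->cbs[]); the hardware registers are programmed
 * from it by arm_smmu_write_context_bank().
 */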
Will Deacon518f7132014-11-14 17:17:54 +00001650static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1651 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001652{
Will Deacon44680ee2014-06-25 11:29:12 +01001653 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Robin Murphy6549a1f2017-08-08 14:56:14 +01001654 struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
1655 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1656
1657 cb->cfg = cfg;
1658
1659 /* TTBCR */
1660 if (stage1) {
1661 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1662 cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
1663 } else {
1664 cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1665 cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
1666 cb->tcr[1] |= TTBCR2_SEP_UPSTREAM;
1667 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1668 cb->tcr[1] |= TTBCR2_AS;
1669 }
1670 } else {
1671 cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
1672 }
1673
1674 /* TTBRs */
1675 if (stage1) {
1676 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1677 cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
1678 cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
1679 } else {
1680 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1681 cb->ttbr[0] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
1682 cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
1683 cb->ttbr[1] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
1684 }
1685 } else {
1686 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
1687 }
1688
1689 /* MAIRs (stage-1 only) */
1690 if (stage1) {
1691 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1692 cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
1693 cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
1694 } else {
1695 cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1696 cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1697 }
1698 }
1699
1700 cb->attributes = smmu_domain->attributes;
1701}
1702
1703static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
1704{
1705 u32 reg;
1706 bool stage1;
1707 struct arm_smmu_cb *cb = &smmu->cbs[idx];
1708 struct arm_smmu_cfg *cfg = cb->cfg;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001709 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001710
Robin Murphy6549a1f2017-08-08 14:56:14 +01001711 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, idx);
1712
1713 /* Unassigned context banks only need disabling */
1714 if (!cfg) {
1715 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
1716 return;
1717 }
1718
Will Deacon45ae7cf2013-06-24 18:31:25 +01001719 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001720 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001721
Robin Murphy6549a1f2017-08-08 14:56:14 +01001722 /* CBA2R */
Will Deacon4a1c93c2015-03-04 12:21:03 +00001723 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001724 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1725 reg = CBA2R_RW64_64BIT;
1726 else
1727 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001728 /* 16-bit VMIDs live in CBA2R */
1729 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001730 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001731
Robin Murphy6549a1f2017-08-08 14:56:14 +01001732 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(idx));
Will Deacon4a1c93c2015-03-04 12:21:03 +00001733 }
1734
Will Deacon45ae7cf2013-06-24 18:31:25 +01001735 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001736 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001737 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001738 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001739
Will Deacon57ca90f2014-02-06 14:59:05 +00001740 /*
1741 * Use the weakest shareability/memory types, so they are
1742 * overridden by the ttbcr/pte.
1743 */
1744 if (stage1) {
1745 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1746 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001747 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1748 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001749 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001750 }
Robin Murphy6549a1f2017-08-08 14:56:14 +01001751 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(idx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001752
Sunil Gouthamf0b0a2a2017-03-28 16:11:12 +05301753 /*
1754 * TTBCR
1755 * We must write this before the TTBRs, since it determines the
1756 * access behaviour of some fields (in particular, ASID[15:8]).
1757 */
Robin Murphy6549a1f2017-08-08 14:56:14 +01001758 if (stage1 && smmu->version > ARM_SMMU_V1)
1759 writel_relaxed(cb->tcr[1], cb_base + ARM_SMMU_CB_TTBCR2);
1760 writel_relaxed(cb->tcr[0], cb_base + ARM_SMMU_CB_TTBCR);
Sunil Gouthamf0b0a2a2017-03-28 16:11:12 +05301761
Will Deacon518f7132014-11-14 17:17:54 +00001762 /* TTBRs */
Robin Murphy6549a1f2017-08-08 14:56:14 +01001763 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1764 writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
1765 writel_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
1766 writel_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
Will Deacon518f7132014-11-14 17:17:54 +00001767 } else {
Robin Murphy6549a1f2017-08-08 14:56:14 +01001768 writeq_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
1769 if (stage1)
1770 writeq_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
Will Deacon518f7132014-11-14 17:17:54 +00001771 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001772
Will Deacon518f7132014-11-14 17:17:54 +00001773 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001774 if (stage1) {
Robin Murphy6549a1f2017-08-08 14:56:14 +01001775 writel_relaxed(cb->mair[0], cb_base + ARM_SMMU_CB_S1_MAIR0);
1776 writel_relaxed(cb->mair[1], cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001777 }
1778
Patrick Dalyad521082018-04-06 18:07:13 -07001779 /* ACTLR (implementation defined) */
Patrick Daly25317e82018-05-07 12:35:29 -07001780 if (cb->has_actlr)
1781 writel_relaxed(cb->actlr, cb_base + ARM_SMMU_CB_ACTLR);
Patrick Dalyad521082018-04-06 18:07:13 -07001782
Will Deacon45ae7cf2013-06-24 18:31:25 +01001783 /* SCTLR */
Robin Murphyb94df6f2016-08-11 17:44:06 +01001784 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001785
Patrick Daly7f377fe2017-10-06 17:37:10 -07001786 /* Ensure bypass transactions are Non-shareable */
1787 reg |= SCTLR_SHCFG_NSH << SCTLR_SHCFG_SHIFT;
1788
Robin Murphy6549a1f2017-08-08 14:56:14 +01001789 if (cb->attributes & (1 << DOMAIN_ATTR_CB_STALL_DISABLE)) {
Charan Teja Reddyc682e472017-04-20 19:11:20 +05301790 reg &= ~SCTLR_CFCFG;
1791 reg |= SCTLR_HUPCF;
1792 }
Patrick Daly7c29f782018-08-16 15:36:20 -07001793 if (cb->attributes & (1 << DOMAIN_ATTR_NO_CFRE))
1794 reg &= ~SCTLR_CFRE;
Charan Teja Reddyc682e472017-04-20 19:11:20 +05301795
Robin Murphy6549a1f2017-08-08 14:56:14 +01001796 if ((!(cb->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
1797 !(cb->attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) ||
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001798 !stage1)
Patrick Dalye62d3362016-03-15 18:58:28 -07001799 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001800 if (stage1)
1801 reg |= SCTLR_S1_ASIDPNE;
Robin Murphy6549a1f2017-08-08 14:56:14 +01001802 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1803 reg |= SCTLR_E;
1804
Will Deacon25724842013-08-21 13:49:53 +01001805 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001806}
1807
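/*
 * Non-dynamic domains derive their ASID directly from the context bank
 * index. Dynamic domains share a context bank, so each one draws a
 * unique ASID from the IDR, above the range used by the context banks
 * themselves.
 */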
Patrick Dalyc190d932016-08-30 17:23:28 -07001808static int arm_smmu_init_asid(struct iommu_domain *domain,
1809 struct arm_smmu_device *smmu)
1810{
1811 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1812 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1813 bool dynamic = is_dynamic_domain(domain);
1814 int ret;
1815
1816 if (!dynamic) {
1817 cfg->asid = cfg->cbndx + 1;
1818 } else {
1819 mutex_lock(&smmu->idr_mutex);
1820 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1821 smmu->num_context_banks + 2,
1822 MAX_ASID + 1, GFP_KERNEL);
1823
1824 mutex_unlock(&smmu->idr_mutex);
1825 if (ret < 0) {
1826 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1827 ret);
1828 return ret;
1829 }
1830 cfg->asid = ret;
1831 }
1832 return 0;
1833}
1834
1835static void arm_smmu_free_asid(struct iommu_domain *domain)
1836{
1837 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1838 struct arm_smmu_device *smmu = smmu_domain->smmu;
1839 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1840 bool dynamic = is_dynamic_domain(domain);
1841
1842 if (cfg->asid == INVALID_ASID || !dynamic)
1843 return;
1844
1845 mutex_lock(&smmu->idr_mutex);
1846 idr_remove(&smmu->asid_idr, cfg->asid);
1847 mutex_unlock(&smmu->idr_mutex);
1848}
1849
Will Deacon45ae7cf2013-06-24 18:31:25 +01001850static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Patrick Dalyea63baa2017-02-13 17:11:33 -08001851 struct arm_smmu_device *smmu,
1852 struct device *dev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001853{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001854 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001855 unsigned long ias, oas;
1856 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001857 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001858 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001859 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001860 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyce6786f2016-11-09 14:19:23 -08001861 unsigned long quirks = 0;
Patrick Dalyc190d932016-08-30 17:23:28 -07001862 bool dynamic;
Patrick Dalyda765c62017-09-11 16:31:07 -07001863 const struct iommu_gather_ops *tlb;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001864
Will Deacon518f7132014-11-14 17:17:54 +00001865 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001866 if (smmu_domain->smmu)
1867 goto out_unlock;
1868
Patrick Dalyc190d932016-08-30 17:23:28 -07001869 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1870 smmu_domain->cfg.asid = INVALID_ASID;
1871
Patrick Dalyc190d932016-08-30 17:23:28 -07001872 dynamic = is_dynamic_domain(domain);
1873 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1874 dev_err(smmu->dev, "dynamic domains not supported\n");
1875 ret = -EPERM;
1876 goto out_unlock;
1877 }
1878
Patrick Dalyaddf1f82018-04-23 14:39:19 -07001879 if (arm_smmu_has_secure_vmid(smmu_domain) &&
1880 arm_smmu_opt_hibernation(smmu)) {
1881 dev_err(smmu->dev,
1882 "Secure usecases not supported with hibernation\n");
1883 ret = -EPERM;
1884 goto out_unlock;
1885 }
1886
Will Deaconc752ce42014-06-25 22:46:31 +01001887 /*
1888 * Mapping the requested stage onto what we support is surprisingly
1889 * complicated, mainly because the spec allows S1+S2 SMMUs without
1890 * support for nested translation. That means we end up with the
1891 * following table:
1892 *
1893 * Requested Supported Actual
1894 * S1 N S1
1895 * S1 S1+S2 S1
1896 * S1 S2 S2
1897 * S1 S1 S1
1898 * N N N
1899 * N S1+S2 S2
1900 * N S2 S2
1901 * N S1 S1
1902 *
1903 * Note that you can't actually request stage-2 mappings.
1904 */
1905 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1906 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1907 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1908 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1909
Robin Murphy7602b872016-04-28 17:12:09 +01001910 /*
1911 * Choosing a suitable context format is even more fiddly. Until we
1912 * grow some way for the caller to express a preference, and/or move
1913 * the decision into the io-pgtable code where it arguably belongs,
1914 * just aim for the closest thing to the rest of the system, and hope
1915 * that the hardware isn't esoteric enough that we can't assume AArch64
1916 * support to be a superset of AArch32 support...
1917 */
1918 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1919 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001920 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
1921 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
1922 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
1923 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
1924 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +01001925 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1926 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1927 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1928 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1929 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1930
1931 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1932 ret = -EINVAL;
1933 goto out_unlock;
1934 }
1935
Will Deaconc752ce42014-06-25 22:46:31 +01001936 switch (smmu_domain->stage) {
1937 case ARM_SMMU_DOMAIN_S1:
1938 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1939 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001940 ias = smmu->va_size;
1941 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001942 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001943 fmt = ARM_64_LPAE_S1;
Patrick Daly4423d3e2017-05-04 18:17:51 -07001944 if (smmu->options & ARM_SMMU_OPT_3LVL_TABLES)
1945 ias = min(ias, 39UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001946 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +00001947 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001948 ias = min(ias, 32UL);
1949 oas = min(oas, 40UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001950 } else {
1951 fmt = ARM_V7S;
1952 ias = min(ias, 32UL);
1953 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +01001954 }
Will Deaconc752ce42014-06-25 22:46:31 +01001955 break;
1956 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001957 /*
1958 * We will likely want to change this if/when KVM gets
1959 * involved.
1960 */
Will Deaconc752ce42014-06-25 22:46:31 +01001961 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001962 cfg->cbar = CBAR_TYPE_S2_TRANS;
1963 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001964 ias = smmu->ipa_size;
1965 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001966 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001967 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001968 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001969 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001970 ias = min(ias, 40UL);
1971 oas = min(oas, 40UL);
1972 }
Will Deaconc752ce42014-06-25 22:46:31 +01001973 break;
1974 default:
1975 ret = -EINVAL;
1976 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001977 }
1978
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001979 if (is_fast)
1980 fmt = ARM_V8L_FAST;
1981
Patrick Dalyce6786f2016-11-09 14:19:23 -08001982 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
1983 quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
Liam Mark53cf2342016-12-20 11:36:07 -08001984 if (is_iommu_pt_coherent(smmu_domain))
1985 quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
Patrick Daly49ccf332017-09-27 15:10:29 -07001986 if ((quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT) &&
1987 (smmu->model == QCOM_SMMUV500))
1988 quirks |= IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001989
Patrick Dalyda765c62017-09-11 16:31:07 -07001990 tlb = &arm_smmu_gather_ops;
Patrick Daly83174c12017-10-26 12:31:15 -07001991 if (smmu->options & ARM_SMMU_OPT_MMU500_ERRATA1)
Patrick Dalyda765c62017-09-11 16:31:07 -07001992 tlb = &qsmmuv500_errata1_smmu_gather_ops;
1993
Charan Teja Reddy8e4c3bdc2018-03-02 14:15:21 +05301994 if (arm_smmu_is_slave_side_secure(smmu_domain))
1995 tlb = &msm_smmu_gather_ops;
1996
Patrick Dalyda688822017-05-17 20:12:48 -07001997 ret = arm_smmu_alloc_cb(domain, smmu, dev);
1998 if (ret < 0)
1999 goto out_unlock;
2000 cfg->cbndx = ret;
2001
Robin Murphyb7862e32016-04-13 18:13:03 +01002002 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01002003 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
2004 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002005 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01002006 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002007 }
2008
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302009 if (arm_smmu_is_slave_side_secure(smmu_domain)) {
2010 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
2011 .quirks = quirks,
2012 .pgsize_bitmap = smmu->pgsize_bitmap,
2013 .arm_msm_secure_cfg = {
2014 .sec_id = smmu->sec_id,
2015 .cbndx = cfg->cbndx,
2016 },
Charan Teja Reddy8e4c3bdc2018-03-02 14:15:21 +05302017 .tlb = tlb,
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302018 .iommu_dev = smmu->dev,
2019 };
2020 fmt = ARM_MSM_SECURE;
2021 } else {
2022 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
2023 .quirks = quirks,
2024 .pgsize_bitmap = smmu->pgsize_bitmap,
2025 .ias = ias,
2026 .oas = oas,
2027 .tlb = tlb,
2028 .iommu_dev = smmu->dev,
2029 };
2030 }
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002031
Will Deacon518f7132014-11-14 17:17:54 +00002032 smmu_domain->smmu = smmu;
Patrick Dalyea63baa2017-02-13 17:11:33 -08002033 smmu_domain->dev = dev;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07002034 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
2035 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002036 if (!pgtbl_ops) {
2037 ret = -ENOMEM;
2038 goto out_clear_smmu;
2039 }
2040
Patrick Dalyc11d1082016-09-01 15:52:44 -07002041 /*
2042 * assign any page table memory that might have been allocated
2043 * during alloc_io_pgtable_ops
2044 */
Patrick Dalye271f212016-10-04 13:24:49 -07002045 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002046 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002047 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002048
Robin Murphyd5466352016-05-09 17:20:09 +01002049 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07002050 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Robin Murphyd7a8d042016-09-12 17:13:58 +01002051 domain->geometry.aperture_end = (1UL << ias) - 1;
2052 domain->geometry.force_aperture = true;
Will Deacon518f7132014-11-14 17:17:54 +00002053
Patrick Dalyc190d932016-08-30 17:23:28 -07002054 /* Assign an asid */
2055 ret = arm_smmu_init_asid(domain, smmu);
2056 if (ret)
2057 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00002058
Patrick Dalyc190d932016-08-30 17:23:28 -07002059 if (!dynamic) {
2060 /* Initialise the context bank with our page table cfg */
2061 arm_smmu_init_context_bank(smmu_domain,
Robin Murphy6549a1f2017-08-08 14:56:14 +01002062 &smmu_domain->pgtbl_cfg);
Patrick Dalyad521082018-04-06 18:07:13 -07002063 arm_smmu_arch_init_context_bank(smmu_domain, dev);
Robin Murphy6549a1f2017-08-08 14:56:14 +01002064 arm_smmu_write_context_bank(smmu, cfg->cbndx);
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302065		/* For slave-side secure contexts, we may have to force the
2066		 * page table format to V8L.
2067		 */
2068 ret = arm_smmu_set_pt_format(smmu_domain,
2069 &smmu_domain->pgtbl_cfg);
2070 if (ret)
2071 goto out_clear_smmu;
Patrick Dalyc190d932016-08-30 17:23:28 -07002072
2073 /*
2074 * Request context fault interrupt. Do this last to avoid the
2075 * handler seeing a half-initialised domain state.
2076 */
2077 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
2078 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08002079 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
2080 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07002081 if (ret < 0) {
2082 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
2083 cfg->irptndx, irq);
2084 cfg->irptndx = INVALID_IRPTNDX;
2085 goto out_clear_smmu;
2086 }
2087 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01002088 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002089 }
Will Deacon518f7132014-11-14 17:17:54 +00002090 mutex_unlock(&smmu_domain->init_mutex);
2091
2092 /* Publish page table ops for map/unmap */
2093 smmu_domain->pgtbl_ops = pgtbl_ops;
Shiraz Hashimeca8c2e2018-01-15 20:08:38 +05302094 if (arm_smmu_is_slave_side_secure(smmu_domain) &&
2095 !arm_smmu_master_attached(smmu, dev->iommu_fwspec))
2096 arm_smmu_restore_sec_cfg(smmu, cfg->cbndx);
2097
Will Deacona9a1b0b2014-05-01 18:05:08 +01002098 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002099
Will Deacon518f7132014-11-14 17:17:54 +00002100out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06002101 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002102 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002103out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00002104 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002105 return ret;
2106}
2107
Patrick Daly77db4f92016-10-14 15:34:10 -07002108static void arm_smmu_domain_reinit(struct arm_smmu_domain *smmu_domain)
2109{
2110 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
2111 smmu_domain->cfg.cbndx = INVALID_CBNDX;
2112 smmu_domain->secure_vmid = VMID_INVAL;
2113}
2114
Will Deacon45ae7cf2013-06-24 18:31:25 +01002115static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
2116{
Joerg Roedel1d672632015-03-26 13:43:10 +01002117 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01002118 struct arm_smmu_device *smmu = smmu_domain->smmu;
2119 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002120 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07002121 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002122 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002123
Robin Murphy7e96c742016-09-14 15:26:46 +01002124 if (!smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002125 return;
2126
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002127 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002128 if (ret) {
2129		WARN_ONCE(ret, "Whoops, powering on smmu %p failed. Leaking context bank\n",
2130 smmu);
2131 return;
2132 }
2133
Patrick Dalyc190d932016-08-30 17:23:28 -07002134 dynamic = is_dynamic_domain(domain);
2135 if (dynamic) {
2136 arm_smmu_free_asid(domain);
2137 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002138 arm_smmu_power_off(smmu->pwr);
Patrick Dalye271f212016-10-04 13:24:49 -07002139 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002140 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002141 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002142 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly77db4f92016-10-14 15:34:10 -07002143 arm_smmu_domain_reinit(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07002144 return;
2145 }
2146
Will Deacon518f7132014-11-14 17:17:54 +00002147 /*
2148 * Disable the context bank and free the page tables before freeing
2149 * it.
2150 */
Robin Murphy6549a1f2017-08-08 14:56:14 +01002151 smmu->cbs[cfg->cbndx].cfg = NULL;
2152 arm_smmu_write_context_bank(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01002153
Will Deacon44680ee2014-06-25 11:29:12 +01002154 if (cfg->irptndx != INVALID_IRPTNDX) {
2155 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08002156 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002157 }
2158
Markus Elfring44830b02015-11-06 18:32:41 +01002159 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07002160 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002161 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002162 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002163 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01002164 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Charan Teja Reddy4971ca42018-01-23 18:27:08 +05302165	/* The non-secure context bank index is set to zero anyway, so
2166	 * clear the secure CB bitmap directly here.
2167	 */
2168 if (arm_smmu_is_slave_side_secure(smmu_domain))
2169 __arm_smmu_free_bitmap(smmu->secure_context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002170
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002171 arm_smmu_power_off(smmu->pwr);
Patrick Daly77db4f92016-10-14 15:34:10 -07002172 arm_smmu_domain_reinit(smmu_domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002173}
2174
Joerg Roedel1d672632015-03-26 13:43:10 +01002175static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002176{
2177 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002178
Patrick Daly09801312016-08-29 17:02:52 -07002179 /* Do not support DOMAIN_DMA for now */
2180 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01002181 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002182 /*
2183 * Allocate the domain and initialise some of its data structures.
2184 * We can't really do anything meaningful until we've added a
2185 * master.
2186 */
2187 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
2188 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01002189 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002190
Robin Murphy7e96c742016-09-14 15:26:46 +01002191 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
2192 iommu_get_dma_cookie(&smmu_domain->domain))) {
Robin Murphy9adb9592016-01-26 18:06:36 +00002193 kfree(smmu_domain);
2194 return NULL;
2195 }
2196
Will Deacon518f7132014-11-14 17:17:54 +00002197 mutex_init(&smmu_domain->init_mutex);
2198 spin_lock_init(&smmu_domain->pgtbl_lock);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002199 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
2200 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07002201 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002202 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Patrick Daly2d600832018-02-11 15:12:55 -08002203 INIT_LIST_HEAD(&smmu_domain->nonsecure_pool);
Patrick Daly77db4f92016-10-14 15:34:10 -07002204 arm_smmu_domain_reinit(smmu_domain);
Joerg Roedel1d672632015-03-26 13:43:10 +01002205
2206 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002207}
2208
Joerg Roedel1d672632015-03-26 13:43:10 +01002209static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002210{
Joerg Roedel1d672632015-03-26 13:43:10 +01002211 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01002212
2213 /*
2214 * Free the domain resources. We assume that all devices have
2215 * already been detached.
2216 */
Robin Murphy9adb9592016-01-26 18:06:36 +00002217 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002218 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002219 kfree(smmu_domain);
2220}
2221
Robin Murphy468f4942016-09-12 17:13:49 +01002222static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
2223{
2224 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphyd5b41782016-09-14 15:21:39 +01002225 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
Robin Murphy468f4942016-09-12 17:13:49 +01002226
2227 if (smr->valid)
2228 reg |= SMR_VALID;
2229 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
2230}
2231
Robin Murphya754fd12016-09-12 17:13:50 +01002232static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
2233{
2234 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
2235 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
2236 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
Patrick Daly7f377fe2017-10-06 17:37:10 -07002237 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT |
2238 S2CR_SHCFG_NSH << S2CR_SHCFG_SHIFT;
Robin Murphya754fd12016-09-12 17:13:50 +01002239
2240 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
2241}
2242
2243static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
2244{
2245 arm_smmu_write_s2cr(smmu, idx);
2246 if (smmu->smrs)
2247 arm_smmu_write_smr(smmu, idx);
2248}
2249
Robin Murphy6668f692016-09-12 17:13:54 +01002250static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy468f4942016-09-12 17:13:49 +01002251{
2252 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy6668f692016-09-12 17:13:54 +01002253 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002254
Robin Murphy6668f692016-09-12 17:13:54 +01002255 /* Stream indexing is blissfully easy */
2256 if (!smrs)
2257 return id;
Robin Murphy468f4942016-09-12 17:13:49 +01002258
Robin Murphy6668f692016-09-12 17:13:54 +01002259 /* Validating SMRs is... less so */
2260 for (i = 0; i < smmu->num_mapping_groups; ++i) {
2261 if (!smrs[i].valid) {
2262 /*
2263 * Note the first free entry we come across, which
2264 * we'll claim in the end if nothing else matches.
2265 */
2266 if (free_idx < 0)
2267 free_idx = i;
Robin Murphy468f4942016-09-12 17:13:49 +01002268 continue;
2269 }
Robin Murphy6668f692016-09-12 17:13:54 +01002270 /*
2271 * If the new entry is _entirely_ matched by an existing entry,
2272 * then reuse that, with the guarantee that there also cannot
2273 * be any subsequent conflicting entries. In normal use we'd
2274 * expect simply identical entries for this case, but there's
2275 * no harm in accommodating the generalisation.
2276 */
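		/*
		 * Hypothetical example: if smrs[i] is {id 0x400, mask 0x7f}
		 * (stream IDs 0x400-0x47f) and the new request is {id 0x410,
		 * mask 0x0f} (0x410-0x41f), the new entry is a strict subset,
		 * both checks below pass, and index i is reused.
		 */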
2277 if ((mask & smrs[i].mask) == mask &&
2278 !((id ^ smrs[i].id) & ~smrs[i].mask))
2279 return i;
2280 /*
2281 * If the new entry has any other overlap with an existing one,
2282 * though, then there always exists at least one stream ID
2283 * which would cause a conflict, and we can't allow that risk.
2284 */
2285 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
2286 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002287 }
2288
Robin Murphy6668f692016-09-12 17:13:54 +01002289 return free_idx;
2290}
2291
2292static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
2293{
2294 if (--smmu->s2crs[idx].count)
2295 return false;
2296
2297 smmu->s2crs[idx] = s2cr_init_val;
2298 if (smmu->smrs)
2299 smmu->smrs[idx].valid = false;
2300
2301 return true;
2302}
2303
2304static int arm_smmu_master_alloc_smes(struct device *dev)
2305{
Robin Murphy06e393e2016-09-12 17:13:55 +01002306 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2307 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy6668f692016-09-12 17:13:54 +01002308 struct arm_smmu_device *smmu = cfg->smmu;
2309 struct arm_smmu_smr *smrs = smmu->smrs;
2310 struct iommu_group *group;
2311 int i, idx, ret;
2312
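	/*
	 * Lock ordering: iommu_group_mutex is held across the whole
	 * allocation, while stream_map_mutex only guards the stream-map
	 * tables and is dropped before iommu_group_get_for_dev() is
	 * called.
	 */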
Swathi Sridharfa26bd52018-04-25 18:26:14 -07002313 mutex_lock(&smmu->iommu_group_mutex);
Robin Murphy6668f692016-09-12 17:13:54 +01002314 mutex_lock(&smmu->stream_map_mutex);
2315 /* Figure out a viable stream map entry allocation */
Robin Murphy06e393e2016-09-12 17:13:55 +01002316 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002317 u16 sid = fwspec->ids[i];
2318 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
2319
Robin Murphy6668f692016-09-12 17:13:54 +01002320 if (idx != INVALID_SMENDX) {
2321 ret = -EEXIST;
Swathi Sridharfa26bd52018-04-25 18:26:14 -07002322 goto sme_err;
Robin Murphy6668f692016-09-12 17:13:54 +01002323 }
2324
Robin Murphy7e96c742016-09-14 15:26:46 +01002325 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy6668f692016-09-12 17:13:54 +01002326 if (ret < 0)
Swathi Sridharfa26bd52018-04-25 18:26:14 -07002327 goto sme_err;
Robin Murphy6668f692016-09-12 17:13:54 +01002328
2329 idx = ret;
2330 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002331 smrs[idx].id = sid;
2332 smrs[idx].mask = mask;
Robin Murphy6668f692016-09-12 17:13:54 +01002333 smrs[idx].valid = true;
2334 }
2335 smmu->s2crs[idx].count++;
2336 cfg->smendx[i] = (s16)idx;
2337 }
Swathi Sridharfa26bd52018-04-25 18:26:14 -07002338 mutex_unlock(&smmu->stream_map_mutex);
Robin Murphy6668f692016-09-12 17:13:54 +01002339
2340 group = iommu_group_get_for_dev(dev);
2341 if (!group)
2342 group = ERR_PTR(-ENOMEM);
2343 if (IS_ERR(group)) {
2344 ret = PTR_ERR(group);
Swathi Sridharfa26bd52018-04-25 18:26:14 -07002345 goto iommu_group_err;
Robin Murphy6668f692016-09-12 17:13:54 +01002346 }
2347 iommu_group_put(group);
Robin Murphy468f4942016-09-12 17:13:49 +01002348
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002349 /* It worked! Don't poke the actual hardware until we've attached */
2350 for_each_cfg_sme(fwspec, i, idx)
Robin Murphy6668f692016-09-12 17:13:54 +01002351 smmu->s2crs[idx].group = group;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002352
Swathi Sridharfa26bd52018-04-25 18:26:14 -07002353 mutex_unlock(&smmu->iommu_group_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002354 return 0;
2355
Swathi Sridharfa26bd52018-04-25 18:26:14 -07002356iommu_group_err:
2357 mutex_lock(&smmu->stream_map_mutex);
2358
2359sme_err:
Robin Murphy468f4942016-09-12 17:13:49 +01002360 while (i--) {
Robin Murphy6668f692016-09-12 17:13:54 +01002361 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy468f4942016-09-12 17:13:49 +01002362 cfg->smendx[i] = INVALID_SMENDX;
2363 }
Robin Murphy6668f692016-09-12 17:13:54 +01002364 mutex_unlock(&smmu->stream_map_mutex);
Swathi Sridharfa26bd52018-04-25 18:26:14 -07002365 mutex_unlock(&smmu->iommu_group_mutex);
Robin Murphy6668f692016-09-12 17:13:54 +01002366 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002367}
2368
Robin Murphy06e393e2016-09-12 17:13:55 +01002369static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002370{
Robin Murphy06e393e2016-09-12 17:13:55 +01002371 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
2372 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy1fb519a2016-09-12 17:13:53 +01002373 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01002374
Robin Murphy6668f692016-09-12 17:13:54 +01002375 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002376 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002377 if (arm_smmu_free_sme(smmu, idx))
2378 arm_smmu_write_sme(smmu, idx);
Robin Murphy468f4942016-09-12 17:13:49 +01002379 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002380 }
Robin Murphy6668f692016-09-12 17:13:54 +01002381 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002382}
2383
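/*
 * Drop this master's references on its stream-map entries. Once the
 * last attachment for an entry goes away, its SMR and S2CR are cleared
 * so the stream no longer matches, and the domain's TLB entries are
 * flushed to remove any stale mappings.
 */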
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002384static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
2385 struct iommu_fwspec *fwspec)
2386{
2387 struct arm_smmu_device *smmu = smmu_domain->smmu;
2388 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2389 int i, idx;
2390 const struct iommu_gather_ops *tlb;
2391
2392 tlb = smmu_domain->pgtbl_cfg.tlb;
2393
2394 mutex_lock(&smmu->stream_map_mutex);
2395 for_each_cfg_sme(fwspec, i, idx) {
2396 WARN_ON(s2cr[idx].attach_count == 0);
2397 s2cr[idx].attach_count -= 1;
2398
2399 if (s2cr[idx].attach_count > 0)
2400 continue;
2401
2402 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
2403 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
2404 }
2405 mutex_unlock(&smmu->stream_map_mutex);
2406
2407 /* Ensure there are no stale mappings for this context bank */
2408 tlb->tlb_flush_all(smmu_domain);
2409}
2410
Will Deacon45ae7cf2013-06-24 18:31:25 +01002411static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphy06e393e2016-09-12 17:13:55 +01002412 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002413{
Will Deacon44680ee2014-06-25 11:29:12 +01002414 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002415 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2416 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
2417 u8 cbndx = smmu_domain->cfg.cbndx;
Robin Murphy6668f692016-09-12 17:13:54 +01002418 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002419
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002420 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002421 for_each_cfg_sme(fwspec, i, idx) {
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002422 if (s2cr[idx].attach_count++ > 0)
Robin Murphy6668f692016-09-12 17:13:54 +01002423 continue;
Robin Murphya754fd12016-09-12 17:13:50 +01002424
2425 s2cr[idx].type = type;
2426 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
2427 s2cr[idx].cbndx = cbndx;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002428 arm_smmu_write_sme(smmu, idx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002429 }
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002430 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002431
2432 return 0;
2433}
2434
Patrick Daly09801312016-08-29 17:02:52 -07002435static void arm_smmu_detach_dev(struct iommu_domain *domain,
2436 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002437{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002438 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07002439 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002440 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly09801312016-08-29 17:02:52 -07002441 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07002442 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07002443
2444 if (dynamic)
2445 return;
2446
Patrick Daly09801312016-08-29 17:02:52 -07002447 if (!smmu) {
2448 dev_err(dev, "Domain not attached; cannot detach!\n");
2449 return;
2450 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002451
Vijayanand Jitta25cd32c2017-11-16 15:14:36 +05302452 if (atomic_domain)
2453 arm_smmu_power_on_atomic(smmu->pwr);
2454 else
2455 arm_smmu_power_on(smmu->pwr);
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002456
Vijayanand Jitta25cd32c2017-11-16 15:14:36 +05302457 arm_smmu_domain_remove_master(smmu_domain, fwspec);
2458 arm_smmu_power_off(smmu->pwr);
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002459}
2460
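/*
 * For master-side secure domains, hand the queued page-table pages over to
 * the hypervisor so they are shared between HLOS (read/write) and the secure
 * VMID (read-only). Entries are consumed from pte_info_list and freed once
 * assigned.
 */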
Patrick Dalye271f212016-10-04 13:24:49 -07002461static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07002462{
Patrick Dalye271f212016-10-04 13:24:49 -07002463 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002464 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2465 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
2466 int source_vmid = VMID_HLOS;
2467 struct arm_smmu_pte_info *pte_info, *temp;
2468
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302469 if (!arm_smmu_is_master_side_secure(smmu_domain))
Patrick Dalye271f212016-10-04 13:24:49 -07002470 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002471
Patrick Dalye271f212016-10-04 13:24:49 -07002472 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002473 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2474 PAGE_SIZE, &source_vmid, 1,
2475 dest_vmids, dest_perms, 2);
2476 if (WARN_ON(ret))
2477 break;
2478 }
2479
2480 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2481 entry) {
2482 list_del(&pte_info->entry);
2483 kfree(pte_info);
2484 }
Patrick Dalye271f212016-10-04 13:24:49 -07002485 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002486}
2487
2488static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2489{
2490 int ret;
2491 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002492 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002493 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2494 struct arm_smmu_pte_info *pte_info, *temp;
2495
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302496 if (!arm_smmu_is_master_side_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002497 return;
2498
2499 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2500 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2501 PAGE_SIZE, source_vmlist, 2,
2502 &dest_vmids, &dest_perms, 1);
2503 if (WARN_ON(ret))
2504 break;
2505 free_pages_exact(pte_info->virt_addr, pte_info->size);
2506 }
2507
2508 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2509 entry) {
2510 list_del(&pte_info->entry);
2511 kfree(pte_info);
2512 }
2513}
2514
2515static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2516{
2517 struct arm_smmu_domain *smmu_domain = cookie;
2518 struct arm_smmu_pte_info *pte_info;
2519
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302520 if (smmu_domain->slave_side_secure ||
2521 !arm_smmu_has_secure_vmid(smmu_domain)) {
2522 if (smmu_domain->slave_side_secure)
2523 WARN(1, "slave side secure is enforced\n");
2524 else
2525			WARN(1, "Invalid VMID is set!\n");
2526 return;
2527 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002528
2529 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2530 if (!pte_info)
2531 return;
2532
2533 pte_info->virt_addr = addr;
2534 pte_info->size = size;
2535 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2536}
2537
2538static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2539{
2540 struct arm_smmu_domain *smmu_domain = cookie;
2541 struct arm_smmu_pte_info *pte_info;
2542
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302543 if (smmu_domain->slave_side_secure ||
2544 !arm_smmu_has_secure_vmid(smmu_domain)) {
2545 if (smmu_domain->slave_side_secure)
2546 WARN(1, "slave side secure is enforced\n");
2547 else
2548			WARN(1, "Invalid VMID is set!\n");
2549 return -EINVAL;
2550 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002551
2552 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2553 if (!pte_info)
2554 return -ENOMEM;
2555 pte_info->virt_addr = addr;
2556 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2557 return 0;
2558}
2559
Patrick Daly2d600832018-02-11 15:12:55 -08002560static void arm_smmu_prealloc_memory(struct arm_smmu_domain *smmu_domain,
Patrick Dalya7414b12018-03-21 14:30:31 -07002561 size_t size, struct list_head *pool)
Patrick Daly2d600832018-02-11 15:12:55 -08002562{
Patrick Daly2d600832018-02-11 15:12:55 -08002563 int i;
Patrick Dalya7414b12018-03-21 14:30:31 -07002564 u32 nr = 0;
Patrick Daly2d600832018-02-11 15:12:55 -08002565 struct page *page;
2566
2567 if ((smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC)) ||
2568 arm_smmu_has_secure_vmid(smmu_domain))
2569 return;
2570
Patrick Daly2d600832018-02-11 15:12:55 -08002571 /* number of 2nd level pagetable entries */
2572 nr += round_up(size, SZ_1G) >> 30;
2573	/* number of 3rd level pagetable entries */
2574 nr += round_up(size, SZ_2M) >> 21;
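	/*
	 * For example, a 3MB request rounds up to one 1GB region (one
	 * second-level table) plus two 2MB regions (two third-level tables),
	 * so nr = 3 pages are preallocated.
	 */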
2575
2576 /* Retry later with atomic allocation on error */
2577 for (i = 0; i < nr; i++) {
2578 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
2579 if (!page)
2580 break;
2581 list_add(&page->lru, pool);
2582 }
2583}
2584
Patrick Dalya7414b12018-03-21 14:30:31 -07002585static void arm_smmu_prealloc_memory_sg(struct arm_smmu_domain *smmu_domain,
2586 struct scatterlist *sgl, int nents,
2587 struct list_head *pool)
2588{
2589 int i;
2590 size_t size = 0;
2591 struct scatterlist *sg;
2592
2593 if ((smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC)) ||
2594 arm_smmu_has_secure_vmid(smmu_domain))
2595 return;
2596
2597 for_each_sg(sgl, sg, nents, i)
2598 size += sg->length;
2599
2600 arm_smmu_prealloc_memory(smmu_domain, size, pool);
2601}
2602
Patrick Daly2d600832018-02-11 15:12:55 -08002603static void arm_smmu_release_prealloc_memory(
2604 struct arm_smmu_domain *smmu_domain, struct list_head *list)
2605{
2606 struct page *page, *tmp;
Patrick Daly2d600832018-02-11 15:12:55 -08002607
2608 list_for_each_entry_safe(page, tmp, list, lru) {
2609 list_del(&page->lru);
2610 __free_pages(page, 0);
Patrick Daly2d600832018-02-11 15:12:55 -08002611 }
2612}
2613
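/*
 * Attach flow: take a power vote, finalise the domain context (page tables
 * and context bank), then program the stream mappings. Dynamic domains stop
 * after context initialisation and leave the stream mappings untouched. For
 * atomic domains an additional non-atomic power vote is held until detach,
 * presumably so that later map/unmap calls only need the atomic vote.
 */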
Will Deacon45ae7cf2013-06-24 18:31:25 +01002614static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2615{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002616 int ret;
Robin Murphy06e393e2016-09-12 17:13:55 +01002617 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Will Deacon518f7132014-11-14 17:17:54 +00002618 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +01002619 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly8befb662016-08-17 20:03:28 -07002620 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002621
Robin Murphy06e393e2016-09-12 17:13:55 +01002622 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002623 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2624 return -ENXIO;
2625 }
Robin Murphy06e393e2016-09-12 17:13:55 +01002626
Robin Murphy4f79b142016-10-17 12:06:21 +01002627 /*
2628 * FIXME: The arch/arm DMA API code tries to attach devices to its own
2629 * domains between of_xlate() and add_device() - we have no way to cope
2630 * with that, so until ARM gets converted to rely on groups and default
2631 * domains, just say no (but more politely than by dereferencing NULL).
2632 * This should be at least a WARN_ON once that's sorted.
2633 */
2634 if (!fwspec->iommu_priv)
2635 return -ENODEV;
2636
Robin Murphy06e393e2016-09-12 17:13:55 +01002637 smmu = fwspec_smmu(fwspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002638
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002639 /* Enable Clocks and Power */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002640 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002641 if (ret)
2642 return ret;
2643
Will Deacon518f7132014-11-14 17:17:54 +00002644 /* Ensure that the domain is finalised */
Patrick Dalyea63baa2017-02-13 17:11:33 -08002645 ret = arm_smmu_init_domain_context(domain, smmu, dev);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002646 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002647 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002648
Patrick Dalyc190d932016-08-30 17:23:28 -07002649 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002650 if (is_dynamic_domain(domain)) {
2651 ret = 0;
2652 goto out_power_off;
2653 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002654
Will Deacon45ae7cf2013-06-24 18:31:25 +01002655 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002656 * Sanity check the domain. We don't support domains across
2657 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002658 */
Robin Murphy06e393e2016-09-12 17:13:55 +01002659 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002660 dev_err(dev,
2661 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002662 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002663 ret = -EINVAL;
2664 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002665 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002666
2667 /* Looks ok, so add the device to the domain */
Robin Murphy06e393e2016-09-12 17:13:55 +01002668 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002669
2670out_power_off:
Patrick Daly466237d2016-10-14 15:59:14 -07002671 /*
2672 * Keep an additional vote for non-atomic power until domain is
2673 * detached
2674 */
2675 if (!ret && atomic_domain) {
2676 WARN_ON(arm_smmu_power_on(smmu->pwr));
2677 arm_smmu_power_off_atomic(smmu->pwr);
2678 }
2679
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002680 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002681
Will Deacon45ae7cf2013-06-24 18:31:25 +01002682 return ret;
2683}
2684
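/*
 * Map a single run of physical memory. Slave-side secure domains are
 * programmed through the secure world via msm_secure_smmu_map(). For other
 * domains, page-table memory is preallocated outside the pgtbl spinlock and
 * spliced into smmu_domain->nonsecure_pool so ops->map() can draw from it,
 * and any page-table pages queued for secure assignment are handed over once
 * the mapping is in place.
 */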
Will Deacon45ae7cf2013-06-24 18:31:25 +01002685static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002686 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002687{
Will Deacon518f7132014-11-14 17:17:54 +00002688 int ret;
2689 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002690 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002691	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Patrick Dalya7414b12018-03-21 14:30:31 -07002692 LIST_HEAD(nonsecure_pool);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002693
Will Deacon518f7132014-11-14 17:17:54 +00002694 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002695 return -ENODEV;
2696
Charan Teja Reddy313991e2018-03-12 12:19:31 +05302697 if (arm_smmu_is_slave_side_secure(smmu_domain))
2698 return msm_secure_smmu_map(domain, iova, paddr, size, prot);
2699
Patrick Dalya7414b12018-03-21 14:30:31 -07002700 arm_smmu_prealloc_memory(smmu_domain, size, &nonsecure_pool);
Patrick Dalye271f212016-10-04 13:24:49 -07002701 arm_smmu_secure_domain_lock(smmu_domain);
2702
Will Deacon518f7132014-11-14 17:17:54 +00002703 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya7414b12018-03-21 14:30:31 -07002704 list_splice_init(&nonsecure_pool, &smmu_domain->nonsecure_pool);
Will Deacon518f7132014-11-14 17:17:54 +00002705 ret = ops->map(ops, iova, paddr, size, prot);
Patrick Dalya7414b12018-03-21 14:30:31 -07002706 list_splice_init(&smmu_domain->nonsecure_pool, &nonsecure_pool);
Will Deacon518f7132014-11-14 17:17:54 +00002707 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002708
2709 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002710 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002711
Patrick Dalya7414b12018-03-21 14:30:31 -07002712 arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);
Will Deacon518f7132014-11-14 17:17:54 +00002713 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002714}
2715
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002716static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
2717 dma_addr_t iova)
2718{
2719 uint64_t ret;
2720 unsigned long flags;
2721 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2722 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2723
2724 if (!ops)
2725 return 0;
2726
2727 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2728 ret = ops->iova_to_pte(ops, iova);
2729 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2730 return ret;
2731}
2732
Will Deacon45ae7cf2013-06-24 18:31:25 +01002733static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2734 size_t size)
2735{
Will Deacon518f7132014-11-14 17:17:54 +00002736 size_t ret;
2737 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002738 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002739	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002740
Will Deacon518f7132014-11-14 17:17:54 +00002741 if (!ops)
2742 return 0;
2743
Charan Teja Reddy313991e2018-03-12 12:19:31 +05302744 if (arm_smmu_is_slave_side_secure(smmu_domain))
2745 return msm_secure_smmu_unmap(domain, iova, size);
2746
Patrick Daly8befb662016-08-17 20:03:28 -07002747 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002748 if (ret)
2749 return ret;
2750
Patrick Dalye271f212016-10-04 13:24:49 -07002751 arm_smmu_secure_domain_lock(smmu_domain);
2752
Will Deacon518f7132014-11-14 17:17:54 +00002753 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2754 ret = ops->unmap(ops, iova, size);
2755 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002756
Patrick Daly8befb662016-08-17 20:03:28 -07002757 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002758 /*
2759 * While splitting up block mappings, we might allocate page table
2760	 * memory during unmap, so the VMIDs need to be assigned to the
2761 * memory here as well.
2762 */
2763 arm_smmu_assign_table(smmu_domain);
2764	/* Also unassign any pages that were freed during unmap */
2765 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002766 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002767 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002768}
2769
Patrick Daly88d321d2017-02-09 18:02:13 -08002770#define MAX_MAP_SG_BATCH_SIZE (SZ_4M)
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002771static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2772 struct scatterlist *sg, unsigned int nents, int prot)
2773{
2774 int ret;
Patrick Daly88d321d2017-02-09 18:02:13 -08002775 size_t size, batch_size, size_to_unmap = 0;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002776 unsigned long flags;
2777 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2778 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Patrick Daly88d321d2017-02-09 18:02:13 -08002779 unsigned int idx_start, idx_end;
2780 struct scatterlist *sg_start, *sg_end;
2781 unsigned long __saved_iova_start;
Patrick Daly2d600832018-02-11 15:12:55 -08002782 LIST_HEAD(nonsecure_pool);
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002783
2784 if (!ops)
2785 return -ENODEV;
2786
Charan Teja Reddy313991e2018-03-12 12:19:31 +05302787 if (arm_smmu_is_slave_side_secure(smmu_domain))
2788 return msm_secure_smmu_map_sg(domain, iova, sg, nents, prot);
2789
Patrick Dalya7414b12018-03-21 14:30:31 -07002790 arm_smmu_prealloc_memory_sg(smmu_domain, sg, nents, &nonsecure_pool);
Patrick Daly4b9a7ad2017-09-22 17:31:13 -07002791 arm_smmu_secure_domain_lock(smmu_domain);
2792
Patrick Daly88d321d2017-02-09 18:02:13 -08002793 __saved_iova_start = iova;
2794 idx_start = idx_end = 0;
2795 sg_start = sg_end = sg;
2796 while (idx_end < nents) {
2797 batch_size = sg_end->length;
2798 sg_end = sg_next(sg_end);
2799 idx_end++;
2800 while ((idx_end < nents) &&
2801 (batch_size + sg_end->length < MAX_MAP_SG_BATCH_SIZE)) {
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002802
Patrick Daly88d321d2017-02-09 18:02:13 -08002803 batch_size += sg_end->length;
2804 sg_end = sg_next(sg_end);
2805 idx_end++;
2806 }
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002807
Patrick Daly88d321d2017-02-09 18:02:13 -08002808 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Daly2d600832018-02-11 15:12:55 -08002809 list_splice_init(&nonsecure_pool, &smmu_domain->nonsecure_pool);
Patrick Daly88d321d2017-02-09 18:02:13 -08002810 ret = ops->map_sg(ops, iova, sg_start, idx_end - idx_start,
2811 prot, &size);
Patrick Daly2d600832018-02-11 15:12:55 -08002812 list_splice_init(&smmu_domain->nonsecure_pool, &nonsecure_pool);
Patrick Daly88d321d2017-02-09 18:02:13 -08002813 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2814 /* Returns 0 on error */
2815 if (!ret) {
2816 size_to_unmap = iova + size - __saved_iova_start;
2817 goto out;
2818 }
2819
2820 iova += batch_size;
2821 idx_start = idx_end;
2822 sg_start = sg_end;
2823 }
2824
2825out:
Patrick Dalyc11d1082016-09-01 15:52:44 -07002826 arm_smmu_assign_table(smmu_domain);
2827
Patrick Daly88d321d2017-02-09 18:02:13 -08002828 if (size_to_unmap) {
2829 arm_smmu_unmap(domain, __saved_iova_start, size_to_unmap);
2830 iova = __saved_iova_start;
2831 }
Patrick Daly4b9a7ad2017-09-22 17:31:13 -07002832 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly2d600832018-02-11 15:12:55 -08002833 arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);
Patrick Daly88d321d2017-02-09 18:02:13 -08002834 return iova - __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002835}
2836
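/*
 * Hardware address translation: write the page-aligned VA to ATS1PR, poll
 * ATSR until the walk completes, then read the result from PAR. On a timeout
 * the software table walk result is logged and 0 is returned; likewise a
 * fault flagged in PAR yields a 0 physical address.
 */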
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002837static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyad441dd2016-09-15 15:50:46 -07002838 dma_addr_t iova)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002839{
Joerg Roedel1d672632015-03-26 13:43:10 +01002840 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002841 struct arm_smmu_device *smmu = smmu_domain->smmu;
2842 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2843	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2844 struct device *dev = smmu->dev;
2845 void __iomem *cb_base;
2846 u32 tmp;
2847 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002848 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002849
2850 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2851
Robin Murphy661d9622015-05-27 17:09:34 +01002852 /* ATS1 registers can only be written atomically */
2853 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002854 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002855 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2856 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002857 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002858
2859 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2860 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002861 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002862 dev_err(dev,
2863 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2864 &iova, &phys);
2865 phys = 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002866 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002867 }
2868
Robin Murphyf9a05f02016-04-13 18:13:01 +01002869 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002870 if (phys & CB_PAR_F) {
2871 dev_err(dev, "translation fault!\n");
2872 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002873 phys = 0;
2874 } else {
2875 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002876 }
Patrick Dalyad441dd2016-09-15 15:50:46 -07002877
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002878 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002879}
2880
Will Deacon45ae7cf2013-06-24 18:31:25 +01002881static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002882 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002883{
Will Deacon518f7132014-11-14 17:17:54 +00002884 phys_addr_t ret;
2885 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002886 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002887	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002888
Will Deacon518f7132014-11-14 17:17:54 +00002889 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002890 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002891
Will Deacon518f7132014-11-14 17:17:54 +00002892 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002893 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002894 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002895
Will Deacon518f7132014-11-14 17:17:54 +00002896 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002897}
2898
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002899/*
2900 * This function can sleep, and cannot be called from atomic context. Will
2901 * power on register block if required. This restriction does not apply to the
2902 * original iova_to_phys() op.
2903 */
2904static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2905 dma_addr_t iova)
2906{
2907 phys_addr_t ret = 0;
2908 unsigned long flags;
2909 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly62ba1922017-08-30 16:47:18 -07002910 struct arm_smmu_device *smmu = smmu_domain->smmu;
2911
2912 if (smmu->options & ARM_SMMU_OPT_DISABLE_ATOS)
2913 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002914
Patrick Dalyad441dd2016-09-15 15:50:46 -07002915 if (smmu_domain->smmu->arch_ops &&
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002916 smmu_domain->smmu->arch_ops->iova_to_phys_hard) {
2917 ret = smmu_domain->smmu->arch_ops->iova_to_phys_hard(
Patrick Dalyad441dd2016-09-15 15:50:46 -07002918 domain, iova);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002919 return ret;
2920 }
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002921
2922 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2923 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2924 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyad441dd2016-09-15 15:50:46 -07002925 ret = __arm_smmu_iova_to_phys_hard(domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002926
2927 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2928
2929 return ret;
2930}
2931
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002932static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002933{
Will Deacond0948942014-06-24 17:30:10 +01002934 switch (cap) {
2935 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002936 /*
2937 * Return true here as the SMMU can always send out coherent
2938 * requests.
2939 */
2940 return true;
Will Deacond0948942014-06-24 17:30:10 +01002941 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002942 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002943 case IOMMU_CAP_NOEXEC:
2944 return true;
Will Deacond0948942014-06-24 17:30:10 +01002945 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002946 return false;
Will Deacond0948942014-06-24 17:30:10 +01002947 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002948}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002949
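/*
 * On targets where the secure world owns the SMMU programming (static
 * context banks), the kernel must not write the global register space or any
 * secure context bank. arm_smmu_skip_write() classifies a register address
 * so such writes can be silently dropped.
 */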
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +05302950#ifdef CONFIG_MSM_TZ_SMMU
2951static struct arm_smmu_device *arm_smmu_get_by_addr(void __iomem *addr)
2952{
2953 struct arm_smmu_device *smmu;
2954 unsigned long flags;
2955
2956 spin_lock_irqsave(&arm_smmu_devices_lock, flags);
2957 list_for_each_entry(smmu, &arm_smmu_devices, list) {
2958 unsigned long base = (unsigned long)smmu->base;
2959 unsigned long mask = ~(smmu->size - 1);
2960
2961 if ((base & mask) == ((unsigned long)addr & mask)) {
2962 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2963 return smmu;
2964 }
2965 }
2966 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2967 return NULL;
2968}
2969
2970bool arm_smmu_skip_write(void __iomem *addr)
2971{
2972 struct arm_smmu_device *smmu;
Charan Teja Reddy4971ca42018-01-23 18:27:08 +05302973 int cb;
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +05302974
2975 smmu = arm_smmu_get_by_addr(addr);
Shiraz Hashima28a4792018-01-13 00:39:52 +05302976
2977 /* Skip write if smmu not available by now */
2978 if (!smmu)
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +05302979 return true;
Shiraz Hashima28a4792018-01-13 00:39:52 +05302980
Charan Teja Reddy65ff5e42018-02-19 15:32:28 +05302981 if (!arm_smmu_is_static_cb(smmu))
2982 return false;
2983
Shiraz Hashima28a4792018-01-13 00:39:52 +05302984 /* Do not write to global space */
2985 if (((unsigned long)addr & (smmu->size - 1)) < (smmu->size >> 1))
2986 return true;
2987
2988 /* Finally skip writing to secure CB */
2989 cb = ((unsigned long)addr & ((smmu->size >> 1) - 1)) >> PAGE_SHIFT;
Charan Teja Reddy4971ca42018-01-23 18:27:08 +05302990 if (test_bit(cb, smmu->secure_context_map))
2991 return true;
Shiraz Hashima28a4792018-01-13 00:39:52 +05302992
2993 return false;
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +05302994}
Charan Teja Reddy313991e2018-03-12 12:19:31 +05302995
2996static int msm_secure_smmu_map(struct iommu_domain *domain, unsigned long iova,
2997 phys_addr_t paddr, size_t size, int prot)
2998{
2999	int ret;
3000 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3001 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
3002
3003 ret = ops->map(ops, iova, paddr, size, prot);
3004
3005 return ret;
3006}
3007
3008static size_t msm_secure_smmu_unmap(struct iommu_domain *domain,
3009 unsigned long iova,
3010 size_t size)
3011{
3012 size_t ret;
3013 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3014 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
3015
3016 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
3017 if (ret)
3018 return ret;
3019
3020 ret = ops->unmap(ops, iova, size);
3021
3022 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
3023
3024 return ret;
3025}
3026
3027static size_t msm_secure_smmu_map_sg(struct iommu_domain *domain,
3028 unsigned long iova,
3029 struct scatterlist *sg,
3030 unsigned int nents, int prot)
3031{
3032 int ret;
3033 size_t size;
3034 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3035 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
3036
3037 ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
3038
3039 if (!ret)
3040 msm_secure_smmu_unmap(domain, iova, size);
3041
3042 return ret;
3043}
3044
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +05303045#endif
3046
Patrick Daly8e3371a2017-02-13 22:14:53 -08003047static struct arm_smmu_device *arm_smmu_get_by_list(struct device_node *np)
3048{
3049 struct arm_smmu_device *smmu;
3050 unsigned long flags;
3051
3052 spin_lock_irqsave(&arm_smmu_devices_lock, flags);
3053 list_for_each_entry(smmu, &arm_smmu_devices, list) {
3054 if (smmu->dev->of_node == np) {
3055 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
3056 return smmu;
3057 }
3058 }
3059 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
3060 return NULL;
3061}
3062
Robin Murphy7e96c742016-09-14 15:26:46 +01003063static int arm_smmu_match_node(struct device *dev, void *data)
3064{
3065 return dev->of_node == data;
3066}
3067
3068static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
3069{
3070 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
3071 np, arm_smmu_match_node);
3072 put_device(dev);
Patrick Daly8e3371a2017-02-13 22:14:53 -08003073 return dev ? dev_get_drvdata(dev) : arm_smmu_get_by_list(np);
Robin Murphy7e96c742016-09-14 15:26:46 +01003074}
3075
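/*
 * add_device resolves the owning SMMU (legacy binding or firmware spec),
 * checks every stream ID and SMR mask against the hardware's
 * streamid_mask/smr_mask_mask, then allocates the per-master cfg and its
 * stream mapping entries, all under a temporary power vote.
 */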
Will Deacon03edb222015-01-19 14:27:33 +00003076static int arm_smmu_add_device(struct device *dev)
3077{
Robin Murphy06e393e2016-09-12 17:13:55 +01003078 struct arm_smmu_device *smmu;
Robin Murphyd5b41782016-09-14 15:21:39 +01003079 struct arm_smmu_master_cfg *cfg;
Robin Murphy7e96c742016-09-14 15:26:46 +01003080 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyd5b41782016-09-14 15:21:39 +01003081 int i, ret;
3082
Robin Murphy7e96c742016-09-14 15:26:46 +01003083 if (using_legacy_binding) {
3084 ret = arm_smmu_register_legacy_master(dev, &smmu);
3085 fwspec = dev->iommu_fwspec;
3086 if (ret)
3087 goto out_free;
Robin Murphy22e6f6c2016-11-02 17:31:32 +00003088 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Robin Murphy7e96c742016-09-14 15:26:46 +01003089 smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
3090 if (!smmu)
3091 return -ENODEV;
3092 } else {
3093 return -ENODEV;
3094 }
Robin Murphyd5b41782016-09-14 15:21:39 +01003095
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003096 ret = arm_smmu_power_on(smmu->pwr);
3097 if (ret)
3098 goto out_free;
3099
Robin Murphyd5b41782016-09-14 15:21:39 +01003100 ret = -EINVAL;
Robin Murphy06e393e2016-09-12 17:13:55 +01003101 for (i = 0; i < fwspec->num_ids; i++) {
3102 u16 sid = fwspec->ids[i];
Robin Murphy7e96c742016-09-14 15:26:46 +01003103 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyd5b41782016-09-14 15:21:39 +01003104
Robin Murphy06e393e2016-09-12 17:13:55 +01003105 if (sid & ~smmu->streamid_mask) {
Robin Murphyd5b41782016-09-14 15:21:39 +01003106 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01003107 sid, smmu->streamid_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003108 goto out_pwr_off;
Robin Murphyd5b41782016-09-14 15:21:39 +01003109 }
Robin Murphy7e96c742016-09-14 15:26:46 +01003110 if (mask & ~smmu->smr_mask_mask) {
3111 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
3112 sid, smmu->smr_mask_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003113 goto out_pwr_off;
Robin Murphy7e96c742016-09-14 15:26:46 +01003114 }
Robin Murphyd5b41782016-09-14 15:21:39 +01003115 }
Will Deacon03edb222015-01-19 14:27:33 +00003116
Robin Murphy06e393e2016-09-12 17:13:55 +01003117 ret = -ENOMEM;
3118 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
3119 GFP_KERNEL);
3120 if (!cfg)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003121 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01003122
3123 cfg->smmu = smmu;
3124 fwspec->iommu_priv = cfg;
3125 while (i--)
3126 cfg->smendx[i] = INVALID_SMENDX;
3127
Robin Murphy6668f692016-09-12 17:13:54 +01003128 ret = arm_smmu_master_alloc_smes(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01003129 if (ret)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003130 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01003131
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003132 arm_smmu_power_off(smmu->pwr);
Robin Murphy06e393e2016-09-12 17:13:55 +01003133 return 0;
Robin Murphyd5b41782016-09-14 15:21:39 +01003134
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003135out_pwr_off:
3136 arm_smmu_power_off(smmu->pwr);
Robin Murphyd5b41782016-09-14 15:21:39 +01003137out_free:
Robin Murphy06e393e2016-09-12 17:13:55 +01003138 if (fwspec)
3139 kfree(fwspec->iommu_priv);
3140 iommu_fwspec_free(dev);
Robin Murphyd5b41782016-09-14 15:21:39 +01003141 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00003142}
3143
Will Deacon45ae7cf2013-06-24 18:31:25 +01003144static void arm_smmu_remove_device(struct device *dev)
3145{
Robin Murphy06e393e2016-09-12 17:13:55 +01003146 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003147 struct arm_smmu_device *smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01003148
Robin Murphy06e393e2016-09-12 17:13:55 +01003149 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyd5b41782016-09-14 15:21:39 +01003150 return;
Robin Murphya754fd12016-09-12 17:13:50 +01003151
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003152 smmu = fwspec_smmu(fwspec);
3153 if (arm_smmu_power_on(smmu->pwr)) {
3154 WARN_ON(1);
3155 return;
3156 }
3157
Robin Murphy06e393e2016-09-12 17:13:55 +01003158 arm_smmu_master_free_smes(fwspec);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01003159 iommu_group_remove_device(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01003160 kfree(fwspec->iommu_priv);
3161 iommu_fwspec_free(dev);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003162 arm_smmu_power_off(smmu->pwr);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003163}
3164
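/*
 * All stream mapping entries used by a device must resolve to a single
 * iommu_group, otherwise the device cannot be grouped (-EINVAL). PCI devices
 * fall back to pci_device_group(), everything else to generic_device_group(),
 * and the arch hook may still veto the result.
 */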
Joerg Roedelaf659932015-10-21 23:51:41 +02003165static struct iommu_group *arm_smmu_device_group(struct device *dev)
3166{
Robin Murphy06e393e2016-09-12 17:13:55 +01003167 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
3168 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy6668f692016-09-12 17:13:54 +01003169 struct iommu_group *group = NULL;
3170 int i, idx;
3171
Robin Murphy06e393e2016-09-12 17:13:55 +01003172 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01003173 if (group && smmu->s2crs[idx].group &&
3174 group != smmu->s2crs[idx].group)
3175 return ERR_PTR(-EINVAL);
3176
3177 group = smmu->s2crs[idx].group;
3178 }
3179
Patrick Daly03330cc2017-08-11 14:56:38 -07003180 if (!group) {
3181 if (dev_is_pci(dev))
3182 group = pci_device_group(dev);
3183 else
3184 group = generic_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02003185
Patrick Daly03330cc2017-08-11 14:56:38 -07003186 if (IS_ERR(group))
3187 return NULL;
3188 }
3189
3190 if (arm_smmu_arch_device_group(dev, group)) {
3191 iommu_group_put(group);
3192 return ERR_PTR(-EINVAL);
3193 }
Joerg Roedelaf659932015-10-21 23:51:41 +02003194
Joerg Roedelaf659932015-10-21 23:51:41 +02003195 return group;
3196}
3197
Will Deaconc752ce42014-06-25 22:46:31 +01003198static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
3199 enum iommu_attr attr, void *data)
3200{
Joerg Roedel1d672632015-03-26 13:43:10 +01003201 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06003202 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01003203
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003204 mutex_lock(&smmu_domain->init_mutex);
Will Deaconc752ce42014-06-25 22:46:31 +01003205 switch (attr) {
3206 case DOMAIN_ATTR_NESTING:
3207 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003208 ret = 0;
3209 break;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08003210 case DOMAIN_ATTR_PT_BASE_ADDR:
3211 *((phys_addr_t *)data) =
3212 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003213 ret = 0;
3214 break;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06003215 case DOMAIN_ATTR_CONTEXT_BANK:
3216 /* context bank index isn't valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003217 if (smmu_domain->smmu == NULL) {
3218 ret = -ENODEV;
3219 break;
3220 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06003221 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
3222 ret = 0;
3223 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06003224 case DOMAIN_ATTR_TTBR0: {
3225 u64 val;
3226 struct arm_smmu_device *smmu = smmu_domain->smmu;
3227 /* not valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003228 if (smmu == NULL) {
3229 ret = -ENODEV;
3230 break;
3231 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06003232 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
3233 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
3234 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
3235 << (TTBRn_ASID_SHIFT);
3236 *((u64 *)data) = val;
3237 ret = 0;
3238 break;
3239 }
3240 case DOMAIN_ATTR_CONTEXTIDR:
3241 /* not valid until attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003242 if (smmu_domain->smmu == NULL) {
3243 ret = -ENODEV;
3244 break;
3245 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06003246 *((u32 *)data) = smmu_domain->cfg.procid;
3247 ret = 0;
3248 break;
3249 case DOMAIN_ATTR_PROCID:
3250 *((u32 *)data) = smmu_domain->cfg.procid;
3251 ret = 0;
3252 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07003253 case DOMAIN_ATTR_DYNAMIC:
3254 *((int *)data) = !!(smmu_domain->attributes
3255 & (1 << DOMAIN_ATTR_DYNAMIC));
3256 ret = 0;
3257 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07003258 case DOMAIN_ATTR_NON_FATAL_FAULTS:
3259 *((int *)data) = !!(smmu_domain->attributes
3260 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
3261 ret = 0;
3262 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07003263 case DOMAIN_ATTR_S1_BYPASS:
3264 *((int *)data) = !!(smmu_domain->attributes
3265 & (1 << DOMAIN_ATTR_S1_BYPASS));
3266 ret = 0;
3267 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07003268 case DOMAIN_ATTR_SECURE_VMID:
3269 *((int *)data) = smmu_domain->secure_vmid;
3270 ret = 0;
3271 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08003272 case DOMAIN_ATTR_PGTBL_INFO: {
3273 struct iommu_pgtbl_info *info = data;
3274
3275 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
3276 ret = -ENODEV;
3277 break;
3278 }
3279 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
3280 ret = 0;
3281 break;
3282 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003283 case DOMAIN_ATTR_FAST:
3284 *((int *)data) = !!(smmu_domain->attributes
3285 & (1 << DOMAIN_ATTR_FAST));
3286 ret = 0;
3287 break;
Patrick Daly1e279922017-09-06 15:57:45 -07003288 case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
3289 *((int *)data) = !!(smmu_domain->attributes
3290 & (1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR));
3291 ret = 0;
3292 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08003293 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
3294 *((int *)data) = !!(smmu_domain->attributes &
3295 (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
3296 ret = 0;
3297 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003298 case DOMAIN_ATTR_EARLY_MAP:
3299 *((int *)data) = !!(smmu_domain->attributes
3300 & (1 << DOMAIN_ATTR_EARLY_MAP));
3301 ret = 0;
3302 break;
Mitchel Humpherys05314f32016-06-07 16:04:40 -07003303 case DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003304 if (!smmu_domain->smmu) {
3305 ret = -ENODEV;
3306 break;
3307 }
Liam Mark53cf2342016-12-20 11:36:07 -08003308 *((int *)data) = is_iommu_pt_coherent(smmu_domain);
3309 ret = 0;
3310 break;
3311 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
3312 *((int *)data) = !!(smmu_domain->attributes
3313 & (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
Mitchel Humpherys05314f32016-06-07 16:04:40 -07003314 ret = 0;
3315 break;
Charan Teja Reddyc682e472017-04-20 19:11:20 +05303316 case DOMAIN_ATTR_CB_STALL_DISABLE:
3317 *((int *)data) = !!(smmu_domain->attributes
3318 & (1 << DOMAIN_ATTR_CB_STALL_DISABLE));
3319 ret = 0;
3320 break;
Patrick Daly7c29f782018-08-16 15:36:20 -07003321 case DOMAIN_ATTR_NO_CFRE:
3322 *((int *)data) = !!(smmu_domain->attributes
3323 & (1 << DOMAIN_ATTR_NO_CFRE));
3324 ret = 0;
3325 break;
Patrick Daly83174c12017-10-26 12:31:15 -07003326 case DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN:
Patrick Daly23301482017-10-12 16:18:25 -07003327 *((int *)data) = smmu_domain->qsmmuv500_errata2_min_align;
3328 ret = 0;
3329 break;
Prakash Guptac2e909a2018-03-29 11:23:06 +05303330 case DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE:
3331 *((int *)data) = !!(smmu_domain->attributes
3332 & (1 << DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE));
3333 ret = 0;
3334 break;
3335
Will Deaconc752ce42014-06-25 22:46:31 +01003336 default:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003337 ret = -ENODEV;
3338 break;
Will Deaconc752ce42014-06-25 22:46:31 +01003339 }
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003340 mutex_unlock(&smmu_domain->init_mutex);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06003341 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01003342}
3343
3344static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
3345 enum iommu_attr attr, void *data)
3346{
Will Deacon518f7132014-11-14 17:17:54 +00003347 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01003348 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01003349
Will Deacon518f7132014-11-14 17:17:54 +00003350 mutex_lock(&smmu_domain->init_mutex);
3351
Will Deaconc752ce42014-06-25 22:46:31 +01003352 switch (attr) {
3353 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00003354 if (smmu_domain->smmu) {
3355 ret = -EPERM;
3356 goto out_unlock;
3357 }
3358
Will Deaconc752ce42014-06-25 22:46:31 +01003359 if (*(int *)data)
3360 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
3361 else
3362 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
3363
Will Deacon518f7132014-11-14 17:17:54 +00003364 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06003365 case DOMAIN_ATTR_PROCID:
3366 if (smmu_domain->smmu != NULL) {
3367 dev_err(smmu_domain->smmu->dev,
3368 "cannot change procid attribute while attached\n");
3369 ret = -EBUSY;
3370 break;
3371 }
3372 smmu_domain->cfg.procid = *((u32 *)data);
3373 ret = 0;
3374 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07003375 case DOMAIN_ATTR_DYNAMIC: {
3376 int dynamic = *((int *)data);
3377
3378 if (smmu_domain->smmu != NULL) {
3379 dev_err(smmu_domain->smmu->dev,
3380 "cannot change dynamic attribute while attached\n");
3381 ret = -EBUSY;
3382 break;
3383 }
3384
3385 if (dynamic)
3386 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
3387 else
3388 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
3389 ret = 0;
3390 break;
3391 }
3392 case DOMAIN_ATTR_CONTEXT_BANK:
3393 /* context bank can't be set while attached */
3394 if (smmu_domain->smmu != NULL) {
3395 ret = -EBUSY;
3396 break;
3397 }
3398 /* ... and it can only be set for dynamic contexts. */
3399 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
3400 ret = -EINVAL;
3401 break;
3402 }
3403
3404 /* this will be validated during attach */
3405 smmu_domain->cfg.cbndx = *((unsigned int *)data);
3406 ret = 0;
3407 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07003408 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
3409 u32 non_fatal_faults = *((int *)data);
3410
3411 if (non_fatal_faults)
3412 smmu_domain->attributes |=
3413 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
3414 else
3415 smmu_domain->attributes &=
3416 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
3417 ret = 0;
3418 break;
3419 }
Patrick Dalye62d3362016-03-15 18:58:28 -07003420 case DOMAIN_ATTR_S1_BYPASS: {
3421 int bypass = *((int *)data);
3422
3423 /* bypass can't be changed while attached */
3424 if (smmu_domain->smmu != NULL) {
3425 ret = -EBUSY;
3426 break;
3427 }
3428 if (bypass)
3429 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
3430 else
3431 smmu_domain->attributes &=
3432 ~(1 << DOMAIN_ATTR_S1_BYPASS);
3433
3434 ret = 0;
3435 break;
3436 }
Patrick Daly8befb662016-08-17 20:03:28 -07003437 case DOMAIN_ATTR_ATOMIC:
3438 {
3439 int atomic_ctx = *((int *)data);
3440
3441 /* can't be changed while attached */
3442 if (smmu_domain->smmu != NULL) {
3443 ret = -EBUSY;
3444 break;
3445 }
3446 if (atomic_ctx)
3447 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
3448 else
3449 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
3450 break;
3451 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07003452 case DOMAIN_ATTR_SECURE_VMID:
3453 if (smmu_domain->secure_vmid != VMID_INVAL) {
3454 ret = -ENODEV;
3455 WARN(1, "secure vmid already set!");
3456 break;
3457 }
3458 smmu_domain->secure_vmid = *((int *)data);
3459 break;
Patrick Daly1e279922017-09-06 15:57:45 -07003460 case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
3461 if (*((int *)data))
3462 smmu_domain->attributes |=
3463 1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR;
3464 ret = 0;
3465 break;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003466 /*
3467 * fast_smmu_unmap_page() and fast_smmu_alloc_iova() both
3468 * expect that the bus/clock/regulator are already on. Thus also
3469	 * force DOMAIN_ATTR_ATOMIC to be set.
3470 */
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003471 case DOMAIN_ATTR_FAST:
Patrick Daly0df84ac2017-10-11 17:32:41 -07003472 {
3473 int fast = *((int *)data);
3474
3475 if (fast) {
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003476 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003477 smmu_domain->attributes |= 1 << DOMAIN_ATTR_ATOMIC;
3478 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003479 ret = 0;
3480 break;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003481 }
Patrick Dalyce6786f2016-11-09 14:19:23 -08003482 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
3483 /* can't be changed while attached */
3484 if (smmu_domain->smmu != NULL) {
3485 ret = -EBUSY;
3486 break;
3487 }
3488 if (*((int *)data))
3489 smmu_domain->attributes |=
3490 1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
3491 ret = 0;
3492 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003493 case DOMAIN_ATTR_EARLY_MAP: {
3494 int early_map = *((int *)data);
3495
3496 ret = 0;
3497 if (early_map) {
3498 smmu_domain->attributes |=
3499 1 << DOMAIN_ATTR_EARLY_MAP;
3500 } else {
3501 if (smmu_domain->smmu)
3502 ret = arm_smmu_enable_s1_translations(
3503 smmu_domain);
3504
3505 if (!ret)
3506 smmu_domain->attributes &=
3507 ~(1 << DOMAIN_ATTR_EARLY_MAP);
3508 }
3509 break;
3510 }
Liam Mark53cf2342016-12-20 11:36:07 -08003511 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT: {
3512 int force_coherent = *((int *)data);
3513
3514 if (smmu_domain->smmu != NULL) {
3515 dev_err(smmu_domain->smmu->dev,
3516 "cannot change force coherent attribute while attached\n");
3517 ret = -EBUSY;
3518 break;
3519 }
3520
3521 if (force_coherent)
3522 smmu_domain->attributes |=
3523 1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
3524 else
3525 smmu_domain->attributes &=
3526 ~(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT);
3527
3528 ret = 0;
3529 break;
3530 }
3531
Charan Teja Reddyc682e472017-04-20 19:11:20 +05303532 case DOMAIN_ATTR_CB_STALL_DISABLE:
3533 if (*((int *)data))
3534 smmu_domain->attributes |=
3535 1 << DOMAIN_ATTR_CB_STALL_DISABLE;
3536 ret = 0;
3537 break;
Prakash Guptac2e909a2018-03-29 11:23:06 +05303538
3539 case DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE: {
3540 int force_iova_guard_page = *((int *)data);
3541
3542 if (smmu_domain->smmu != NULL) {
3543 dev_err(smmu_domain->smmu->dev,
3544 "cannot change force guard page attribute while attached\n");
3545 ret = -EBUSY;
3546 break;
3547 }
3548
3549 if (force_iova_guard_page)
3550 smmu_domain->attributes |=
3551 1 << DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE;
3552 else
3553 smmu_domain->attributes &=
3554 ~(1 << DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE);
3555
3556 ret = 0;
3557 break;
3558 }
3559
Patrick Daly7c29f782018-08-16 15:36:20 -07003560 case DOMAIN_ATTR_NO_CFRE:
3561 if (*((int *)data))
3562 smmu_domain->attributes |=
3563 1 << DOMAIN_ATTR_NO_CFRE;
3564 ret = 0;
3565 break;
Will Deaconc752ce42014-06-25 22:46:31 +01003566 default:
Will Deacon518f7132014-11-14 17:17:54 +00003567 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01003568 }
Will Deacon518f7132014-11-14 17:17:54 +00003569
3570out_unlock:
3571 mutex_unlock(&smmu_domain->init_mutex);
3572 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01003573}
3574
Robin Murphy7e96c742016-09-14 15:26:46 +01003575static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
3576{
3577 u32 fwid = 0;
3578
3579 if (args->args_count > 0)
3580 fwid |= (u16)args->args[0];
3581
3582 if (args->args_count > 1)
3583 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
3584
3585 return iommu_fwspec_add_ids(dev, &fwid, 1);
3586}
3587
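/*
 * Called when DOMAIN_ATTR_EARLY_MAP is cleared on a live domain: drop the
 * early-map flag from the cached context bank state and rewrite the context
 * bank so stage 1 translation takes effect.
 */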
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003588static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain)
3589{
3590 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3591 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy6549a1f2017-08-08 14:56:14 +01003592 struct arm_smmu_cb *cb = &smmu->cbs[cfg->cbndx];
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003593 int ret;
3594
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003595 ret = arm_smmu_power_on(smmu->pwr);
3596 if (ret)
3597 return ret;
3598
Robin Murphy6549a1f2017-08-08 14:56:14 +01003599 cb->attributes &= ~(1 << DOMAIN_ATTR_EARLY_MAP);
3600 arm_smmu_write_context_bank(smmu, cfg->cbndx);
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003601
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003602 arm_smmu_power_off(smmu->pwr);
3603 return ret;
3604}
3605
Liam Mark3ba41cf2016-12-09 14:39:04 -08003606static bool arm_smmu_is_iova_coherent(struct iommu_domain *domain,
3607 dma_addr_t iova)
3608{
3609 bool ret;
3610 unsigned long flags;
3611 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3612 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
3613
3614 if (!ops)
3615 return false;
3616
3617 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
3618 ret = ops->is_iova_coherent(ops, iova);
3619 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
3620 return ret;
3621}
3622
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003623static void arm_smmu_trigger_fault(struct iommu_domain *domain,
3624 unsigned long flags)
3625{
3626 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3627 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3628 struct arm_smmu_device *smmu;
3629 void __iomem *cb_base;
3630
3631 if (!smmu_domain->smmu) {
3632 pr_err("Can't trigger faults on non-attached domains\n");
3633 return;
3634 }
3635
3636 smmu = smmu_domain->smmu;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003637 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003638 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003639
3640 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3641 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
3642 flags, cfg->cbndx);
3643 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07003644 /* give the interrupt time to fire... */
3645 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003646
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003647 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003648}
3649
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003650static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
3651{
Patrick Dalyda765c62017-09-11 16:31:07 -07003652 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3653 const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;
3654
3655 tlb->tlb_flush_all(smmu_domain);
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003656}
3657
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003658static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
3659{
3660 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3661
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003662 return arm_smmu_power_on(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003663}
3664
3665static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
3666{
3667 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3668
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003669 arm_smmu_power_off(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003670}
3671
Will Deacon518f7132014-11-14 17:17:54 +00003672static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01003673 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01003674 .domain_alloc = arm_smmu_domain_alloc,
3675 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01003676 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07003677 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01003678 .map = arm_smmu_map,
3679 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07003680 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01003681 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07003682 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01003683 .add_device = arm_smmu_add_device,
3684 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02003685 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01003686 .domain_get_attr = arm_smmu_domain_get_attr,
3687 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy7e96c742016-09-14 15:26:46 +01003688 .of_xlate = arm_smmu_of_xlate,
Will Deacon518f7132014-11-14 17:17:54 +00003689 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003690 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003691 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003692 .enable_config_clocks = arm_smmu_enable_config_clocks,
3693 .disable_config_clocks = arm_smmu_disable_config_clocks,
Liam Mark3ba41cf2016-12-09 14:39:04 -08003694 .is_iova_coherent = arm_smmu_is_iova_coherent,
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07003695 .iova_to_pte = arm_smmu_iova_to_pte,
Will Deacon45ae7cf2013-06-24 18:31:25 +01003696};
3697
Patrick Dalyad441dd2016-09-15 15:50:46 -07003698#define IMPL_DEF1_MICRO_MMU_CTRL 0
3699#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
3700#define MICRO_MMU_CTRL_IDLE (1 << 3)
3701
3702/* Definitions for implementation-defined registers */
3703#define ACTLR_QCOM_OSH_SHIFT 28
3704#define ACTLR_QCOM_OSH 1
3705
3706#define ACTLR_QCOM_ISH_SHIFT 29
3707#define ACTLR_QCOM_ISH 1
3708
3709#define ACTLR_QCOM_NSH_SHIFT 30
3710#define ACTLR_QCOM_NSH 1
3711
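/*
 * QSMMUv2 halt/resume is driven through the implementation-defined
 * MICRO_MMU_CTRL register: set the local halt request and poll for the IDLE
 * bit. When the context banks are statically owned by the secure world, the
 * register is written via scm_io_write() instead of a direct MMIO access.
 */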
3712static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003713{
3714 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003715 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003716
3717 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
3718 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
3719 0, 30000)) {
3720 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
3721 return -EBUSY;
3722 }
3723
3724 return 0;
3725}
3726
Patrick Dalyad441dd2016-09-15 15:50:46 -07003727static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003728{
3729 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3730 u32 reg;
3731
3732 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3733 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
Charan Teja Reddy35144b02017-09-05 16:20:46 +05303734
3735 if (arm_smmu_is_static_cb(smmu)) {
3736 phys_addr_t impl_def1_base_phys = impl_def1_base - smmu->base +
3737 smmu->phys_addr;
3738
3739 if (scm_io_write(impl_def1_base_phys +
3740 IMPL_DEF1_MICRO_MMU_CTRL, reg)) {
3741 dev_err(smmu->dev,
3742				"scm_io_write failed. SMMU might not be halted");
3743 return -EINVAL;
3744 }
3745 } else {
3746 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3747 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003748
Patrick Dalyad441dd2016-09-15 15:50:46 -07003749 return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003750}
3751
Patrick Dalyad441dd2016-09-15 15:50:46 -07003752static int qsmmuv2_halt(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003753{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003754 return __qsmmuv2_halt(smmu, true);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003755}
3756
Patrick Dalyad441dd2016-09-15 15:50:46 -07003757static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003758{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003759 return __qsmmuv2_halt(smmu, false);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003760}
3761
Patrick Dalyad441dd2016-09-15 15:50:46 -07003762static void qsmmuv2_resume(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003763{
3764 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3765 u32 reg;
3766
3767 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3768 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
Charan Teja Reddy35144b02017-09-05 16:20:46 +05303769
3770 if (arm_smmu_is_static_cb(smmu)) {
3771 phys_addr_t impl_def1_base_phys = impl_def1_base - smmu->base +
3772 smmu->phys_addr;
3773
3774 if (scm_io_write(impl_def1_base_phys +
3775 IMPL_DEF1_MICRO_MMU_CTRL, reg))
3776 dev_err(smmu->dev,
3777 "scm_io_write failed; SMMU might not be resumed\n");
3778 } else {
3779 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3780 }
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003781}
3782
Patrick Dalyad441dd2016-09-15 15:50:46 -07003783static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003784{
3785 int i;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003786 u32 val;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003787 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003788 /*
3789 * SCTLR.M must be disabled here per ARM SMMUv2 spec
3790 * to prevent table walks with an inconsistent state.
3791 */
3792 for (i = 0; i < smmu->num_context_banks; ++i) {
Patrick Dalyad521082018-04-06 18:07:13 -07003793 struct arm_smmu_cb *cb = &smmu->cbs[i];
3794
Patrick Dalyad441dd2016-09-15 15:50:46 -07003795 val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
3796 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
3797 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
Patrick Dalyad521082018-04-06 18:07:13 -07003798 cb->actlr = val;
Patrick Daly25317e82018-05-07 12:35:29 -07003799 cb->has_actlr = true;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003800 }
3801
3802 /* Program implementation defined registers */
3803 qsmmuv2_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003804 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
3805 writel_relaxed(regs[i].value,
3806 ARM_SMMU_GR0(smmu) + regs[i].offset);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003807 qsmmuv2_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003808}
3809
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003810static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
3811 dma_addr_t iova)
Patrick Dalyad441dd2016-09-15 15:50:46 -07003812{
3813 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3814 struct arm_smmu_device *smmu = smmu_domain->smmu;
3815 int ret;
3816 phys_addr_t phys = 0;
3817 unsigned long flags;
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003818 u32 sctlr, sctlr_orig, fsr;
3819 void __iomem *cb_base;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003820
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003821 ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003822 if (ret)
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003823 return ret;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003824
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003825 spin_lock_irqsave(&smmu->atos_lock, flags);
3826 cb_base = ARM_SMMU_CB_BASE(smmu) +
3827 ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003828
3829 qsmmuv2_halt_nowait(smmu);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003830 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003831 qsmmuv2_wait_for_halt(smmu);
3832
3833 /* clear FSR to allow ATOS to log any faults */
3834 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
3835 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
3836
3837 /* disable stall mode momentarily */
3838 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3839 sctlr = sctlr_orig & ~SCTLR_CFCFG;
3840 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
3841
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003842 phys = __arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003843
3844 /* restore SCTLR */
3845 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
3846
3847 qsmmuv2_resume(smmu);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003848 spin_unlock_irqrestore(&smmu->atos_lock, flags);
3849
3850 arm_smmu_power_off(smmu_domain->smmu->pwr);
3851 return phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003852}
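/*
 * Summary of the hard ATOS walk above: request a halt
 * (qsmmuv2_halt_nowait()), write RESUME_TERMINATE to retire any
 * transaction stalled on a fault, wait for the halt to take effect, clear
 * FSR so a fault raised by the translation itself is visible, temporarily
 * clear SCTLR.CFCFG so faults terminate rather than stall, perform the
 * walk via __arm_smmu_iova_to_phys_hard(), then restore SCTLR and resume.
 * The whole sequence runs under atos_lock with the SMMU powered on.
 */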
3853
3854struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
3855 .device_reset = qsmmuv2_device_reset,
3856 .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
Patrick Dalyad441dd2016-09-15 15:50:46 -07003857};
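/*
 * These hooks are attached to QCOM_SMMUV2 instances through the match data
 * (see ARM_SMMU_MATCH_DATA(qcom_smmuv2, ...) further down) and are invoked
 * from the generic paths, e.g. arm_smmu_arch_device_reset() at the end of
 * arm_smmu_device_reset().
 */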
3858
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003859static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003860{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003861 int i;
3862 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003863 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003864
Peng Fan3ca37122016-05-03 21:50:30 +08003865 /*
3866 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
3867 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
3868 * bit is only present in MMU-500r2 onwards.
3869 */
3870 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
3871 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
3872 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
3873 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
3874 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
3875 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
3876 }
3877
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003878 /* Make sure all context banks are disabled and clear CB_FSR */
3879 for (i = 0; i < smmu->num_context_banks; ++i) {
Robin Murphy6549a1f2017-08-08 14:56:14 +01003880 void __iomem *cb_base = ARM_SMMU_CB_BASE(smmu) +
3881 ARM_SMMU_CB(smmu, i);
3882
3883 arm_smmu_write_context_bank(smmu, i);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003884 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003885 /*
3886 * Disable MMU-500's not-particularly-beneficial next-page
3887 * prefetcher for the sake of errata #841119 and #826419.
3888 */
3889 if (smmu->model == ARM_MMU500) {
3890 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
3891 reg &= ~ARM_MMU500_ACTLR_CPRE;
3892 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
3893 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003894 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003895}
3896
3897static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
3898{
3899 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy468f4942016-09-12 17:13:49 +01003900 int i;
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003901 u32 reg;
3902
3903 /* clear global FSR */
3904 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3905 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3906
Robin Murphy468f4942016-09-12 17:13:49 +01003907 /*
3908 * Reset stream mapping groups: Initial values mark all SMRn as
3909 * invalid and all S2CRn as bypass unless overridden.
3910 */
Patrick Daly59b6d202017-06-12 13:12:15 -07003911 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
3912 for (i = 0; i < smmu->num_mapping_groups; ++i)
3913 arm_smmu_write_sme(smmu, i);
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003914
Patrick Daly59b6d202017-06-12 13:12:15 -07003915 arm_smmu_context_bank_reset(smmu);
3916 }
Will Deacon1463fe42013-07-31 19:21:27 +01003917
Will Deacon45ae7cf2013-06-24 18:31:25 +01003918 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003919 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
3920 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
3921
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003922 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003923
Will Deacon45ae7cf2013-06-24 18:31:25 +01003924 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003925 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003926
3927 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003928 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003929
Robin Murphy25a1c962016-02-10 14:25:33 +00003930 /* Enable client access, handling unmatched streams as appropriate */
3931 reg &= ~sCR0_CLIENTPD;
3932 if (disable_bypass)
3933 reg |= sCR0_USFCFG;
3934 else
3935 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003936
3937 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003938 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003939
3940 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003941 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003942
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003943 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3944 reg |= sCR0_VMID16EN;
3945
Patrick Daly7f377fe2017-10-06 17:37:10 -07003946 /* Force bypass transactions to be non-shareable and not I/O-coherent */
3947 reg &= ~(sCR0_SHCFG_MASK << sCR0_SHCFG_SHIFT);
Prakash Gupta673a79f2017-11-16 18:07:00 +05303948 reg |= sCR0_SHCFG_NSH << sCR0_SHCFG_SHIFT;
Patrick Daly7f377fe2017-10-06 17:37:10 -07003949
Will Deacon45ae7cf2013-06-24 18:31:25 +01003950 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003951 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003952 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Dalyd7476202016-09-08 18:23:28 -07003953
3954 /* Manage any implementation defined features */
3955 arm_smmu_arch_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003956}
3957
3958static int arm_smmu_id_size_to_bits(int size)
3959{
3960 switch (size) {
3961 case 0:
3962 return 32;
3963 case 1:
3964 return 36;
3965 case 2:
3966 return 40;
3967 case 3:
3968 return 42;
3969 case 4:
3970 return 44;
3971 case 5:
3972 default:
3973 return 48;
3974 }
3975}
3976
Patrick Dalyda688822017-05-17 20:12:48 -07003977
3978/*
3979 * Some context banks need to be transferred from the bootloader to HLOS in a way
3980 * that allows ongoing traffic. The current expectation is that these context
3981 * banks operate in bypass mode.
3982 * Additionally, there must be exactly one device in devicetree with stream-ids
3983 * overlapping those used by the bootloader.
3984 */
3985static int arm_smmu_alloc_cb(struct iommu_domain *domain,
3986 struct arm_smmu_device *smmu,
3987 struct device *dev)
3988{
3989 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Dalye72526b2017-07-18 16:21:44 -07003990 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Dalyda688822017-05-17 20:12:48 -07003991 u32 i, idx;
3992 int cb = -EINVAL;
3993 bool dynamic;
3994
Patrick Dalye72526b2017-07-18 16:21:44 -07003995 /*
3996 * Dynamic domains have already set cbndx through domain attribute.
3997 * Verify that they picked a valid value.
3998 */
Patrick Dalyda688822017-05-17 20:12:48 -07003999 dynamic = is_dynamic_domain(domain);
Patrick Dalye72526b2017-07-18 16:21:44 -07004000 if (dynamic) {
4001 cb = smmu_domain->cfg.cbndx;
4002 if (cb < smmu->num_context_banks)
4003 return cb;
4004 else
4005 return -EINVAL;
4006 }
Patrick Dalyda688822017-05-17 20:12:48 -07004007
4008 mutex_lock(&smmu->stream_map_mutex);
4009 for_each_cfg_sme(fwspec, i, idx) {
4010 if (smmu->s2crs[idx].cb_handoff)
4011 cb = smmu->s2crs[idx].cbndx;
4012 }
4013
Shiraz Hashima28a4792018-01-13 00:39:52 +05304014 if (cb >= 0 && arm_smmu_is_static_cb(smmu)) {
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304015 smmu_domain->slave_side_secure = true;
4016
Shiraz Hashima28a4792018-01-13 00:39:52 +05304017 if (arm_smmu_is_slave_side_secure(smmu_domain))
Charan Teja Reddy4971ca42018-01-23 18:27:08 +05304018 bitmap_set(smmu->secure_context_map, cb, 1);
Shiraz Hashima28a4792018-01-13 00:39:52 +05304019 }
4020
Charan Teja Reddyf0758df2017-09-04 18:52:07 +05304021 if (cb < 0 && !arm_smmu_is_static_cb(smmu)) {
Patrick Dalyda688822017-05-17 20:12:48 -07004022 mutex_unlock(&smmu->stream_map_mutex);
4023 return __arm_smmu_alloc_bitmap(smmu->context_map,
4024 smmu->num_s2_context_banks,
4025 smmu->num_context_banks);
4026 }
4027
4028 for (i = 0; i < smmu->num_mapping_groups; i++) {
Patrick Daly2eb31362017-06-14 18:29:36 -07004029 if (smmu->s2crs[i].cb_handoff && smmu->s2crs[i].cbndx == cb) {
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304030 if (!arm_smmu_is_static_cb(smmu))
4031 smmu->s2crs[i].cb_handoff = false;
Patrick Dalyda688822017-05-17 20:12:48 -07004032 smmu->s2crs[i].count -= 1;
4033 }
4034 }
4035 mutex_unlock(&smmu->stream_map_mutex);
4036
4037 return cb;
4038}
4039
Patrick Dalyaddf1f82018-04-23 14:39:19 -07004040static void parse_static_cb_cfg(struct arm_smmu_device *smmu)
4041{
4042 u32 idx = 0;
4043 u32 val;
4044 int ret;
4045
4046 if (!(arm_smmu_is_static_cb(smmu) &&
4047 arm_smmu_opt_hibernation(smmu)))
4048 return;
4049
4050 /*
4051 * Context banks may be xpu-protected. Require a devicetree property to
4052 * indicate which context banks HLOS has access to.
4053 */
4054 bitmap_set(smmu->secure_context_map, 0, ARM_SMMU_MAX_CBS);
4055 while (idx < ARM_SMMU_MAX_CBS) {
4056 ret = of_property_read_u32_index(
4057 smmu->dev->of_node, "qcom,static-ns-cbs",
4058 idx++, &val);
4059 if (ret)
4060 break;
4061
4062 bitmap_clear(smmu->secure_context_map, val, 1);
4063 dev_dbg(smmu->dev, "Detected NS context bank: %d\n", val);
4064 }
4065}
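/*
 * Illustrative devicetree fragment for the property parsed above (the cell
 * values are made up): each cell names one context bank that HLOS may use;
 * every other context bank stays marked secure.
 *
 *	qcom,static-ns-cbs = <1 2 7>;
 */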
4066
Patrick Dalyda688822017-05-17 20:12:48 -07004067static int arm_smmu_handoff_cbs(struct arm_smmu_device *smmu)
4068{
4069 u32 i, raw_smr, raw_s2cr;
4070 struct arm_smmu_smr smr;
4071 struct arm_smmu_s2cr s2cr;
4072
4073 for (i = 0; i < smmu->num_mapping_groups; i++) {
4074 raw_smr = readl_relaxed(ARM_SMMU_GR0(smmu) +
4075 ARM_SMMU_GR0_SMR(i));
4076 if (!(raw_smr & SMR_VALID))
4077 continue;
4078
4079 smr.mask = (raw_smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
4080 smr.id = (u16)raw_smr;
4081 smr.valid = true;
4082
4083 raw_s2cr = readl_relaxed(ARM_SMMU_GR0(smmu) +
4084 ARM_SMMU_GR0_S2CR(i));
Patrick Daly4ea4bdf2017-08-29 19:24:49 -07004085 memset(&s2cr, 0, sizeof(s2cr));
Patrick Dalyda688822017-05-17 20:12:48 -07004086 s2cr.group = NULL;
4087 s2cr.count = 1;
4088 s2cr.type = (raw_s2cr >> S2CR_TYPE_SHIFT) & S2CR_TYPE_MASK;
4089 s2cr.privcfg = (raw_s2cr >> S2CR_PRIVCFG_SHIFT) &
4090 S2CR_PRIVCFG_MASK;
4091 s2cr.cbndx = (u8)raw_s2cr;
4092 s2cr.cb_handoff = true;
4093
4094 if (s2cr.type != S2CR_TYPE_TRANS)
4095 continue;
4096
4097 smmu->smrs[i] = smr;
4098 smmu->s2crs[i] = s2cr;
4099 bitmap_set(smmu->context_map, s2cr.cbndx, 1);
4100 dev_dbg(smmu->dev, "Handoff smr: %x s2cr: %x cb: %d\n",
4101 raw_smr, raw_s2cr, s2cr.cbndx);
4102 }
4103
4104 return 0;
4105}
4106
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004107static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
4108{
4109 struct device *dev = smmu->dev;
4110 int i, ntuples, ret;
4111 u32 *tuples;
4112 struct arm_smmu_impl_def_reg *regs, *regit;
4113
4114 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
4115 return 0;
4116
4117 ntuples /= sizeof(u32);
4118 if (ntuples % 2) {
4119 dev_err(dev,
4120 "Invalid number of attach-impl-defs registers: %d\n",
4121 ntuples);
4122 return -EINVAL;
4123 }
4124
4125 regs = devm_kmalloc(
4126 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
4127 GFP_KERNEL);
4128 if (!regs)
4129 return -ENOMEM;
4130
4131 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
4132 if (!tuples)
4133 return -ENOMEM;
4134
4135 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
4136 tuples, ntuples);
4137 if (ret)
4138 return ret;
4139
4140 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
4141 regit->offset = tuples[i];
4142 regit->value = tuples[i + 1];
4143 }
4144
4145 devm_kfree(dev, tuples);
4146
4147 smmu->impl_def_attach_registers = regs;
4148 smmu->num_impl_def_attach_registers = ntuples / 2;
4149
4150 return 0;
4151}
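/*
 * Illustrative "attach-impl-defs" fragment (offsets and values are made
 * up): the property is a flat list of <offset value> pairs, each written
 * to GR0 + offset while the SMMU is halted in qsmmuv2_device_reset().
 *
 *	attach-impl-defs = <0x6000 0x270>,
 *			   <0x6060 0x1055>;
 */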
4152
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004153
4154static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004155{
4156 const char *cname;
4157 struct property *prop;
4158 int i;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004159 struct device *dev = pwr->dev;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004160
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004161 pwr->num_clocks =
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004162 of_property_count_strings(dev->of_node, "clock-names");
4163
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004164 if (pwr->num_clocks < 1) {
4165 pwr->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004166 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07004167 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004168
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004169 pwr->clocks = devm_kzalloc(
4170 dev, sizeof(*pwr->clocks) * pwr->num_clocks,
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004171 GFP_KERNEL);
4172
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004173 if (!pwr->clocks)
4174 return -ENOMEM;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004175
4176 i = 0;
4177 of_property_for_each_string(dev->of_node, "clock-names",
4178 prop, cname) {
4179 struct clk *c = devm_clk_get(dev, cname);
4180
4181 if (IS_ERR(c)) {
4182 dev_err(dev, "Couldn't get clock: %s",
4183 cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07004184 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004185 }
4186
4187 if (clk_get_rate(c) == 0) {
4188 long rate = clk_round_rate(c, 1000);
4189
4190 clk_set_rate(c, rate);
4191 }
4192
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004193 pwr->clocks[i] = c;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004194
4195 ++i;
4196 }
4197 return 0;
4198}
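/*
 * Illustrative clock wiring (names and phandles are made up): each string
 * in "clock-names" must have a matching entry in "clocks" so that
 * devm_clk_get() can resolve it.
 *
 *	clocks = <&clock_gcc GCC_SMMU_CFG_CLK>,
 *		 <&clock_gcc GCC_SMMU_AXI_CLK>;
 *	clock-names = "iface_clk", "bus_clk";
 */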
4199
Charan Teja Reddyf8464882017-12-05 20:29:05 +05304200static int regulator_notifier(struct notifier_block *nb,
4201 unsigned long event, void *data)
4202{
4203 int ret = 0;
4204 struct arm_smmu_device *smmu = container_of(nb, struct arm_smmu_device,
4205 regulator_nb);
4206
4207 if (event != REGULATOR_EVENT_PRE_DISABLE &&
4208 event != REGULATOR_EVENT_ENABLE)
4209 return NOTIFY_OK;
4210
4211 ret = arm_smmu_prepare_clocks(smmu->pwr);
4212 if (ret)
4213 goto out;
4214
4215 ret = arm_smmu_power_on_atomic(smmu->pwr);
4216 if (ret)
4217 goto unprepare_clock;
4218
4219 if (event == REGULATOR_EVENT_PRE_DISABLE)
4220 qsmmuv2_halt(smmu);
4221 else if (event == REGULATOR_EVENT_ENABLE) {
Charan Teja Reddyec6f7822018-01-10 17:32:52 +05304222 if (arm_smmu_restore_sec_cfg(smmu, 0))
Charan Teja Reddyf8464882017-12-05 20:29:05 +05304223 goto power_off;
4224 qsmmuv2_resume(smmu);
4225 }
4226power_off:
4227 arm_smmu_power_off_atomic(smmu->pwr);
4228unprepare_clock:
4229 arm_smmu_unprepare_clocks(smmu->pwr);
4230out:
4231 return NOTIFY_OK;
4232}
4233
4234static int register_regulator_notifier(struct arm_smmu_device *smmu)
4235{
4236 struct device *dev = smmu->dev;
4237 struct regulator_bulk_data *consumers;
4238 int ret = 0, num_consumers;
4239 struct arm_smmu_power_resources *pwr = smmu->pwr;
4240
4241 if (!(smmu->options & ARM_SMMU_OPT_HALT))
4242 goto out;
4243
4244 num_consumers = pwr->num_gdscs;
4245 consumers = pwr->gdscs;
4246
4247 if (!num_consumers) {
4248 dev_info(dev, "no regulator info exists for %s\n",
4249 dev_name(dev));
4250 goto out;
4251 }
4252
4253 smmu->regulator_nb.notifier_call = regulator_notifier;
4254 /* Registering the notifier against one GDSC is sufficient, as
4255 * the regulators are enabled and disabled as a group.
4256 */
4257 ret = regulator_register_notifier(consumers[0].consumer,
4258 &smmu->regulator_nb);
4259 if (ret)
4260 dev_err(dev, "Regulator notifier request failed\n");
4261out:
4262 return ret;
4263}
4264
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004265static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004266{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004267 const char *cname;
4268 struct property *prop;
4269 int i, ret = 0;
4270 struct device *dev = pwr->dev;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004271
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004272 pwr->num_gdscs =
4273 of_property_count_strings(dev->of_node, "qcom,regulator-names");
4274
4275 if (pwr->num_gdscs < 1) {
4276 pwr->num_gdscs = 0;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004277 return 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004278 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004279
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004280 pwr->gdscs = devm_kzalloc(
4281 dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
4282
4283 if (!pwr->gdscs)
4284 return -ENOMEM;
4285
Prakash Guptafad87ca2017-05-16 12:13:02 +05304286 if (!of_property_read_u32(dev->of_node,
4287 "qcom,deferred-regulator-disable-delay",
4288 &(pwr->regulator_defer)))
4289 dev_info(dev, "regulator defer delay %d\n",
4290 pwr->regulator_defer);
4291
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004292 i = 0;
4293 of_property_for_each_string(dev->of_node, "qcom,regulator-names",
4294 prop, cname)
Patrick Daly86396be2017-04-17 18:08:45 -07004295 pwr->gdscs[i++].supply = cname;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004296
4297 ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
4298 return ret;
4299}
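/*
 * Illustrative regulator wiring (names are made up): each string in
 * "qcom,regulator-names" becomes a bulk-regulator supply name, so the
 * standard "<name>-supply" phandle is expected alongside it.
 *
 *	vdd-supply = <&gdsc_smmu>;
 *	qcom,regulator-names = "vdd";
 */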
4300
4301static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
4302{
4303 struct device *dev = pwr->dev;
4304
4305 /* We don't want the bus APIs to print an error message */
4306 if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
4307 dev_dbg(dev, "No bus scaling info\n");
4308 return 0;
4309 }
4310
4311 pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
4312 if (!pwr->bus_dt_data) {
4313 dev_err(dev, "Unable to read bus-scaling from devicetree\n");
4314 return -EINVAL;
4315 }
4316
4317 pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
4318 if (!pwr->bus_client) {
4319 dev_err(dev, "Bus client registration failed\n");
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004320 return -EINVAL;
4321 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004322
4323 return 0;
4324}
4325
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004326/*
4327 * Cleanup done by devm. Any non-devm resources must clean up themselves.
4328 */
4329static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
4330 struct platform_device *pdev)
Patrick Daly2764f952016-09-06 19:22:44 -07004331{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004332 struct arm_smmu_power_resources *pwr;
4333 int ret;
Patrick Daly2764f952016-09-06 19:22:44 -07004334
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004335 pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
4336 if (!pwr)
4337 return ERR_PTR(-ENOMEM);
Patrick Daly2764f952016-09-06 19:22:44 -07004338
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004339 pwr->dev = &pdev->dev;
4340 pwr->pdev = pdev;
4341 mutex_init(&pwr->power_lock);
4342 spin_lock_init(&pwr->clock_refs_lock);
Patrick Daly2764f952016-09-06 19:22:44 -07004343
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004344 ret = arm_smmu_init_clocks(pwr);
4345 if (ret)
4346 return ERR_PTR(ret);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004347
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004348 ret = arm_smmu_init_regulators(pwr);
4349 if (ret)
4350 return ERR_PTR(ret);
4351
4352 ret = arm_smmu_init_bus_scaling(pwr);
4353 if (ret)
4354 return ERR_PTR(ret);
4355
4356 return pwr;
Patrick Daly2764f952016-09-06 19:22:44 -07004357}
4358
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004359/*
Patrick Dalyabeee952017-04-13 18:14:59 -07004360 * Bus APIs are not devm-safe.
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004361 */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004362static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004363{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004364 msm_bus_scale_unregister_client(pwr->bus_client);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004365}
4366
Will Deacon45ae7cf2013-06-24 18:31:25 +01004367static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
4368{
4369 unsigned long size;
4370 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
4371 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01004372 bool cttw_dt, cttw_reg;
Robin Murphya754fd12016-09-12 17:13:50 +01004373 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004374
Charan Teja Reddyec6f7822018-01-10 17:32:52 +05304375 if (arm_smmu_restore_sec_cfg(smmu, 0))
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304376 return -ENODEV;
4377
Mitchel Humpherysba822582015-10-20 11:37:41 -07004378 dev_dbg(smmu->dev, "probing hardware configuration...\n");
4379 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01004380 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004381
4382 /* ID0 */
4383 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01004384
4385 /* Restrict available stages based on module parameter */
4386 if (force_stage == 1)
4387 id &= ~(ID0_S2TS | ID0_NTS);
4388 else if (force_stage == 2)
4389 id &= ~(ID0_S1TS | ID0_NTS);
4390
Will Deacon45ae7cf2013-06-24 18:31:25 +01004391 if (id & ID0_S1TS) {
4392 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004393 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004394 }
4395
4396 if (id & ID0_S2TS) {
4397 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004398 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004399 }
4400
4401 if (id & ID0_NTS) {
4402 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004403 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004404 }
4405
4406 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01004407 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004408 dev_err(smmu->dev, "\tno translation support!\n");
4409 return -ENODEV;
4410 }
4411
Robin Murphyb7862e32016-04-13 18:13:03 +01004412 if ((id & ID0_S1TS) &&
4413 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00004414 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004415 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00004416 }
4417
Robin Murphybae2c2d2015-07-29 19:46:05 +01004418 /*
4419 * In order for DMA API calls to work properly, we must defer to what
4420 * the DT says about coherency, regardless of what the hardware claims.
4421 * Fortunately, this also opens up a workaround for systems where the
4422 * ID register value has ended up configured incorrectly.
4423 */
4424 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
4425 cttw_reg = !!(id & ID0_CTTW);
4426 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01004427 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01004428 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004429 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01004430 cttw_dt ? "" : "non-");
4431 if (cttw_dt != cttw_reg)
4432 dev_notice(smmu->dev,
4433 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004434
Robin Murphy53867802016-09-12 17:13:48 +01004435 /* Max. number of entries we have for stream matching/indexing */
4436 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
4437 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004438 if (id & ID0_SMS) {
Robin Murphy53867802016-09-12 17:13:48 +01004439 u32 smr;
Patrick Daly937de532016-12-12 18:44:09 -08004440 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004441
4442 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy53867802016-09-12 17:13:48 +01004443 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
4444 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004445 dev_err(smmu->dev,
4446 "stream-matching supported, but no SMRs present!\n");
4447 return -ENODEV;
4448 }
4449
Robin Murphy53867802016-09-12 17:13:48 +01004450 /*
4451 * SMR.ID bits may not be preserved if the corresponding MASK
4452 * bits are set, so check each one separately. We can reject
4453 * masters later if they try to claim IDs outside these masks.
4454 */
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304455 if (!arm_smmu_is_static_cb(smmu)) {
4456 for (i = 0; i < size; i++) {
4457 smr = readl_relaxed(
4458 gr0_base + ARM_SMMU_GR0_SMR(i));
4459 if (!(smr & SMR_VALID))
4460 break;
4461 }
4462 if (i == size) {
4463 dev_err(smmu->dev,
4464 "Unable to compute streamid_masks\n");
4465 return -ENODEV;
4466 }
4467
4468 smr = smmu->streamid_mask << SMR_ID_SHIFT;
4469 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
Patrick Daly937de532016-12-12 18:44:09 -08004470 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304471 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
Patrick Daly937de532016-12-12 18:44:09 -08004472
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304473 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
4474 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
4475 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
4476 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
4477 } else {
4478 smmu->smr_mask_mask = SMR_MASK_MASK;
4479 smmu->streamid_mask = SID_MASK;
4480 }
Dhaval Patel031d7462015-05-09 14:47:29 -07004481
Robin Murphy468f4942016-09-12 17:13:49 +01004482 /* Zero-initialised to mark as invalid */
4483 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
4484 GFP_KERNEL);
4485 if (!smmu->smrs)
4486 return -ENOMEM;
4487
Robin Murphy53867802016-09-12 17:13:48 +01004488 dev_notice(smmu->dev,
4489 "\tstream matching with %lu register groups, mask 0x%x",
4490 size, smmu->smr_mask_mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004491 }
Robin Murphya754fd12016-09-12 17:13:50 +01004492 /* s2cr->type == 0 means translation, so initialise explicitly */
4493 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
4494 GFP_KERNEL);
4495 if (!smmu->s2crs)
4496 return -ENOMEM;
4497 for (i = 0; i < size; i++)
4498 smmu->s2crs[i] = s2cr_init_val;
4499
Robin Murphy53867802016-09-12 17:13:48 +01004500 smmu->num_mapping_groups = size;
Robin Murphy6668f692016-09-12 17:13:54 +01004501 mutex_init(&smmu->stream_map_mutex);
Swathi Sridharfa26bd52018-04-25 18:26:14 -07004502 mutex_init(&smmu->iommu_group_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004503
Robin Murphy7602b872016-04-28 17:12:09 +01004504 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
4505 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
4506 if (!(id & ID0_PTFS_NO_AARCH32S))
4507 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
4508 }
4509
Will Deacon45ae7cf2013-06-24 18:31:25 +01004510 /* ID1 */
4511 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01004512 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004513
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01004514 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00004515 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01004516 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01004517 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07004518 dev_warn(smmu->dev,
4519 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
4520 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004521
Will Deacon518f7132014-11-14 17:17:54 +00004522 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004523 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
4524 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
4525 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
4526 return -ENODEV;
4527 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07004528 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01004529 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01004530 /*
4531 * Cavium CN88xx erratum #27704.
4532 * Ensure ASID and VMID allocation is unique across all SMMUs in
4533 * the system.
4534 */
4535 if (smmu->model == CAVIUM_SMMUV2) {
4536 smmu->cavium_id_base =
4537 atomic_add_return(smmu->num_context_banks,
4538 &cavium_smmu_context_count);
4539 smmu->cavium_id_base -= smmu->num_context_banks;
4540 }
Robin Murphy6549a1f2017-08-08 14:56:14 +01004541 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
4542 sizeof(*smmu->cbs), GFP_KERNEL);
4543 if (!smmu->cbs)
4544 return -ENOMEM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004545
4546 /* ID2 */
4547 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
4548 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00004549 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004550
Will Deacon518f7132014-11-14 17:17:54 +00004551 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01004552 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00004553 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004554
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08004555 if (id & ID2_VMID16)
4556 smmu->features |= ARM_SMMU_FEAT_VMID16;
4557
Robin Murphyf1d84542015-03-04 16:41:05 +00004558 /*
4559 * What the page table walker can address actually depends on which
4560 * descriptor format is in use, but since a) we don't know that yet,
4561 * and b) it can vary per context bank, this will have to do...
4562 */
4563 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
4564 dev_warn(smmu->dev,
4565 "failed to set DMA mask for table walker\n");
4566
Robin Murphyb7862e32016-04-13 18:13:03 +01004567 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00004568 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01004569 if (smmu->version == ARM_SMMU_V1_64K)
4570 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004571 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004572 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00004573 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00004574 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01004575 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00004576 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01004577 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00004578 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01004579 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004580 }
4581
Robin Murphy7602b872016-04-28 17:12:09 +01004582 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01004583 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01004584 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01004585 if (smmu->features &
4586 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01004587 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01004588 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01004589 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01004590 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01004591 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01004592
Robin Murphyd5466352016-05-09 17:20:09 +01004593 if (arm_smmu_ops.pgsize_bitmap == -1UL)
4594 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
4595 else
4596 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004597 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01004598 smmu->pgsize_bitmap);
4599
Will Deacon518f7132014-11-14 17:17:54 +00004600
Will Deacon28d60072014-09-01 16:24:48 +01004601 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004602 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
4603 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01004604
4605 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004606 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
4607 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01004608
Will Deacon45ae7cf2013-06-24 18:31:25 +01004609 return 0;
4610}
4611
Robin Murphy67b65a32016-04-13 18:12:57 +01004612struct arm_smmu_match_data {
4613 enum arm_smmu_arch_version version;
4614 enum arm_smmu_implementation model;
Patrick Dalyd7476202016-09-08 18:23:28 -07004615 struct arm_smmu_arch_ops *arch_ops;
Robin Murphy67b65a32016-04-13 18:12:57 +01004616};
4617
Patrick Dalyd7476202016-09-08 18:23:28 -07004618#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops) \
4619static struct arm_smmu_match_data name = { \
4620.version = ver, \
4621.model = imp, \
4622.arch_ops = ops, \
4623} \
Robin Murphy67b65a32016-04-13 18:12:57 +01004624
Patrick Daly1f8a2882016-09-12 17:32:05 -07004625struct arm_smmu_arch_ops qsmmuv500_arch_ops;
4626
Patrick Dalyd7476202016-09-08 18:23:28 -07004627ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
4628ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
4629ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
4630ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
4631ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
Patrick Dalyad441dd2016-09-15 15:50:46 -07004632ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004633ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
4634 &qsmmuv500_arch_ops);
Robin Murphy67b65a32016-04-13 18:12:57 +01004635
Joerg Roedel09b52692014-10-02 12:24:45 +02004636static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01004637 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
4638 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
4639 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01004640 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01004641 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01004642 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07004643 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Patrick Daly1f8a2882016-09-12 17:32:05 -07004644 { .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
Robin Murphy09360402014-08-28 17:51:59 +01004645 { },
4646};
4647MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
4648
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304649#ifdef CONFIG_MSM_TZ_SMMU
4650int register_iommu_sec_ptbl(void)
4651{
4652 struct device_node *np;
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004653
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304654 for_each_matching_node(np, arm_smmu_of_match)
4655 if (of_find_property(np, "qcom,tz-device-id", NULL) &&
4656 of_device_is_available(np))
4657 break;
4658 if (!np)
4659 return -ENODEV;
4660
4661 of_node_put(np);
4662
4663 return msm_iommu_sec_pgtbl_init();
4664}
4665#endif
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004666static int arm_smmu_of_iommu_configure_fixup(struct device *dev, void *data)
4667{
4668 if (!dev->iommu_fwspec)
4669 of_iommu_configure(dev, dev->of_node);
4670 return 0;
4671}
4672
Patrick Daly000a2f22017-02-13 22:18:12 -08004673static int arm_smmu_add_device_fixup(struct device *dev, void *data)
4674{
4675 struct iommu_ops *ops = data;
4676
4677 ops->add_device(dev);
4678 return 0;
4679}
4680
Patrick Daly1f8a2882016-09-12 17:32:05 -07004681static int qsmmuv500_tbu_register(struct device *dev, void *data);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004682static int arm_smmu_device_dt_probe(struct platform_device *pdev)
4683{
Robin Murphy67b65a32016-04-13 18:12:57 +01004684 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004685 struct resource *res;
4686 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004687 struct device *dev = &pdev->dev;
Robin Murphyd5b41782016-09-14 15:21:39 +01004688 int num_irqs, i, err;
Robin Murphy7e96c742016-09-14 15:26:46 +01004689 bool legacy_binding;
4690
4691 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
4692 if (legacy_binding && !using_generic_binding) {
4693 if (!using_legacy_binding)
4694 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
4695 using_legacy_binding = true;
4696 } else if (!legacy_binding && !using_legacy_binding) {
4697 using_generic_binding = true;
4698 } else {
4699 dev_err(dev, "not probing due to mismatched DT properties\n");
4700 return -ENODEV;
4701 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004702
4703 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
4704 if (!smmu) {
4705 dev_err(dev, "failed to allocate arm_smmu_device\n");
4706 return -ENOMEM;
4707 }
4708 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08004709 spin_lock_init(&smmu->atos_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07004710 idr_init(&smmu->asid_idr);
4711 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004712
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004713 data = of_device_get_match_data(dev);
Robin Murphy67b65a32016-04-13 18:12:57 +01004714 smmu->version = data->version;
4715 smmu->model = data->model;
Patrick Dalyd7476202016-09-08 18:23:28 -07004716 smmu->arch_ops = data->arch_ops;
Robin Murphy09360402014-08-28 17:51:59 +01004717
Will Deacon45ae7cf2013-06-24 18:31:25 +01004718 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Prakash Guptaa87818d2018-02-09 19:24:02 +05304719 if (res == NULL) {
4720 dev_err(dev, "no MEM resource info\n");
4721 return -EINVAL;
4722 }
4723
4724 smmu->phys_addr = res->start;
Julia Lawall8a7f4312013-08-19 12:20:37 +01004725 smmu->base = devm_ioremap_resource(dev, res);
4726 if (IS_ERR(smmu->base))
4727 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004728 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004729
4730 if (of_property_read_u32(dev->of_node, "#global-interrupts",
4731 &smmu->num_global_irqs)) {
4732 dev_err(dev, "missing #global-interrupts property\n");
4733 return -ENODEV;
4734 }
4735
4736 num_irqs = 0;
4737 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
4738 num_irqs++;
4739 if (num_irqs > smmu->num_global_irqs)
4740 smmu->num_context_irqs++;
4741 }
4742
Andreas Herrmann44a08de2013-10-01 13:39:07 +01004743 if (!smmu->num_context_irqs) {
4744 dev_err(dev, "found %d interrupts but expected at least %d\n",
4745 num_irqs, smmu->num_global_irqs + 1);
4746 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004747 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004748
4749 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
4750 GFP_KERNEL);
4751 if (!smmu->irqs) {
4752 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
4753 return -ENOMEM;
4754 }
4755
4756 for (i = 0; i < num_irqs; ++i) {
4757 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07004758
Will Deacon45ae7cf2013-06-24 18:31:25 +01004759 if (irq < 0) {
4760 dev_err(dev, "failed to get irq index %d\n", i);
4761 return -ENODEV;
4762 }
4763 smmu->irqs[i] = irq;
4764 }
4765
Dhaval Patel031d7462015-05-09 14:47:29 -07004766 parse_driver_options(smmu);
Patrick Dalyaddf1f82018-04-23 14:39:19 -07004767 parse_static_cb_cfg(smmu);
Dhaval Patel031d7462015-05-09 14:47:29 -07004768
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004769 smmu->pwr = arm_smmu_init_power_resources(pdev);
4770 if (IS_ERR(smmu->pwr))
4771 return PTR_ERR(smmu->pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004772
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004773 err = arm_smmu_power_on(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004774 if (err)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004775 goto out_exit_power_resources;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004776
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304777 smmu->sec_id = msm_dev_to_device_id(dev);
Charan Teja Reddy65ff5e42018-02-19 15:32:28 +05304778 INIT_LIST_HEAD(&smmu->list);
4779 spin_lock(&arm_smmu_devices_lock);
4780 list_add(&smmu->list, &arm_smmu_devices);
4781 spin_unlock(&arm_smmu_devices_lock);
4782
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004783 err = arm_smmu_device_cfg_probe(smmu);
4784 if (err)
4785 goto out_power_off;
4786
Patrick Dalyda688822017-05-17 20:12:48 -07004787 err = arm_smmu_handoff_cbs(smmu);
4788 if (err)
4789 goto out_power_off;
4790
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004791 err = arm_smmu_parse_impl_def_registers(smmu);
4792 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004793 goto out_power_off;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004794
Robin Murphyb7862e32016-04-13 18:13:03 +01004795 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01004796 smmu->num_context_banks != smmu->num_context_irqs) {
4797 dev_err(dev,
Mitchel Humpherys73230ce2014-12-11 17:18:02 -08004798 "found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
4799 smmu->num_context_irqs, smmu->num_context_banks,
4800 smmu->num_context_banks);
4801 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004802 }
4803
Will Deacon45ae7cf2013-06-24 18:31:25 +01004804 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08004805 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
4806 NULL, arm_smmu_global_fault,
4807 IRQF_ONESHOT | IRQF_SHARED,
4808 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004809 if (err) {
4810 dev_err(dev, "failed to request global IRQ %d (%u)\n",
4811 i, smmu->irqs[i]);
Robin Murphyd5b41782016-09-14 15:21:39 +01004812 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004813 }
4814 }
4815
Patrick Dalyd7476202016-09-08 18:23:28 -07004816 err = arm_smmu_arch_init(smmu);
4817 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004818 goto out_power_off;
Patrick Dalyd7476202016-09-08 18:23:28 -07004819
Robin Murphy06e393e2016-09-12 17:13:55 +01004820 of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004821 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01004822 arm_smmu_device_reset(smmu);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004823 arm_smmu_power_off(smmu->pwr);
Patrick Dalyd7476202016-09-08 18:23:28 -07004824
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004825 /* bus_set_iommu depends on this. */
4826 bus_for_each_dev(&platform_bus_type, NULL, NULL,
4827 arm_smmu_of_iommu_configure_fixup);
4828
Robin Murphy7e96c742016-09-14 15:26:46 +01004829 /* Oh, for a proper bus abstraction */
4830 if (!iommu_present(&platform_bus_type))
4831 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
Patrick Daly000a2f22017-02-13 22:18:12 -08004832 else
4833 bus_for_each_dev(&platform_bus_type, NULL, &arm_smmu_ops,
4834 arm_smmu_add_device_fixup);
Charan Teja Reddyf8464882017-12-05 20:29:05 +05304835
4836 err = register_regulator_notifier(smmu);
4837 if (err)
4838 goto out_power_off;
4839
Robin Murphy7e96c742016-09-14 15:26:46 +01004840#ifdef CONFIG_ARM_AMBA
4841 if (!iommu_present(&amba_bustype))
4842 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
4843#endif
4844#ifdef CONFIG_PCI
4845 if (!iommu_present(&pci_bus_type)) {
4846 pci_request_acs();
4847 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
4848 }
4849#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01004850 return 0;
4851
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004852out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004853 arm_smmu_power_off(smmu->pwr);
Charan Teja Reddy65ff5e42018-02-19 15:32:28 +05304854 spin_lock(&arm_smmu_devices_lock);
4855 list_del(&smmu->list);
4856 spin_unlock(&arm_smmu_devices_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004857
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004858out_exit_power_resources:
4859 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004860
Will Deacon45ae7cf2013-06-24 18:31:25 +01004861 return err;
4862}
4863
4864static int arm_smmu_device_remove(struct platform_device *pdev)
4865{
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004866 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004867
4868 if (!smmu)
4869 return -ENODEV;
4870
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004871 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07004872 return -EINVAL;
4873
Patrick Dalyaddf1f82018-04-23 14:39:19 -07004874 if (!(bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS) &&
4875 (bitmap_empty(smmu->secure_context_map, ARM_SMMU_MAX_CBS) ||
4876 arm_smmu_opt_hibernation(smmu))))
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004877 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004878
Patrick Dalyc190d932016-08-30 17:23:28 -07004879 idr_destroy(&smmu->asid_idr);
4880
Will Deacon45ae7cf2013-06-24 18:31:25 +01004881 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07004882 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004883 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004884
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004885 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004886
Prakash Gupta56c447b2018-07-27 14:09:46 +05304887 spin_lock(&arm_smmu_devices_lock);
4888 list_del(&smmu->list);
4889 spin_unlock(&arm_smmu_devices_lock);
4890
Will Deacon45ae7cf2013-06-24 18:31:25 +01004891 return 0;
4892}
4893
Patrick Dalyaddf1f82018-04-23 14:39:19 -07004894static int arm_smmu_pm_freeze(struct device *dev)
4895{
4896 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
4897
4898 if (!arm_smmu_opt_hibernation(smmu)) {
4899 dev_err(smmu->dev, "Aborting: Hibernation not supported\n");
4900 return -EINVAL;
4901 }
4902 return 0;
4903}
4904
4905static int arm_smmu_pm_restore(struct device *dev)
4906{
4907 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
4908 int ret;
4909
4910 ret = arm_smmu_power_on(smmu->pwr);
4911 if (ret)
4912 return ret;
4913
4914 arm_smmu_device_reset(smmu);
4915 arm_smmu_power_off(smmu->pwr);
4916 return 0;
4917}
4918
4919static const struct dev_pm_ops arm_smmu_pm_ops = {
4920#ifdef CONFIG_PM_SLEEP
4921 .freeze = arm_smmu_pm_freeze,
4922 .restore = arm_smmu_pm_restore,
4923#endif
4924};
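/*
 * Hibernation behaviour in short: freeze refuses to proceed unless the
 * hibernation option is set for this SMMU, and restore simply powers the
 * SMMU on, re-runs arm_smmu_device_reset() to reprogram the global and
 * context-bank registers, and powers it off again.
 */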
4925
Will Deacon45ae7cf2013-06-24 18:31:25 +01004926static struct platform_driver arm_smmu_driver = {
4927 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004928 .name = "arm-smmu",
4929 .of_match_table = of_match_ptr(arm_smmu_of_match),
Patrick Dalyaddf1f82018-04-23 14:39:19 -07004930 .pm = &arm_smmu_pm_ops,
Will Deacon45ae7cf2013-06-24 18:31:25 +01004931 },
4932 .probe = arm_smmu_device_dt_probe,
4933 .remove = arm_smmu_device_remove,
4934};
4935
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004936static struct platform_driver qsmmuv500_tbu_driver;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004937static int __init arm_smmu_init(void)
4938{
Robin Murphy7e96c742016-09-14 15:26:46 +01004939 static bool registered;
4940 int ret = 0;
Patrick Dalyaddf1f82018-04-23 14:39:19 -07004941 struct device_node *node;
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004942 ktime_t cur;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004943
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004944 if (registered)
4945 return 0;
4946
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004947 cur = ktime_get();
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004948 ret = platform_driver_register(&qsmmuv500_tbu_driver);
4949 if (ret)
4950 return ret;
4951
4952 ret = platform_driver_register(&arm_smmu_driver);
Patrick Dalyaddf1f82018-04-23 14:39:19 -07004953 /* Disable secure usecases if hibernation support is enabled */
4954 node = of_find_compatible_node(NULL, NULL, "qcom,qsmmu-v500");
4955 if (IS_ENABLED(CONFIG_MSM_TZ_SMMU) && node &&
4956 !of_find_property(node, "qcom,hibernation-support", NULL))
4957 ret = register_iommu_sec_ptbl();
4958
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004959 registered = !ret;
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004960 trace_smmu_init(ktime_us_delta(ktime_get(), cur));
4961
Robin Murphy7e96c742016-09-14 15:26:46 +01004962 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004963}
4964
4965static void __exit arm_smmu_exit(void)
4966{
4967 return platform_driver_unregister(&arm_smmu_driver);
4968}
4969
Andreas Herrmannb1950b22013-10-01 13:39:05 +01004970subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004971module_exit(arm_smmu_exit);
4972
Robin Murphy7e96c742016-09-14 15:26:46 +01004973static int __init arm_smmu_of_init(struct device_node *np)
4974{
4975 int ret = arm_smmu_init();
4976
4977 if (ret)
4978 return ret;
4979
4980 if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
4981 return -ENODEV;
4982
4983 return 0;
4984}
4985IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
4986IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
4987IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
4988IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
4989IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
4990IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
Robin Murphy7e96c742016-09-14 15:26:46 +01004991
Patrick Dalya0fddb62017-03-27 19:26:59 -07004992#define TCU_HW_VERSION_HLOS1 (0x18)
4993
Patrick Daly1f8a2882016-09-12 17:32:05 -07004994#define DEBUG_SID_HALT_REG 0x0
4995#define DEBUG_SID_HALT_VAL (0x1 << 16)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004996#define DEBUG_SID_HALT_SID_MASK 0x3ff
4997
4998#define DEBUG_VA_ADDR_REG 0x8
4999
5000#define DEBUG_TXN_TRIGG_REG 0x18
5001#define DEBUG_TXN_AXPROT_SHIFT 6
5002#define DEBUG_TXN_AXCACHE_SHIFT 2
5003#define DEBUG_TRX_WRITE (0x1 << 1)
5004#define DEBUG_TXN_READ (0x0 << 1)
5005#define DEBUG_TXN_TRIGGER 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07005006
5007#define DEBUG_SR_HALT_ACK_REG 0x20
5008#define DEBUG_SR_HALT_ACK_VAL (0x1 << 1)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005009#define DEBUG_SR_ECATS_RUNNING_VAL (0x1 << 0)
5010
5011#define DEBUG_PAR_REG 0x28
5012#define DEBUG_PAR_PA_MASK ((0x1ULL << 36) - 1)
5013#define DEBUG_PAR_PA_SHIFT 12
5014#define DEBUG_PAR_FAULT_VAL 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07005015
Patrick Daly8c1202b2017-05-10 15:42:30 -07005016#define TBU_DBG_TIMEOUT_US 100
Patrick Daly1f8a2882016-09-12 17:32:05 -07005017
Patrick Daly23301482017-10-12 16:18:25 -07005018#define QSMMUV500_ACTLR_DEEP_PREFETCH_MASK 0x3
5019#define QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT 0x8
5020
Patrick Daly03330cc2017-08-11 14:56:38 -07005021
5022struct actlr_setting {
5023 struct arm_smmu_smr smr;
5024 u32 actlr;
5025};
5026
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005027struct qsmmuv500_archdata {
5028 struct list_head tbus;
Patrick Dalya0fddb62017-03-27 19:26:59 -07005029 void __iomem *tcu_base;
5030 u32 version;
Patrick Dalyda765c62017-09-11 16:31:07 -07005031
5032 struct actlr_setting *actlrs;
5033 u32 actlr_tbl_size;
5034
5035 struct arm_smmu_smr *errata1_clients;
5036 u32 num_errata1_clients;
5037 remote_spinlock_t errata1_lock;
5038 ktime_t last_tlbi_ktime;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005039};
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005040#define get_qsmmuv500_archdata(smmu) \
5041 ((struct qsmmuv500_archdata *)((smmu)->archdata))
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005042
Patrick Daly1f8a2882016-09-12 17:32:05 -07005043struct qsmmuv500_tbu_device {
5044 struct list_head list;
5045 struct device *dev;
5046 struct arm_smmu_device *smmu;
5047 void __iomem *base;
5048 void __iomem *status_reg;
5049
5050 struct arm_smmu_power_resources *pwr;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005051 u32 sid_start;
5052 u32 num_sids;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005053
5054 /* Protects halt count */
5055 spinlock_t halt_lock;
5056 u32 halt_count;
5057};
5058
Patrick Daly03330cc2017-08-11 14:56:38 -07005059struct qsmmuv500_group_iommudata {
5060 bool has_actlr;
5061 u32 actlr;
5062};
5063#define to_qsmmuv500_group_iommudata(group) \
5064 ((struct qsmmuv500_group_iommudata *) \
5065 (iommu_group_get_iommudata(group)))
5066
5067
5068static bool arm_smmu_fwspec_match_smr(struct iommu_fwspec *fwspec,
Patrick Dalyda765c62017-09-11 16:31:07 -07005069 struct arm_smmu_smr *smr)
5070{
5071 struct arm_smmu_smr *smr2;
Patrick Daly03330cc2017-08-11 14:56:38 -07005072 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Patrick Dalyda765c62017-09-11 16:31:07 -07005073 int i, idx;
5074
Patrick Daly03330cc2017-08-11 14:56:38 -07005075 for_each_cfg_sme(fwspec, i, idx) {
5076 smr2 = &smmu->smrs[idx];
Patrick Dalyda765c62017-09-11 16:31:07 -07005077 /* Continue if table entry does not match */
5078 if ((smr->id ^ smr2->id) & ~(smr->mask | smr2->mask))
5079 continue;
5080 return true;
5081 }
5082 return false;
5083}
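/*
 * The check above treats two stream match entries as overlapping when every
 * ID bit that is ignored by neither mask is equal. For example (values
 * purely illustrative): id=0x840/mask=0x3f and id=0x861/mask=0x0 match,
 * since 0x840 ^ 0x861 = 0x21 and 0x21 & ~(0x3f | 0x0) = 0.
 */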
5084
5085#define ERRATA1_REMOTE_SPINLOCK "S:6"
5086#define ERRATA1_TLBI_INTERVAL_US 10
5087static bool
5088qsmmuv500_errata1_required(struct arm_smmu_domain *smmu_domain,
5089 struct qsmmuv500_archdata *data)
5090{
5091 bool ret = false;
5092 int j;
5093 struct arm_smmu_smr *smr;
Patrick Daly03330cc2017-08-11 14:56:38 -07005094 struct iommu_fwspec *fwspec;
Patrick Dalyda765c62017-09-11 16:31:07 -07005095
5096 if (smmu_domain->qsmmuv500_errata1_init)
5097 return smmu_domain->qsmmuv500_errata1_client;
5098
Patrick Daly03330cc2017-08-11 14:56:38 -07005099 fwspec = smmu_domain->dev->iommu_fwspec;
Patrick Dalyda765c62017-09-11 16:31:07 -07005100 for (j = 0; j < data->num_errata1_clients; j++) {
5101 smr = &data->errata1_clients[j];
Patrick Daly03330cc2017-08-11 14:56:38 -07005102 if (arm_smmu_fwspec_match_smr(fwspec, smr)) {
Patrick Dalyda765c62017-09-11 16:31:07 -07005103 ret = true;
5104 break;
5105 }
5106 }
5107
5108 smmu_domain->qsmmuv500_errata1_init = true;
5109 smmu_domain->qsmmuv500_errata1_client = ret;
5110 return ret;
5111}
5112
Patrick Daly86960052017-12-04 18:53:13 -08005113#define SCM_CONFIG_ERRATA1_CLIENT_ALL 0x2
5114#define SCM_CONFIG_ERRATA1 0x3
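/*
 * Workaround sequence used by __qsmmuv500_errata1_tlbiall(): issue TLBIALL
 * and TLBSYNC, then briefly poll TLBSTATUS. If the sync does not complete
 * quickly, call into TZ (SCM_CONFIG_ERRATA1) to disable the errata
 * configuration, throttle the NoC while waiting for the sync to finish, and
 * finally remove the throttle and re-enable the configuration in TZ.
 */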
Patrick Dalyda765c62017-09-11 16:31:07 -07005115static void __qsmmuv500_errata1_tlbiall(struct arm_smmu_domain *smmu_domain)
5116{
5117 struct arm_smmu_device *smmu = smmu_domain->smmu;
5118 struct device *dev = smmu_domain->dev;
5119 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
5120 void __iomem *base;
Patrick Daly86960052017-12-04 18:53:13 -08005121 int ret;
Patrick Dalyda765c62017-09-11 16:31:07 -07005122 ktime_t cur;
5123 u32 val;
Patrick Daly86960052017-12-04 18:53:13 -08005124 struct scm_desc desc = {
5125 .args[0] = SCM_CONFIG_ERRATA1_CLIENT_ALL,
5126 .args[1] = false,
5127 .arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
5128 };
Patrick Dalyda765c62017-09-11 16:31:07 -07005129
5130 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
5131 writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
5132 writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
Patrick Daly86960052017-12-04 18:53:13 -08005133 if (!readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
5134 !(val & TLBSTATUS_SACTIVE), 0, 100))
5135 return;
5136
5137 ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_SMMU_PROGRAM,
5138 SCM_CONFIG_ERRATA1),
5139 &desc);
5140 if (ret) {
5141 dev_err(smmu->dev, "Calling into TZ to disable ERRATA1 failed - IOMMU hardware in bad state\n");
5142 BUG();
5143 return;
5144 }
5145
5146 cur = ktime_get();
5147 trace_tlbi_throttle_start(dev, 0);
5148 msm_bus_noc_throttle_wa(true);
5149
Patrick Dalyda765c62017-09-11 16:31:07 -07005150 if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
Patrick Daly86960052017-12-04 18:53:13 -08005151 !(val & TLBSTATUS_SACTIVE), 0, 10000)) {
5152		dev_err(smmu->dev, "ERRATA1 TLBSYNC timeout - IOMMU hardware in bad state\n");
5153 trace_tlbsync_timeout(dev, 0);
5154 BUG();
5155 }
Patrick Dalyda765c62017-09-11 16:31:07 -07005156
Patrick Daly86960052017-12-04 18:53:13 -08005157 msm_bus_noc_throttle_wa(false);
5158 trace_tlbi_throttle_end(dev, ktime_us_delta(ktime_get(), cur));
Patrick Dalyda765c62017-09-11 16:31:07 -07005159
Patrick Daly86960052017-12-04 18:53:13 -08005160 desc.args[1] = true;
5161 ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_SMMU_PROGRAM,
5162 SCM_CONFIG_ERRATA1),
5163 &desc);
5164 if (ret) {
5165 dev_err(smmu->dev, "Calling into TZ to reenable ERRATA1 failed - IOMMU hardware in bad state\n");
5166 BUG();
Patrick Dalyda765c62017-09-11 16:31:07 -07005167 }
5168}
5169
5170/* Must be called with clocks/regulators enabled */
5171static void qsmmuv500_errata1_tlb_inv_context(void *cookie)
5172{
5173 struct arm_smmu_domain *smmu_domain = cookie;
5174 struct device *dev = smmu_domain->dev;
5175 struct qsmmuv500_archdata *data =
5176 get_qsmmuv500_archdata(smmu_domain->smmu);
5177 ktime_t cur;
Patrick Daly1faa3112017-10-31 16:40:40 -07005178 unsigned long flags;
Patrick Dalyda765c62017-09-11 16:31:07 -07005179 bool errata;
5180
5181 cur = ktime_get();
Prakash Gupta25f90512017-11-20 14:56:54 +05305182 trace_tlbi_start(dev, 0);
Patrick Dalyda765c62017-09-11 16:31:07 -07005183
5184 errata = qsmmuv500_errata1_required(smmu_domain, data);
Patrick Daly1faa3112017-10-31 16:40:40 -07005185 remote_spin_lock_irqsave(&data->errata1_lock, flags);
Patrick Dalyda765c62017-09-11 16:31:07 -07005186 if (errata) {
5187 s64 delta;
5188
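		/*
		 * Space back-to-back invalidations from errata clients by at
		 * least ERRATA1_TLBI_INTERVAL_US, measured from the end of
		 * the previous invalidation.
		 */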
5189 delta = ktime_us_delta(ktime_get(), data->last_tlbi_ktime);
5190 if (delta < ERRATA1_TLBI_INTERVAL_US)
5191 udelay(ERRATA1_TLBI_INTERVAL_US - delta);
5192
5193 __qsmmuv500_errata1_tlbiall(smmu_domain);
5194
5195 data->last_tlbi_ktime = ktime_get();
5196 } else {
5197 __qsmmuv500_errata1_tlbiall(smmu_domain);
5198 }
Patrick Daly1faa3112017-10-31 16:40:40 -07005199 remote_spin_unlock_irqrestore(&data->errata1_lock, flags);
Patrick Dalyda765c62017-09-11 16:31:07 -07005200
Prakash Gupta25f90512017-11-20 14:56:54 +05305201 trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
Patrick Dalyda765c62017-09-11 16:31:07 -07005202}
5203
5204static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops = {
5205 .tlb_flush_all = qsmmuv500_errata1_tlb_inv_context,
5206 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
5207 .free_pages_exact = arm_smmu_free_pages_exact,
5208};
5209
Patrick Daly8c1202b2017-05-10 15:42:30 -07005210static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu,
5211 struct arm_smmu_domain *smmu_domain)
Patrick Daly1f8a2882016-09-12 17:32:05 -07005212{
5213 unsigned long flags;
Patrick Daly8c1202b2017-05-10 15:42:30 -07005214 u32 halt, fsr, sctlr_orig, sctlr, status;
5215 void __iomem *base, *cb_base;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005216
5217 spin_lock_irqsave(&tbu->halt_lock, flags);
5218 if (tbu->halt_count) {
5219 tbu->halt_count++;
5220 spin_unlock_irqrestore(&tbu->halt_lock, flags);
5221 return 0;
5222 }
5223
Patrick Daly8c1202b2017-05-10 15:42:30 -07005224 cb_base = ARM_SMMU_CB_BASE(smmu_domain->smmu) +
5225 ARM_SMMU_CB(smmu_domain->smmu, smmu_domain->cfg.cbndx);
Patrick Daly1f8a2882016-09-12 17:32:05 -07005226 base = tbu->base;
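	/*
	 * Request a halt by setting DEBUG_SID_HALT_VAL, then wait for the TBU
	 * to acknowledge it in DEBUG_SR_HALT_ACK_REG.
	 */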
Patrick Daly8c1202b2017-05-10 15:42:30 -07005227 halt = readl_relaxed(base + DEBUG_SID_HALT_REG);
5228 halt |= DEBUG_SID_HALT_VAL;
5229 writel_relaxed(halt, base + DEBUG_SID_HALT_REG);
Patrick Daly1f8a2882016-09-12 17:32:05 -07005230
Patrick Daly8c1202b2017-05-10 15:42:30 -07005231 if (!readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
5232 (status & DEBUG_SR_HALT_ACK_VAL),
5233 0, TBU_DBG_TIMEOUT_US))
5234 goto out;
5235
5236 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
5237 if (!(fsr & FSR_FAULT)) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07005238 dev_err(tbu->dev, "Couldn't halt TBU!\n");
5239 spin_unlock_irqrestore(&tbu->halt_lock, flags);
5240 return -ETIMEDOUT;
5241 }
5242
Patrick Daly8c1202b2017-05-10 15:42:30 -07005243 /*
5244	 * We are in a fault; our request to halt the bus will not complete
5245 * until transactions in front of us (such as the fault itself) have
5246 * completed. Disable iommu faults and terminate any existing
5247 * transactions.
5248 */
5249 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
5250 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
5251 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
5252
5253 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
5254 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
5255
5256 if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
5257 (status & DEBUG_SR_HALT_ACK_VAL),
5258 0, TBU_DBG_TIMEOUT_US)) {
5259 dev_err(tbu->dev, "Couldn't halt TBU from fault context!\n");
5260 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
5261 spin_unlock_irqrestore(&tbu->halt_lock, flags);
5262 return -ETIMEDOUT;
5263 }
5264
5265 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
5266out:
Patrick Daly1f8a2882016-09-12 17:32:05 -07005267 tbu->halt_count = 1;
5268 spin_unlock_irqrestore(&tbu->halt_lock, flags);
5269 return 0;
5270}
5271
5272static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
5273{
5274 unsigned long flags;
5275 u32 val;
5276 void __iomem *base;
5277
5278 spin_lock_irqsave(&tbu->halt_lock, flags);
5279 if (!tbu->halt_count) {
5280 WARN(1, "%s: bad tbu->halt_count", dev_name(tbu->dev));
5281 spin_unlock_irqrestore(&tbu->halt_lock, flags);
5282 return;
5283
5284 } else if (tbu->halt_count > 1) {
5285 tbu->halt_count--;
5286 spin_unlock_irqrestore(&tbu->halt_lock, flags);
5287 return;
5288 }
5289
5290 base = tbu->base;
5291 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
5292 val &= ~DEBUG_SID_HALT_VAL;
5293 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
5294
5295 tbu->halt_count = 0;
5296 spin_unlock_irqrestore(&tbu->halt_lock, flags);
5297}
5298
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005299static struct qsmmuv500_tbu_device *qsmmuv500_find_tbu(
5300 struct arm_smmu_device *smmu, u32 sid)
5301{
5302 struct qsmmuv500_tbu_device *tbu = NULL;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005303 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005304
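	/*
	 * Each TBU owns a contiguous stream ID window, [sid_start,
	 * sid_start + num_sids), taken from its qcom,stream-id-range property.
	 */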
5305 list_for_each_entry(tbu, &data->tbus, list) {
5306 if (tbu->sid_start <= sid &&
5307 sid < tbu->sid_start + tbu->num_sids)
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07005308 return tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005309 }
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07005310 return NULL;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005311}
5312
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005313static int qsmmuv500_ecats_lock(struct arm_smmu_domain *smmu_domain,
5314 struct qsmmuv500_tbu_device *tbu,
5315 unsigned long *flags)
5316{
5317 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005318 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005319 u32 val;
5320
5321 spin_lock_irqsave(&smmu->atos_lock, *flags);
5322 /* The status register is not accessible on version 1.0 */
5323 if (data->version == 0x01000000)
5324 return 0;
5325
5326 if (readl_poll_timeout_atomic(tbu->status_reg,
5327 val, (val == 0x1), 0,
5328 TBU_DBG_TIMEOUT_US)) {
5329 dev_err(tbu->dev, "ECATS hw busy!\n");
5330 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
5331 return -ETIMEDOUT;
5332 }
5333
5334 return 0;
5335}
5336
5337static void qsmmuv500_ecats_unlock(struct arm_smmu_domain *smmu_domain,
5338 struct qsmmuv500_tbu_device *tbu,
5339 unsigned long *flags)
5340{
5341 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005342 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005343
5344 /* The status register is not accessible on version 1.0 */
5345 if (data->version != 0x01000000)
5346 writel_relaxed(0, tbu->status_reg);
5347 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
5348}
5349
5350/*
5351 * Returns the translated physical address, or zero on failure.
5352 */
5353static phys_addr_t qsmmuv500_iova_to_phys(
5354 struct iommu_domain *domain, dma_addr_t iova, u32 sid)
5355{
5356 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
5357 struct arm_smmu_device *smmu = smmu_domain->smmu;
5358 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
5359 struct qsmmuv500_tbu_device *tbu;
5360 int ret;
5361 phys_addr_t phys = 0;
5362 u64 val, fsr;
5363 unsigned long flags;
5364 void __iomem *cb_base;
5365 u32 sctlr_orig, sctlr;
5366 int needs_redo = 0;
Patrick Daly8c1202b2017-05-10 15:42:30 -07005367 ktime_t timeout;
5368
5369	/* Only a 36-bit IOVA is supported */
5370 if (iova >= (1ULL << 36)) {
5371 dev_err_ratelimited(smmu->dev, "ECATS: address too large: %pad\n",
5372 &iova);
5373 return 0;
5374 }
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005375
5376 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
5377 tbu = qsmmuv500_find_tbu(smmu, sid);
5378 if (!tbu)
5379 return 0;
5380
5381 ret = arm_smmu_power_on(tbu->pwr);
5382 if (ret)
5383 return 0;
5384
Patrick Daly8c1202b2017-05-10 15:42:30 -07005385 ret = qsmmuv500_tbu_halt(tbu, smmu_domain);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005386 if (ret)
5387 goto out_power_off;
5388
Patrick Daly8c1202b2017-05-10 15:42:30 -07005389 /*
5390 * ECATS can trigger the fault interrupt, so disable it temporarily
5391 * and check for an interrupt manually.
5392 */
5393 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
5394 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
5395 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
5396
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005397 /* Only one concurrent atos operation */
5398 ret = qsmmuv500_ecats_lock(smmu_domain, tbu, &flags);
5399 if (ret)
5400 goto out_resume;
5401
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005402redo:
5403 /* Set address and stream-id */
5404 val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
5405 val |= sid & DEBUG_SID_HALT_SID_MASK;
5406 writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
5407 writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
5408
5409 /*
5410 * Write-back Read and Write-Allocate
5411	 * Privileged, non-secure data transaction
5412 * Read operation.
5413 */
5414 val = 0xF << DEBUG_TXN_AXCACHE_SHIFT;
5415 val |= 0x3 << DEBUG_TXN_AXPROT_SHIFT;
5416 val |= DEBUG_TXN_TRIGGER;
5417 writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);
5418
5419 ret = 0;
Patrick Daly8c1202b2017-05-10 15:42:30 -07005420	/* Open-coded poll loop based on readx_poll_timeout_atomic() */
5421 timeout = ktime_add_us(ktime_get(), TBU_DBG_TIMEOUT_US);
5422 for (;;) {
5423 val = readl_relaxed(tbu->base + DEBUG_SR_HALT_ACK_REG);
5424 if (!(val & DEBUG_SR_ECATS_RUNNING_VAL))
5425 break;
5426 val = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
5427 if (val & FSR_FAULT)
5428 break;
5429 if (ktime_compare(ktime_get(), timeout) > 0) {
5430 dev_err(tbu->dev, "ECATS translation timed out!\n");
5431 ret = -ETIMEDOUT;
5432 break;
5433 }
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005434 }
5435
5436 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
5437 if (fsr & FSR_FAULT) {
5438 dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx\n",
Patrick Daly8c1202b2017-05-10 15:42:30 -07005439 fsr);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005440 ret = -EINVAL;
5441
5442 writel_relaxed(val, cb_base + ARM_SMMU_CB_FSR);
5443 /*
5444 * Clear pending interrupts
5445 * Barrier required to ensure that the FSR is cleared
5446 * before resuming SMMU operation
5447 */
5448 wmb();
5449 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
5450 }
5451
5452 val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
5453 if (val & DEBUG_PAR_FAULT_VAL) {
5454 dev_err(tbu->dev, "ECATS translation failed! PAR = %llx\n",
5455 val);
5456 ret = -EINVAL;
5457 }
5458
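	/* Extract the PA field (PAR bits [47:12]) reported by the debug interface */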
5459 phys = (val >> DEBUG_PAR_PA_SHIFT) & DEBUG_PAR_PA_MASK;
5460 if (ret < 0)
5461 phys = 0;
5462
5463 /* Reset hardware */
5464 writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
5465 writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
5466
5467 /*
5468 * After a failed translation, the next successful translation will
5469 * incorrectly be reported as a failure.
5470 */
5471 if (!phys && needs_redo++ < 2)
5472 goto redo;
5473
5474 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
5475 qsmmuv500_ecats_unlock(smmu_domain, tbu, &flags);
5476
5477out_resume:
5478 qsmmuv500_tbu_resume(tbu);
5479
5480out_power_off:
5481 arm_smmu_power_off(tbu->pwr);
5482
5483 return phys;
5484}
5485
5486static phys_addr_t qsmmuv500_iova_to_phys_hard(
5487 struct iommu_domain *domain, dma_addr_t iova)
5488{
5489 u16 sid;
5490 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
5491 struct iommu_fwspec *fwspec;
5492
5493 /* Select a sid */
5494 fwspec = smmu_domain->dev->iommu_fwspec;
5495 sid = (u16)fwspec->ids[0];
5496
5497 return qsmmuv500_iova_to_phys(domain, iova, sid);
5498}
5499
Patrick Daly03330cc2017-08-11 14:56:38 -07005500static void qsmmuv500_release_group_iommudata(void *data)
5501{
5502 kfree(data);
5503}
5504
5505/* All devices in a group that specify an ACTLR value must agree on it */
5506static int qsmmuv500_device_group(struct device *dev,
5507 struct iommu_group *group)
5508{
5509 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
5510 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
5511 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
5512 struct qsmmuv500_group_iommudata *iommudata;
5513 u32 actlr, i;
5514 struct arm_smmu_smr *smr;
5515
5516 iommudata = to_qsmmuv500_group_iommudata(group);
5517 if (!iommudata) {
5518 iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
5519 if (!iommudata)
5520 return -ENOMEM;
5521
5522 iommu_group_set_iommudata(group, iommudata,
5523 qsmmuv500_release_group_iommudata);
5524 }
5525
5526 for (i = 0; i < data->actlr_tbl_size; i++) {
5527 smr = &data->actlrs[i].smr;
5528 actlr = data->actlrs[i].actlr;
5529
5530 if (!arm_smmu_fwspec_match_smr(fwspec, smr))
5531 continue;
5532
5533 if (!iommudata->has_actlr) {
5534 iommudata->actlr = actlr;
5535 iommudata->has_actlr = true;
5536 } else if (iommudata->actlr != actlr) {
5537 return -EINVAL;
5538 }
5539 }
5540
5541 return 0;
5542}
5543
5544static void qsmmuv500_init_cb(struct arm_smmu_domain *smmu_domain,
5545 struct device *dev)
5546{
5547 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyad521082018-04-06 18:07:13 -07005548 struct arm_smmu_cb *cb = &smmu->cbs[smmu_domain->cfg.cbndx];
Patrick Daly03330cc2017-08-11 14:56:38 -07005549 struct qsmmuv500_group_iommudata *iommudata =
5550 to_qsmmuv500_group_iommudata(dev->iommu_group);
Patrick Daly03330cc2017-08-11 14:56:38 -07005551
5552 if (!iommudata->has_actlr)
5553 return;
5554
Patrick Dalyad521082018-04-06 18:07:13 -07005555 cb->actlr = iommudata->actlr;
Patrick Daly25317e82018-05-07 12:35:29 -07005556 cb->has_actlr = true;
Patrick Daly03330cc2017-08-11 14:56:38 -07005557 /*
Patrick Daly23301482017-10-12 16:18:25 -07005558 * Prefetch only works properly if the start and end of all
5559	 * buffers mapped in the page table are aligned to 16 KB.
5560 */
Patrick Daly27bd9292017-11-22 13:59:59 -08005561 if ((iommudata->actlr >> QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT) &
Patrick Daly23301482017-10-12 16:18:25 -07005562 QSMMUV500_ACTLR_DEEP_PREFETCH_MASK)
5563 smmu_domain->qsmmuv500_errata2_min_align = true;
Patrick Daly03330cc2017-08-11 14:56:38 -07005564}
5565
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005566static int qsmmuv500_tbu_register(struct device *dev, void *cookie)
Patrick Daly1f8a2882016-09-12 17:32:05 -07005567{
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005568 struct arm_smmu_device *smmu = cookie;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005569 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005570 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07005571
5572 if (!dev->driver) {
5573 dev_err(dev, "TBU failed probe, QSMMUV500 cannot continue!\n");
5574 return -EINVAL;
5575 }
5576
5577 tbu = dev_get_drvdata(dev);
5578
5579 INIT_LIST_HEAD(&tbu->list);
5580 tbu->smmu = smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005581 list_add(&tbu->list, &data->tbus);
Patrick Daly1f8a2882016-09-12 17:32:05 -07005582 return 0;
5583}
5584
Patrick Dalyda765c62017-09-11 16:31:07 -07005585static int qsmmuv500_parse_errata1(struct arm_smmu_device *smmu)
5586{
5587 int len, i;
5588 struct device *dev = smmu->dev;
5589 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
5590 struct arm_smmu_smr *smrs;
5591 const __be32 *cell;
5592
5593 cell = of_get_property(dev->of_node, "qcom,mmu500-errata-1", NULL);
5594 if (!cell)
5595 return 0;
5596
5597 remote_spin_lock_init(&data->errata1_lock, ERRATA1_REMOTE_SPINLOCK);
5598 len = of_property_count_elems_of_size(
5599 dev->of_node, "qcom,mmu500-errata-1", sizeof(u32) * 2);
5600 if (len < 0)
5601 return 0;
5602
5603 smrs = devm_kzalloc(dev, sizeof(*smrs) * len, GFP_KERNEL);
5604 if (!smrs)
5605 return -ENOMEM;
5606
5607 for (i = 0; i < len; i++) {
5608 smrs[i].id = of_read_number(cell++, 1);
5609 smrs[i].mask = of_read_number(cell++, 1);
5610 }
5611
5612 data->errata1_clients = smrs;
5613 data->num_errata1_clients = len;
5614 return 0;
5615}
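/*
 * qcom,mmu500-errata-1 is parsed above as a list of <sid mask> pairs naming
 * the stream match entries that need the TLBI workaround. A hypothetical
 * example (IDs made up for illustration only):
 *
 *	qcom,mmu500-errata-1 = <0x800 0x7f>, <0xc00 0x0>;
 */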
5616
Patrick Daly03330cc2017-08-11 14:56:38 -07005617static int qsmmuv500_read_actlr_tbl(struct arm_smmu_device *smmu)
5618{
5619 int len, i;
5620 struct device *dev = smmu->dev;
5621 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
5622 struct actlr_setting *actlrs;
5623 const __be32 *cell;
5624
5625 cell = of_get_property(dev->of_node, "qcom,actlr", NULL);
5626 if (!cell)
5627 return 0;
5628
5629 len = of_property_count_elems_of_size(dev->of_node, "qcom,actlr",
5630 sizeof(u32) * 3);
5631 if (len < 0)
5632 return 0;
5633
5634 actlrs = devm_kzalloc(dev, sizeof(*actlrs) * len, GFP_KERNEL);
5635 if (!actlrs)
5636 return -ENOMEM;
5637
5638 for (i = 0; i < len; i++) {
5639 actlrs[i].smr.id = of_read_number(cell++, 1);
5640 actlrs[i].smr.mask = of_read_number(cell++, 1);
5641 actlrs[i].actlr = of_read_number(cell++, 1);
5642 }
5643
5644 data->actlrs = actlrs;
5645 data->actlr_tbl_size = len;
5646 return 0;
5647}
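/*
 * qcom,actlr is parsed above as <sid mask actlr> triples; any master whose
 * stream ID matches an entry gets the given ACTLR value applied to its
 * context bank. A hypothetical example (values made up for illustration):
 *
 *	qcom,actlr = <0x880 0x8 0x303>;
 */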
5648
Patrick Daly1f8a2882016-09-12 17:32:05 -07005649static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
5650{
Patrick Dalya0fddb62017-03-27 19:26:59 -07005651 struct resource *res;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005652 struct device *dev = smmu->dev;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005653 struct qsmmuv500_archdata *data;
Patrick Dalya0fddb62017-03-27 19:26:59 -07005654 struct platform_device *pdev;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005655 int ret;
Patrick Daly03330cc2017-08-11 14:56:38 -07005656 u32 val;
5657 void __iomem *reg;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005658
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005659 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
5660 if (!data)
Patrick Daly1f8a2882016-09-12 17:32:05 -07005661 return -ENOMEM;
5662
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005663 INIT_LIST_HEAD(&data->tbus);
Patrick Dalya0fddb62017-03-27 19:26:59 -07005664
5665 pdev = container_of(dev, struct platform_device, dev);
5666 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcu-base");
Charan Teja Reddy97fb6c52018-03-20 15:55:37 +05305667 if (!res) {
5668 dev_err(dev, "Unable to get the tcu-base\n");
5669 return -EINVAL;
5670 }
5671 data->tcu_base = devm_ioremap(dev, res->start, resource_size(res));
Patrick Dalya0fddb62017-03-27 19:26:59 -07005672	if (!data->tcu_base)
5673		return -ENOMEM;
5674
5675 data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005676 smmu->archdata = data;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005677
Charan Teja Reddy424ed342018-01-18 12:25:06 +05305678 if (arm_smmu_is_static_cb(smmu))
5679 return 0;
5680
Patrick Dalyda765c62017-09-11 16:31:07 -07005681 ret = qsmmuv500_parse_errata1(smmu);
5682 if (ret)
5683 return ret;
5684
Patrick Daly03330cc2017-08-11 14:56:38 -07005685 ret = qsmmuv500_read_actlr_tbl(smmu);
5686 if (ret)
5687 return ret;
5688
5689 reg = ARM_SMMU_GR0(smmu);
5690 val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
5691 val &= ~ARM_MMU500_ACR_CACHE_LOCK;
5692 writel_relaxed(val, reg + ARM_SMMU_GR0_sACR);
5693 val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
5694 /*
5695	 * Modifying the non-secure copy of the sACR register is only
5696 * allowed if permission is given in the secure sACR register.
5697 * Attempt to detect if we were able to update the value.
5698 */
5699 WARN_ON(val & ARM_MMU500_ACR_CACHE_LOCK);
5700
Patrick Daly1f8a2882016-09-12 17:32:05 -07005701 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
5702 if (ret)
5703 return ret;
5704
5705 /* Attempt to register child devices */
5706 ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
5707 if (ret)
Patrick Daly6ce54262017-04-12 21:24:06 -07005708 return -EPROBE_DEFER;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005709
5710 return 0;
5711}
5712
5713struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
5714 .init = qsmmuv500_arch_init,
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005715 .iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
Patrick Daly03330cc2017-08-11 14:56:38 -07005716 .init_context_bank = qsmmuv500_init_cb,
5717 .device_group = qsmmuv500_device_group,
Patrick Daly1f8a2882016-09-12 17:32:05 -07005718};
5719
5720static const struct of_device_id qsmmuv500_tbu_of_match[] = {
5721 {.compatible = "qcom,qsmmuv500-tbu"},
5722 {}
5723};
5724
5725static int qsmmuv500_tbu_probe(struct platform_device *pdev)
5726{
5727 struct resource *res;
5728 struct device *dev = &pdev->dev;
5729 struct qsmmuv500_tbu_device *tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005730 const __be32 *cell;
5731 int len;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005732
5733 tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
5734 if (!tbu)
5735 return -ENOMEM;
5736
5737 INIT_LIST_HEAD(&tbu->list);
5738 tbu->dev = dev;
5739 spin_lock_init(&tbu->halt_lock);
5740
5741 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
5742 tbu->base = devm_ioremap_resource(dev, res);
5743 if (IS_ERR(tbu->base))
5744 return PTR_ERR(tbu->base);
5745
5746 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "status-reg");
5747 tbu->status_reg = devm_ioremap_resource(dev, res);
5748 if (IS_ERR(tbu->status_reg))
5749 return PTR_ERR(tbu->status_reg);
5750
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005751 cell = of_get_property(dev->of_node, "qcom,stream-id-range", &len);
5752 if (!cell || len < 8)
5753 return -EINVAL;
5754
5755 tbu->sid_start = of_read_number(cell, 1);
5756 tbu->num_sids = of_read_number(cell + 1, 1);
5757
Patrick Daly1f8a2882016-09-12 17:32:05 -07005758 tbu->pwr = arm_smmu_init_power_resources(pdev);
5759 if (IS_ERR(tbu->pwr))
5760 return PTR_ERR(tbu->pwr);
5761
5762 dev_set_drvdata(dev, tbu);
5763 return 0;
5764}
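/*
 * Sketch of a TBU child node consumed by the probe above (unit address,
 * register addresses and stream ID range are illustrative only):
 *
 *	anoc_1_tbu: tbu@150c5000 {
 *		compatible = "qcom,qsmmuv500-tbu";
 *		reg = <0x150c5000 0x1000>, <0x150c2200 0x8>;
 *		reg-names = "base", "status-reg";
 *		qcom,stream-id-range = <0x400 0x400>;
 *	};
 */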
5765
5766static struct platform_driver qsmmuv500_tbu_driver = {
5767 .driver = {
5768 .name = "qsmmuv500-tbu",
5769 .of_match_table = of_match_ptr(qsmmuv500_tbu_of_match),
5770 },
5771 .probe = qsmmuv500_tbu_probe,
5772};
5773
Will Deacon45ae7cf2013-06-24 18:31:25 +01005774MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
5775MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
5776MODULE_LICENSE("GPL v2");