/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/of_platform.h>
#include <linux/msm-bus.h>
#include <dt-bindings/msm/msm-bus-ids.h>
#include <linux/remote_spinlock.h>
#include <linux/ktime.h>
#include <trace/events/iommu.h>
#include <linux/notifier.h>
#include <dt-bindings/arm/arm-smmu.h>

#include <linux/amba/bus.h>
#include <soc/qcom/msm_tz_smmu.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3
#define sCR0_SHCFG_SHIFT		22
#define sCR0_SHCFG_MASK			0x3
#define sCR0_SHCFG_NSH			3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		500000	/* 500ms */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7FFF
#define SID_MASK			0x7FFF
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_SHCFG_SHIFT		8
#define S2CR_SHCFG_MASK			0x3
#define S2CR_SHCFG_NSH			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FSRRESTORE		0x5c
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIALL		0x618
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define TLBSTATUS_SACTIVE		(1 << 0)
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_SHCFG_SHIFT		22
#define SCTLR_SHCFG_MASK		0x3
#define SCTLR_SHCFG_NSH			0x3
#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_HUPCF			(1 << 8)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)
#define TTBCR2_AS			(1 << 4)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
	QCOM_SMMUV500,
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

/*
 * attach_count
 *	The SMR and S2CR registers are only programmed when the number of
 *	devices attached to the iommu using these registers is > 0. This
 *	is required for the "SID switch" use case for secure display.
 *	Protected by stream_map_mutex.
 */
struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	int				attach_count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
	bool				cb_handoff;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
	.cb_handoff = false,						\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

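/*
 * Software shadow of a context bank's translation registers (TTBRs,
 * TCR/TTBCR, MAIR, ACTLR): the values cached here are what the driver
 * programs into the hardware context bank.
 */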
struct arm_smmu_cb {
	u64				ttbr[2];
	u32				tcr[2];
	u32				mair[2];
	struct arm_smmu_cfg		*cfg;
	u32				actlr;
	u32				attributes;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

/*
 * Describes resources required for on/off power operation.
 * Separate reference count is provided for atomic/nonatomic
 * operations.
 */
struct arm_smmu_power_resources {
	struct platform_device		*pdev;
	struct device			*dev;

	struct clk			**clocks;
	int				num_clocks;

	struct regulator_bulk_data	*gdscs;
	int				num_gdscs;

	uint32_t			bus_client;
	struct msm_bus_scale_pdata	*bus_dt_data;

	/* Protects power_count */
	struct mutex			power_lock;
	int				power_count;

	/* Protects clock_refs_count */
	spinlock_t			clock_refs_lock;
	int				clock_refs_count;
	int				regulator_defer;
};

struct arm_smmu_arch_ops;
struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	phys_addr_t			phys_addr;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	DECLARE_BITMAP(secure_context_map, ARM_SMMU_MAX_CBS);
	struct arm_smmu_cb		*cbs;
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;

	u32				cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
	unsigned int			num_impl_def_attach_registers;

	struct arm_smmu_power_resources *pwr;
	struct notifier_block		regulator_nb;

	spinlock_t			atos_lock;

	/* protects idr */
	struct mutex			idr_mutex;
	struct idr			asid_idr;

	struct arm_smmu_arch_ops	*arch_ops;
	void				*archdata;

	enum tz_smmu_device_id		sec_id;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	u32				procid;
	u16				asid;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff
#define INVALID_CBNDX			0xff
#define INVALID_ASID			0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg)	((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_pte_info {
	void *virt_addr;
	size_t size;
	struct list_head entry;
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct device			*dev;
	struct io_pgtable_ops		*pgtbl_ops;
	struct io_pgtable_cfg		pgtbl_cfg;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	u32				attributes;
	bool				slave_side_secure;
	u32				secure_vmid;
	struct list_head		pte_info_list;
	struct list_head		unassign_list;
	struct mutex			assign_lock;
	struct list_head		secure_pool_list;
	/* nonsecure pool protected by pgtbl_lock */
	struct list_head		nonsecure_pool;
	struct iommu_domain		domain;

	bool				qsmmuv500_errata1_init;
	bool				qsmmuv500_errata1_client;
	bool				qsmmuv500_errata2_min_align;
	bool				is_force_guard_page;
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
	{ ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
	{ ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" },
	{ ARM_SMMU_OPT_MMU500_ERRATA1, "qcom,mmu500-errata-1" },
	{ ARM_SMMU_OPT_STATIC_CB, "qcom,enable-static-cb"},
	{ ARM_SMMU_OPT_HALT, "qcom,enable-smmu-halt"},
	{ ARM_SMMU_OPT_HIBERNATION, "qcom,hibernation-support"},
	{ 0, NULL},
};

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);

static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
				     dma_addr_t iova);

static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);

static int arm_smmu_alloc_cb(struct iommu_domain *domain,
			     struct arm_smmu_device *smmu,
			     struct device *dev);
static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops;

static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu);
static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain);
static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain);

static int msm_secure_smmu_map(struct iommu_domain *domain, unsigned long iova,
			       phys_addr_t paddr, size_t size, int prot);
static size_t msm_secure_smmu_unmap(struct iommu_domain *domain,
				    unsigned long iova,
				    size_t size);
static size_t msm_secure_smmu_map_sg(struct iommu_domain *domain,
				     unsigned long iova,
				     struct scatterlist *sg,
				     unsigned int nents, int prot);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

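/* Set an option bit in smmu->options for each matching DT property found. */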
static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_dbg(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

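/*
 * For statically-configured (secure-world owned) context banks, ask the
 * secure world via an SCM call to restore the secure configuration of the
 * given context bank. Returns 0 immediately when static context banks are
 * not in use.
 */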
static int arm_smmu_restore_sec_cfg(struct arm_smmu_device *smmu, u32 cb)
{
	int ret;
	int scm_ret = 0;

	if (!arm_smmu_is_static_cb(smmu))
		return 0;

	ret = scm_restore_sec_cfg(smmu->sec_id, cb, &scm_ret);
	if (ret || scm_ret) {
		pr_err("scm call IOMMU_SECURE_CFG failed\n");
		return -EINVAL;
	}

	return 0;
}

static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
{
	if (smmu_domain->attributes &
			(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT))
		return true;
	else if (smmu_domain->smmu && smmu_domain->smmu->dev)
		return smmu_domain->smmu->dev->archdata.dma_coherent;
	else
		return false;
}

static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu)
{
	return smmu->options & ARM_SMMU_OPT_STATIC_CB;
}

static bool arm_smmu_has_secure_vmid(struct arm_smmu_domain *smmu_domain)
{
	return (smmu_domain->secure_vmid != VMID_INVAL);
}

static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain)
{
	return arm_smmu_has_secure_vmid(smmu_domain) &&
			smmu_domain->slave_side_secure;
}

static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain)
{
	return arm_smmu_has_secure_vmid(smmu_domain)
			&& !smmu_domain->slave_side_secure;
}

static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_master_side_secure(smmu_domain))
		mutex_lock(&smmu_domain->assign_lock);
}

static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_master_side_secure(smmu_domain))
		mutex_unlock(&smmu_domain->assign_lock);
}

static bool arm_smmu_opt_hibernation(struct arm_smmu_device *smmu)
{
	return smmu->options & ARM_SMMU_OPT_HIBERNATION;
}

/*
 * init()
 * Hook for additional device tree parsing at probe time.
 *
 * device_reset()
 * Hook for one-time architecture-specific register settings.
 *
 * iova_to_phys_hard()
 * Provides debug information. May be called from the context fault irq handler.
 *
 * init_context_bank()
 * Hook for architecture-specific settings which require knowledge of the
 * dynamically allocated context bank number.
 *
 * device_group()
 * Hook for checking whether a device is compatible with a given group.
 */
struct arm_smmu_arch_ops {
	int (*init)(struct arm_smmu_device *smmu);
	void (*device_reset)(struct arm_smmu_device *smmu);
	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
					 dma_addr_t iova);
	void (*init_context_bank)(struct arm_smmu_domain *smmu_domain,
				  struct device *dev);
	int (*device_group)(struct device *dev, struct iommu_group *group);
};

static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
{
	if (!smmu->arch_ops)
		return 0;
	if (!smmu->arch_ops->init)
		return 0;
	return smmu->arch_ops->init(smmu);
}

static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
{
	if (!smmu->arch_ops)
		return;
	if (!smmu->arch_ops->device_reset)
		return;
	return smmu->arch_ops->device_reset(smmu);
}

static void arm_smmu_arch_init_context_bank(
		struct arm_smmu_domain *smmu_domain, struct device *dev)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (!smmu->arch_ops)
		return;
	if (!smmu->arch_ops->init_context_bank)
		return;
	return smmu->arch_ops->init_context_bank(smmu_domain, dev);
}

static int arm_smmu_arch_device_group(struct device *dev,
				      struct iommu_group *group)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);

	if (!smmu->arch_ops)
		return 0;
	if (!smmu->arch_ops->device_group)
		return 0;
	return smmu->arch_ops->device_group(dev, group);
}

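/*
 * Return the device-tree node used for legacy "mmu-masters" matching: the
 * host bridge's parent node for PCI devices, otherwise the device's own
 * node. The reference taken here is dropped by the caller via of_node_put().
 */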
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

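/*
 * Legacy binding support: walk every registered SMMU instance looking for an
 * "mmu-masters" entry that references this device, then record the stream
 * IDs found there in an iommu_fwspec, as the generic binding would have done.
 */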
static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err = 0;

	memset(&it, 0, sizeof(it));
	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

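/*
 * Find and atomically claim a free index in @map between @start and @end
 * (used for context banks and stream-map entries). Returns -ENOSPC when
 * no free bit is left.
 */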
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_prepare(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
			while (i--)
				clk_unprepare(pwr->clocks[i]);
			break;
		}
	}
	return ret;
}

static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_unprepare(pwr->clocks[i - 1]);
}

static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_enable(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
			while (i--)
				clk_disable(pwr->clocks[i]);
			break;
		}
	}

	return ret;
}

static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_disable(pwr->clocks[i - 1]);
}

static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return 0;
	return msm_bus_scale_client_update_request(pwr->bus_client, 1);
}

static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return;
	WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
}

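/*
 * Enable every GDSC/regulator in @pwr; on failure, roll back the ones that
 * were already enabled so the power state is left unchanged.
 */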
static int arm_smmu_enable_regulators(struct arm_smmu_power_resources *pwr)
{
	struct regulator_bulk_data *consumers;
	int num_consumers, ret;
	int i;

	num_consumers = pwr->num_gdscs;
	consumers = pwr->gdscs;
	for (i = 0; i < num_consumers; i++) {
		ret = regulator_enable(consumers[i].consumer);
		if (ret)
			goto out;
	}
	return 0;

out:
	i -= 1;
	for (; i >= 0; i--)
		regulator_disable(consumers[i].consumer);
	return ret;
}

static int arm_smmu_disable_regulators(struct arm_smmu_power_resources *pwr)
{
	struct regulator_bulk_data *consumers;
	int i;
	int num_consumers, ret, r;

	num_consumers = pwr->num_gdscs;
	consumers = pwr->gdscs;
	for (i = num_consumers - 1; i >= 0; --i) {
		ret = regulator_disable_deferred(consumers[i].consumer,
						 pwr->regulator_defer);
		if (ret != 0)
			goto err;
	}

	return 0;

err:
	pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret);
	for (++i; i < num_consumers; ++i) {
		r = regulator_enable(consumers[i].consumer);
		if (r != 0)
			pr_err("Failed to re-enable %s: %d\n",
			       consumers[i].supply, r);
	}

	return ret;
}

/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count > 0) {
		pwr->clock_refs_count++;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return 0;
	}

	ret = arm_smmu_enable_clocks(pwr);
	if (!ret)
		pwr->clock_refs_count = 1;

	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
	return ret;
}

/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
{
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count == 0) {
		WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;

	} else if (pwr->clock_refs_count > 1) {
		pwr->clock_refs_count--;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;
	}

	arm_smmu_disable_clocks(pwr);

	pwr->clock_refs_count = 0;
	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
}

static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
{
	int ret;

	mutex_lock(&pwr->power_lock);
	if (pwr->power_count > 0) {
		pwr->power_count += 1;
		mutex_unlock(&pwr->power_lock);
		return 0;
	}

	ret = arm_smmu_request_bus(pwr);
	if (ret)
		goto out_unlock;

	ret = arm_smmu_enable_regulators(pwr);
	if (ret)
		goto out_disable_bus;

	ret = arm_smmu_prepare_clocks(pwr);
	if (ret)
		goto out_disable_regulators;

	pwr->power_count = 1;
	mutex_unlock(&pwr->power_lock);
	return 0;

out_disable_regulators:
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
out_disable_bus:
	arm_smmu_unrequest_bus(pwr);
out_unlock:
	mutex_unlock(&pwr->power_lock);
	return ret;
}

static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
{
	mutex_lock(&pwr->power_lock);
	if (pwr->power_count == 0) {
		WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
		mutex_unlock(&pwr->power_lock);
		return;

	} else if (pwr->power_count > 1) {
		pwr->power_count--;
		mutex_unlock(&pwr->power_lock);
		return;
	}

	arm_smmu_unprepare_clocks(pwr);
	arm_smmu_disable_regulators(pwr);
	arm_smmu_unrequest_bus(pwr);
	pwr->power_count = 0;
	mutex_unlock(&pwr->power_lock);
}

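/*
 * Full power-on sequence: the "slow" (sleepable) half votes for the bus,
 * regulators and clock prepare under a mutex, then the "atomic" half
 * enables the clocks under a spinlock so that it can also be reached from
 * atomic context via arm_smmu_domain_power_on().
 */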
static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
{
	int ret;

	ret = arm_smmu_power_on_slow(pwr);
	if (ret)
		return ret;

	ret = arm_smmu_power_on_atomic(pwr);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	arm_smmu_power_off_slow(pwr);
	return ret;
}

static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
{
	arm_smmu_power_off_atomic(pwr);
	arm_smmu_power_off_slow(pwr);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static int arm_smmu_domain_power_on(struct iommu_domain *domain,
				    struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain)
		return arm_smmu_power_on_atomic(smmu->pwr);

	return arm_smmu_power_on(smmu->pwr);
}

/*
 * Must be used instead of arm_smmu_power_off if it may be called from
 * atomic context
 */
1151static void arm_smmu_domain_power_off(struct iommu_domain *domain,
1152 struct arm_smmu_device *smmu)
1153{
1154 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1155 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
1156
1157 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001158 arm_smmu_power_off_atomic(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001159 return;
1160 }
1161
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001162 arm_smmu_power_off(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001163}
1164
Will Deacon45ae7cf2013-06-24 18:31:25 +01001165/* Wait for any pending TLB invalidations to complete */
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001166static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
1167 int cbndx)
1168{
1169 void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
1170 u32 val;
1171
1172 writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
1173 if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
1174 !(val & TLBSTATUS_SACTIVE),
Prakash Gupta515d9bc2017-11-20 15:00:39 +05301175 0, TLB_LOOP_TIMEOUT)) {
1176 trace_tlbsync_timeout(smmu->dev, 0);
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001177 dev_err(smmu->dev, "TLBSYNC timeout!\n");
Prakash Gupta515d9bc2017-11-20 15:00:39 +05301178 }
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001179}
1180
Will Deacon518f7132014-11-14 17:17:54 +00001181static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001182{
1183 int count = 0;
1184 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1185
1186 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
1187 while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
1188 & sTLBGSTATUS_GSACTIVE) {
1189 cpu_relax();
1190 if (++count == TLB_LOOP_TIMEOUT) {
1191 dev_err_ratelimited(smmu->dev,
1192 "TLB sync timed out -- SMMU may be deadlocked\n");
1193 return;
1194 }
1195 udelay(1);
1196 }
1197}
1198
Will Deacon518f7132014-11-14 17:17:54 +00001199static void arm_smmu_tlb_sync(void *cookie)
Will Deacon1463fe42013-07-31 19:21:27 +01001200{
Will Deacon518f7132014-11-14 17:17:54 +00001201 struct arm_smmu_domain *smmu_domain = cookie;
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001202 arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
Will Deacon518f7132014-11-14 17:17:54 +00001203}
1204
Patrick Daly8befb662016-08-17 20:03:28 -07001205/* Must be called with clocks/regulators enabled */
Will Deacon518f7132014-11-14 17:17:54 +00001206static void arm_smmu_tlb_inv_context(void *cookie)
1207{
1208 struct arm_smmu_domain *smmu_domain = cookie;
Prakash Gupta515d9bc2017-11-20 15:00:39 +05301209 struct device *dev = smmu_domain->dev;
Will Deacon44680ee2014-06-25 11:29:12 +01001210 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1211 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon1463fe42013-07-31 19:21:27 +01001212 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
Will Deacon518f7132014-11-14 17:17:54 +00001213 void __iomem *base;
Patrick Dalye7069342017-07-11 12:35:55 -07001214 bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
Prakash Gupta515d9bc2017-11-20 15:00:39 +05301215 ktime_t cur = ktime_get();
1216
1217 trace_tlbi_start(dev, 0);
Will Deacon1463fe42013-07-31 19:21:27 +01001218
Patrick Dalye7069342017-07-11 12:35:55 -07001219 if (stage1 && !use_tlbiall) {
Will Deacon1463fe42013-07-31 19:21:27 +01001220 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001221 writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +01001222 base + ARM_SMMU_CB_S1_TLBIASID);
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001223 arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
Patrick Dalye7069342017-07-11 12:35:55 -07001224 } else if (stage1 && use_tlbiall) {
1225 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1226 writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
1227 arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001228 } else {
1229 base = ARM_SMMU_GR0(smmu);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001230 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +01001231 base + ARM_SMMU_GR0_TLBIVMID);
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001232 __arm_smmu_tlb_sync(smmu);
Will Deacon1463fe42013-07-31 19:21:27 +01001233 }
Prakash Gupta515d9bc2017-11-20 15:00:39 +05301234
1235 trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
Will Deacon1463fe42013-07-31 19:21:27 +01001236}
1237
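/*
 * Invalidate a range of IOVAs without waiting for completion. Stage 1
 * contexts use per-page TLBIVA/TLBIVAL writes tagged with the ASID (or a
 * single TLBIALL when ASID retention is disabled); stage 2 uses
 * TLBIIPAS2(L) on SMMUv2 and falls back to TLBIVMID otherwise. A later
 * TLB sync makes the invalidation visible.
 */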
Will Deacon518f7132014-11-14 17:17:54 +00001238static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
Robin Murphy06c610e2015-12-07 18:18:53 +00001239 size_t granule, bool leaf, void *cookie)
Will Deacon518f7132014-11-14 17:17:54 +00001240{
1241 struct arm_smmu_domain *smmu_domain = cookie;
1242 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1243 struct arm_smmu_device *smmu = smmu_domain->smmu;
1244 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1245 void __iomem *reg;
Patrick Dalye7069342017-07-11 12:35:55 -07001246 bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
Will Deacon518f7132014-11-14 17:17:54 +00001247
Patrick Dalye7069342017-07-11 12:35:55 -07001248 if (stage1 && !use_tlbiall) {
Will Deacon518f7132014-11-14 17:17:54 +00001249 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1250 reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
1251
Robin Murphy7602b872016-04-28 17:12:09 +01001252 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001253 iova &= ~12UL;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001254 iova |= ARM_SMMU_CB_ASID(smmu, cfg);
Robin Murphy75df1382015-12-07 18:18:52 +00001255 do {
1256 writel_relaxed(iova, reg);
1257 iova += granule;
1258 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001259 } else {
1260 iova >>= 12;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001261 iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
Robin Murphy75df1382015-12-07 18:18:52 +00001262 do {
1263 writeq_relaxed(iova, reg);
1264 iova += granule >> 12;
1265 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001266 }
Patrick Dalye7069342017-07-11 12:35:55 -07001267 } else if (stage1 && use_tlbiall) {
1268 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1269 reg += ARM_SMMU_CB_S1_TLBIALL;
1270 writel_relaxed(0, reg);
Will Deacon518f7132014-11-14 17:17:54 +00001271 } else if (smmu->version == ARM_SMMU_V2) {
1272 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1273 reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
1274 ARM_SMMU_CB_S2_TLBIIPAS2;
Robin Murphy75df1382015-12-07 18:18:52 +00001275 iova >>= 12;
1276 do {
Robin Murphyf9a05f02016-04-13 18:13:01 +01001277 smmu_write_atomic_lq(iova, reg);
Robin Murphy75df1382015-12-07 18:18:52 +00001278 iova += granule >> 12;
1279 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001280 } else {
1281 reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001282 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
Will Deacon518f7132014-11-14 17:17:54 +00001283 }
1284}
1285
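/*
 * Secure page-table pool: assigning and unassigning pages to/from a secure
 * VMID via hyp_assign_phys() is relatively expensive, so page-table pages
 * released by a secure domain are parked on secure_pool_list and handed
 * back out by arm_smmu_alloc_pages_exact() instead of being returned to
 * the allocator straight away.
 */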
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001286struct arm_smmu_secure_pool_chunk {
1287 void *addr;
1288 size_t size;
1289 struct list_head list;
1290};
1291
1292static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
1293 size_t size)
1294{
1295 struct arm_smmu_secure_pool_chunk *it;
1296
1297 list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
1298 if (it->size == size) {
1299 void *addr = it->addr;
1300
1301 list_del(&it->list);
1302 kfree(it);
1303 return addr;
1304 }
1305 }
1306
1307 return NULL;
1308}
1309
1310static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
1311 void *addr, size_t size)
1312{
1313 struct arm_smmu_secure_pool_chunk *chunk;
1314
1315 chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
1316 if (!chunk)
1317 return -ENOMEM;
1318
1319 chunk->addr = addr;
1320 chunk->size = size;
1321 memset(addr, 0, size);
1322 list_add(&chunk->list, &smmu_domain->secure_pool_list);
1323
1324 return 0;
1325}
1326
1327static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
1328{
1329 struct arm_smmu_secure_pool_chunk *it, *i;
1330
1331 list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
1332 arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
1333 /* pages will be freed later (after being unassigned) */
Prakash Gupta8e827be2017-10-04 12:37:11 +05301334 list_del(&it->list);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001335 kfree(it);
1336 }
1337}
1338
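/*
 * io-pgtable memory allocation hook. Domains that are not master-side
 * secure take 4K pages from the preallocated nonsecure_pool when possible
 * and otherwise fall back to alloc_pages_exact(). Master-side secure
 * domains first try to reuse a chunk from the secure pool; freshly
 * allocated memory is queued for hyp-assignment via
 * arm_smmu_prepare_pgtable().
 */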
Patrick Dalyc11d1082016-09-01 15:52:44 -07001339static void *arm_smmu_alloc_pages_exact(void *cookie,
1340 size_t size, gfp_t gfp_mask)
1341{
1342 int ret;
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001343 void *page;
1344 struct arm_smmu_domain *smmu_domain = cookie;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001345
Patrick Daly2d600832018-02-11 15:12:55 -08001346 if (!arm_smmu_is_master_side_secure(smmu_domain)) {
1347 struct page *pg;
1348 /* size is expected to be 4K with current configuration */
1349 if (size == PAGE_SIZE) {
1350 pg = list_first_entry_or_null(
1351 &smmu_domain->nonsecure_pool, struct page, lru);
1352 if (pg) {
1353 list_del_init(&pg->lru);
1354 return page_address(pg);
1355 }
1356 }
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001357 return alloc_pages_exact(size, gfp_mask);
Patrick Daly2d600832018-02-11 15:12:55 -08001358 }
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001359
1360 page = arm_smmu_secure_pool_remove(smmu_domain, size);
1361 if (page)
1362 return page;
1363
1364 page = alloc_pages_exact(size, gfp_mask);
1365 if (page) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07001366 ret = arm_smmu_prepare_pgtable(page, cookie);
1367 if (ret) {
1368 free_pages_exact(page, size);
1369 return NULL;
1370 }
1371 }
1372
1373 return page;
1374}
1375
1376static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
1377{
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001378 struct arm_smmu_domain *smmu_domain = cookie;
1379
Charan Teja Reddy35144b02017-09-05 16:20:46 +05301380 if (!arm_smmu_is_master_side_secure(smmu_domain)) {
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001381 free_pages_exact(virt, size);
1382 return;
1383 }
1384
1385 if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
1386 arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001387}
1388
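/*
 * TLB maintenance and page-table memory callbacks handed to the io-pgtable
 * code. They are wired up through the domain's io_pgtable_cfg, roughly:
 *
 *	smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
 *		...
 *		.tlb	= &arm_smmu_gather_ops,
 *	};
 *
 * see arm_smmu_init_domain_context(), which may substitute the errata or
 * slave-side secure variants instead.
 */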
Will Deacon518f7132014-11-14 17:17:54 +00001389static struct iommu_gather_ops arm_smmu_gather_ops = {
1390 .tlb_flush_all = arm_smmu_tlb_inv_context,
1391 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
1392 .tlb_sync = arm_smmu_tlb_sync,
Patrick Dalyc11d1082016-09-01 15:52:44 -07001393 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
1394 .free_pages_exact = arm_smmu_free_pages_exact,
Will Deacon518f7132014-11-14 17:17:54 +00001395};
1396
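/*
 * No-op TLB callbacks used for slave-side secure domains, where the page
 * tables are owned by the secure world and TLB maintenance is presumed to
 * be carried out there rather than by this driver.
 */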
Charan Teja Reddy8e4c3bdc2018-03-02 14:15:21 +05301397static void msm_smmu_tlb_inv_context(void *cookie)
1398{
1399}
1400
1401static void msm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
1402 size_t granule, bool leaf,
1403 void *cookie)
1404{
1405}
1406
1407static void msm_smmu_tlb_sync(void *cookie)
1408{
1409}
1410
1411static struct iommu_gather_ops msm_smmu_gather_ops = {
1412 .tlb_flush_all = msm_smmu_tlb_inv_context,
1413 .tlb_add_flush = msm_smmu_tlb_inv_range_nosync,
1414 .tlb_sync = msm_smmu_tlb_sync,
1415 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
1416 .free_pages_exact = arm_smmu_free_pages_exact,
1417};
1418
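/*
 * On an unhandled context fault, translate the faulting IOVA with ATOS
 * both before and after a full TLB invalidation. A mismatch suggests the
 * fault was caused by a stale TLB entry rather than a missing mapping.
 */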
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001419static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
1420 dma_addr_t iova, u32 fsr)
1421{
1422 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001423 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyda765c62017-09-11 16:31:07 -07001424 const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001425 phys_addr_t phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07001426 phys_addr_t phys_post_tlbiall;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001427
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001428 phys = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyda765c62017-09-11 16:31:07 -07001429 tlb->tlb_flush_all(smmu_domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001430 phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001431
Patrick Dalyad441dd2016-09-15 15:50:46 -07001432 if (phys != phys_post_tlbiall) {
1433 dev_err(smmu->dev,
1434 "ATOS results differed across TLBIALL...\n"
1435 "Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
1436 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001437
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001438 return (phys == 0 ? phys_post_tlbiall : phys);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001439}
1440
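/*
 * Context-bank fault handler: power the SMMU on, read FSR/FSYNR/FAR, give
 * the client's registered fault handler first shot at the fault, and
 * otherwise dump diagnostics (including soft and ATOS translations of the
 * IOVA) before terminating any stalled transaction. BUGs on address size
 * faults when ARM_SMMU_OPT_FATAL_ASF is set, and on unhandled faults
 * unless the domain is marked non-fatal.
 */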
Will Deacon45ae7cf2013-06-24 18:31:25 +01001441static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
1442{
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001443 int flags, ret, tmp;
Patrick Daly5ba28112016-08-30 19:18:52 -07001444 u32 fsr, fsynr, resume;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001445 unsigned long iova;
1446 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +01001447 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001448 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1449 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001450 void __iomem *cb_base;
Shalaj Jain04059c52015-03-03 13:34:59 -08001451 void __iomem *gr1_base;
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001452 bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001453 phys_addr_t phys_soft;
Shalaj Jain04059c52015-03-03 13:34:59 -08001454 u32 frsynra;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001455 bool non_fatal_fault = !!(smmu_domain->attributes &
Sudarshan Rajagopalanf4464e02017-08-10 14:30:39 -07001456 (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001457
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001458 static DEFINE_RATELIMIT_STATE(_rs,
1459 DEFAULT_RATELIMIT_INTERVAL,
1460 DEFAULT_RATELIMIT_BURST);
1461
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001462 ret = arm_smmu_power_on(smmu->pwr);
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001463 if (ret)
1464 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001465
Shalaj Jain04059c52015-03-03 13:34:59 -08001466 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001467 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001468 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
1469
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001470 if (!(fsr & FSR_FAULT)) {
1471 ret = IRQ_NONE;
1472 goto out_power_off;
1473 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001474
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001475 if (fatal_asf && (fsr & FSR_ASF)) {
1476 dev_err(smmu->dev,
1477 "Took an address size fault. Refusing to recover.\n");
1478 BUG();
1479 }
1480
Will Deacon45ae7cf2013-06-24 18:31:25 +01001481 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
Patrick Daly5ba28112016-08-30 19:18:52 -07001482 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001483 if (fsr & FSR_TF)
1484 flags |= IOMMU_FAULT_TRANSLATION;
1485 if (fsr & FSR_PF)
1486 flags |= IOMMU_FAULT_PERMISSION;
Mitchel Humpherysdd75e332015-08-19 11:02:33 -07001487 if (fsr & FSR_EF)
1488 flags |= IOMMU_FAULT_EXTERNAL;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001489 if (fsr & FSR_SS)
1490 flags |= IOMMU_FAULT_TRANSACTION_STALLED;
Patrick Daly5ba28112016-08-30 19:18:52 -07001491
Robin Murphyf9a05f02016-04-13 18:13:01 +01001492 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001493 phys_soft = arm_smmu_iova_to_phys(domain, iova);
Shalaj Jain04059c52015-03-03 13:34:59 -08001494 frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
1495 frsynra &= CBFRSYNRA_SID_MASK;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001496 tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
1497 if (!tmp || (tmp == -EBUSY)) {
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001498 dev_dbg(smmu->dev,
1499 "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1500 iova, fsr, fsynr, cfg->cbndx);
1501 dev_dbg(smmu->dev,
1502 "soft iova-to-phys=%pa\n", &phys_soft);
Patrick Daly5ba28112016-08-30 19:18:52 -07001503 ret = IRQ_HANDLED;
Shrenuj Bansald5083c02015-09-18 14:59:09 -07001504 resume = RESUME_TERMINATE;
Patrick Daly5ba28112016-08-30 19:18:52 -07001505 } else {
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001506 phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
1507 fsr);
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001508 if (__ratelimit(&_rs)) {
1509 dev_err(smmu->dev,
1510 "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1511 iova, fsr, fsynr, cfg->cbndx);
1512 dev_err(smmu->dev, "FAR = %016lx\n",
1513 (unsigned long)iova);
1514 dev_err(smmu->dev,
1515 "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
1516 fsr,
1517 (fsr & 0x02) ? "TF " : "",
1518 (fsr & 0x04) ? "AFF " : "",
1519 (fsr & 0x08) ? "PF " : "",
1520 (fsr & 0x10) ? "EF " : "",
1521 (fsr & 0x20) ? "TLBMCF " : "",
1522 (fsr & 0x40) ? "TLBLKF " : "",
1523 (fsr & 0x80) ? "MHF " : "",
1524 (fsr & 0x40000000) ? "SS " : "",
1525 (fsr & 0x80000000) ? "MULTI " : "");
1526 dev_err(smmu->dev,
1527 "soft iova-to-phys=%pa\n", &phys_soft);
Mitchel Humpherysd03b65d2015-11-05 11:50:29 -08001528 if (!phys_soft)
1529 dev_err(smmu->dev,
1530 "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
1531 dev_name(smmu->dev));
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001532 if (phys_atos)
1533 dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
1534 &phys_atos);
1535 else
1536 dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001537 dev_err(smmu->dev, "SID=0x%x\n", frsynra);
1538 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001539 ret = IRQ_NONE;
1540 resume = RESUME_TERMINATE;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001541 if (!non_fatal_fault) {
1542 dev_err(smmu->dev,
1543 "Unhandled arm-smmu context fault!\n");
1544 BUG();
1545 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001546 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001547
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001548 /*
1549 * If the client returns -EBUSY, do not clear FSR and do not RESUME
1550 * if stalled. This is required to keep the IOMMU client stalled on
1551 * the outstanding fault. This gives the client a chance to take any
1552 * debug action and then terminate the stalled transaction.
1553 * So, the sequence in case of stall on fault should be:
1554 * 1) Do not clear FSR or write to RESUME here
1555 * 2) Client takes any debug action
1556 * 3) Client terminates the stalled transaction and resumes the IOMMU
1557 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1558 * not before so that the fault remains outstanding. This ensures
1559 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1560 * need to be terminated.
1561 */
1562 if (tmp != -EBUSY) {
1563 /* Clear the faulting FSR */
1564 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001565
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001566 /*
1567 * Barrier required to ensure that the FSR is cleared
1568 * before resuming SMMU operation
1569 */
1570 wmb();
1571
1572 /* Retry or terminate any stalled transactions */
1573 if (fsr & FSR_SS)
1574 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1575 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001576
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001577out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001578 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001579
Patrick Daly5ba28112016-08-30 19:18:52 -07001580 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001581}
1582
1583static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1584{
1585 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1586 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001587 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001588
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001589 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001590 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001591
Will Deacon45ae7cf2013-06-24 18:31:25 +01001592 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1593 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1594 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1595 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1596
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001597 if (!gfsr) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001598 arm_smmu_power_off(smmu->pwr);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001599 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001600 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001601
Will Deacon45ae7cf2013-06-24 18:31:25 +01001602 dev_err_ratelimited(smmu->dev,
1603 "Unexpected global fault, this could be serious\n");
1604 dev_err_ratelimited(smmu->dev,
1605 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1606 gfsr, gfsynr0, gfsynr1, gfsynr2);
1607
1608 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001609 arm_smmu_power_off(smmu->pwr);
Will Deaconadaba322013-07-31 19:21:26 +01001610 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001611}
1612
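/* Return true if any of the master's stream map entries is currently attached. */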
Shiraz Hashimeca8c2e2018-01-15 20:08:38 +05301613static bool arm_smmu_master_attached(struct arm_smmu_device *smmu,
1614 struct iommu_fwspec *fwspec)
1615{
1616 int i, idx;
1617
1618 for_each_cfg_sme(fwspec, i, idx) {
1619 if (smmu->s2crs[idx].attach_count)
1620 return true;
1621 }
1622
1623 return false;
1624}
1625
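/*
 * For statically-configured (TZ-managed) context banks on SMMUv2 and later,
 * ask the secure world (via msm_tz_set_cb_format()) to program the context
 * bank for the AArch64 page-table format; secure-VMID domains are left
 * untouched.
 */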
Charan Teja Reddy35144b02017-09-05 16:20:46 +05301626static int arm_smmu_set_pt_format(struct arm_smmu_domain *smmu_domain,
1627 struct io_pgtable_cfg *pgtbl_cfg)
1628{
1629 struct arm_smmu_device *smmu = smmu_domain->smmu;
1630 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1631 int ret = 0;
1632
1633 if ((smmu->version > ARM_SMMU_V1) &&
1634 (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) &&
1635 !arm_smmu_has_secure_vmid(smmu_domain) &&
1636 arm_smmu_is_static_cb(smmu)) {
1637 ret = msm_tz_set_cb_format(smmu->sec_id, cfg->cbndx);
1638 }
1639 return ret;
1640}
1641
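/*
 * Capture the page-table configuration (TTBCR, TTBRs, MAIRs) for this
 * domain into the software arm_smmu_cb state; the registers themselves are
 * written later by arm_smmu_write_context_bank().
 */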
Will Deacon518f7132014-11-14 17:17:54 +00001642static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1643 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001644{
Will Deacon44680ee2014-06-25 11:29:12 +01001645 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Robin Murphy6549a1f2017-08-08 14:56:14 +01001646 struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
1647 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1648
1649 cb->cfg = cfg;
1650
1651 /* TTBCR */
1652 if (stage1) {
1653 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1654 cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
1655 } else {
1656 cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1657 cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
1658 cb->tcr[1] |= TTBCR2_SEP_UPSTREAM;
1659 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1660 cb->tcr[1] |= TTBCR2_AS;
1661 }
1662 } else {
1663 cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
1664 }
1665
1666 /* TTBRs */
1667 if (stage1) {
1668 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1669 cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
1670 cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
1671 } else {
1672 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1673 cb->ttbr[0] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
1674 cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
1675 cb->ttbr[1] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
1676 }
1677 } else {
1678 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
1679 }
1680
1681 /* MAIRs (stage-1 only) */
1682 if (stage1) {
1683 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1684 cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
1685 cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
1686 } else {
1687 cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1688 cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1689 }
1690 }
1691
1692 cb->attributes = smmu_domain->attributes;
1693}
1694
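/*
 * Program a context bank from the cached arm_smmu_cb state (or disable it
 * if the bank is unassigned): CBA2R/CBAR, the translation registers, ACTLR
 * and finally SCTLR.
 */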
1695static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
1696{
1697 u32 reg;
1698 bool stage1;
1699 struct arm_smmu_cb *cb = &smmu->cbs[idx];
1700 struct arm_smmu_cfg *cfg = cb->cfg;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001701 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001702
Robin Murphy6549a1f2017-08-08 14:56:14 +01001703 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, idx);
1704
1705 /* Unassigned context banks only need disabling */
1706 if (!cfg) {
1707 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
1708 return;
1709 }
1710
Will Deacon45ae7cf2013-06-24 18:31:25 +01001711 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001712 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001713
Robin Murphy6549a1f2017-08-08 14:56:14 +01001714 /* CBA2R */
Will Deacon4a1c93c2015-03-04 12:21:03 +00001715 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001716 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1717 reg = CBA2R_RW64_64BIT;
1718 else
1719 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001720 /* 16-bit VMIDs live in CBA2R */
1721 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001722 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001723
Robin Murphy6549a1f2017-08-08 14:56:14 +01001724 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(idx));
Will Deacon4a1c93c2015-03-04 12:21:03 +00001725 }
1726
Will Deacon45ae7cf2013-06-24 18:31:25 +01001727 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001728 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001729 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001730 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001731
Will Deacon57ca90f2014-02-06 14:59:05 +00001732 /*
1733 * Use the weakest shareability/memory types, so they are
1734 * overridden by the ttbcr/pte.
1735 */
1736 if (stage1) {
1737 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1738 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001739 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1740 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001741 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001742 }
Robin Murphy6549a1f2017-08-08 14:56:14 +01001743 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(idx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001744
Sunil Gouthamf0b0a2a2017-03-28 16:11:12 +05301745 /*
1746 * TTBCR
1747 * We must write this before the TTBRs, since it determines the
1748 * access behaviour of some fields (in particular, ASID[15:8]).
1749 */
Robin Murphy6549a1f2017-08-08 14:56:14 +01001750 if (stage1 && smmu->version > ARM_SMMU_V1)
1751 writel_relaxed(cb->tcr[1], cb_base + ARM_SMMU_CB_TTBCR2);
1752 writel_relaxed(cb->tcr[0], cb_base + ARM_SMMU_CB_TTBCR);
Sunil Gouthamf0b0a2a2017-03-28 16:11:12 +05301753
Will Deacon518f7132014-11-14 17:17:54 +00001754 /* TTBRs */
Robin Murphy6549a1f2017-08-08 14:56:14 +01001755 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1756 writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
1757 writel_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
1758 writel_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
Will Deacon518f7132014-11-14 17:17:54 +00001759 } else {
Robin Murphy6549a1f2017-08-08 14:56:14 +01001760 writeq_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
1761 if (stage1)
1762 writeq_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
Will Deacon518f7132014-11-14 17:17:54 +00001763 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001764
Will Deacon518f7132014-11-14 17:17:54 +00001765 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001766 if (stage1) {
Robin Murphy6549a1f2017-08-08 14:56:14 +01001767 writel_relaxed(cb->mair[0], cb_base + ARM_SMMU_CB_S1_MAIR0);
1768 writel_relaxed(cb->mair[1], cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001769 }
1770
Patrick Dalyad521082018-04-06 18:07:13 -07001771 /* ACTLR (implementation defined) */
1772 writel_relaxed(cb->actlr, cb_base + ARM_SMMU_CB_ACTLR);
1773
Will Deacon45ae7cf2013-06-24 18:31:25 +01001774 /* SCTLR */
Robin Murphyb94df6f2016-08-11 17:44:06 +01001775 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001776
Patrick Daly7f377fe2017-10-06 17:37:10 -07001777 /* Ensure bypass transactions are Non-shareable */
1778 reg |= SCTLR_SHCFG_NSH << SCTLR_SHCFG_SHIFT;
1779
Robin Murphy6549a1f2017-08-08 14:56:14 +01001780 if (cb->attributes & (1 << DOMAIN_ATTR_CB_STALL_DISABLE)) {
Charan Teja Reddyc682e472017-04-20 19:11:20 +05301781 reg &= ~SCTLR_CFCFG;
1782 reg |= SCTLR_HUPCF;
1783 }
1784
Robin Murphy6549a1f2017-08-08 14:56:14 +01001785 if ((!(cb->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
1786 !(cb->attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) ||
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001787 !stage1)
Patrick Dalye62d3362016-03-15 18:58:28 -07001788 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001789 if (stage1)
1790 reg |= SCTLR_S1_ASIDPNE;
Robin Murphy6549a1f2017-08-08 14:56:14 +01001791 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1792 reg |= SCTLR_E;
1793
Will Deacon25724842013-08-21 13:49:53 +01001794 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001795}
1796
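/*
 * Static domains derive their ASID directly from the context bank index;
 * dynamic domains allocate one from the IDR, starting above the range used
 * by the context banks so the two can never collide.
 */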
Patrick Dalyc190d932016-08-30 17:23:28 -07001797static int arm_smmu_init_asid(struct iommu_domain *domain,
1798 struct arm_smmu_device *smmu)
1799{
1800 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1801 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1802 bool dynamic = is_dynamic_domain(domain);
1803 int ret;
1804
1805 if (!dynamic) {
1806 cfg->asid = cfg->cbndx + 1;
1807 } else {
1808 mutex_lock(&smmu->idr_mutex);
1809 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1810 smmu->num_context_banks + 2,
1811 MAX_ASID + 1, GFP_KERNEL);
1812
1813 mutex_unlock(&smmu->idr_mutex);
1814 if (ret < 0) {
1815 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1816 ret);
1817 return ret;
1818 }
1819 cfg->asid = ret;
1820 }
1821 return 0;
1822}
1823
1824static void arm_smmu_free_asid(struct iommu_domain *domain)
1825{
1826 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1827 struct arm_smmu_device *smmu = smmu_domain->smmu;
1828 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1829 bool dynamic = is_dynamic_domain(domain);
1830
1831 if (cfg->asid == INVALID_ASID || !dynamic)
1832 return;
1833
1834 mutex_lock(&smmu->idr_mutex);
1835 idr_remove(&smmu->asid_idr, cfg->asid);
1836 mutex_unlock(&smmu->idr_mutex);
1837}
1838
Will Deacon45ae7cf2013-06-24 18:31:25 +01001839static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Patrick Dalyea63baa2017-02-13 17:11:33 -08001840 struct arm_smmu_device *smmu,
1841 struct device *dev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001842{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001843 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001844 unsigned long ias, oas;
1845 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001846 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001847 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001848 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001849 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyce6786f2016-11-09 14:19:23 -08001850 unsigned long quirks = 0;
Patrick Dalyc190d932016-08-30 17:23:28 -07001851 bool dynamic;
Patrick Dalyda765c62017-09-11 16:31:07 -07001852 const struct iommu_gather_ops *tlb;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001853
Will Deacon518f7132014-11-14 17:17:54 +00001854 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001855 if (smmu_domain->smmu)
1856 goto out_unlock;
1857
Patrick Dalyc190d932016-08-30 17:23:28 -07001858 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1859 smmu_domain->cfg.asid = INVALID_ASID;
1860
Patrick Dalyc190d932016-08-30 17:23:28 -07001861 dynamic = is_dynamic_domain(domain);
1862 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1863 dev_err(smmu->dev, "dynamic domains not supported\n");
1864 ret = -EPERM;
1865 goto out_unlock;
1866 }
1867
Patrick Dalyaddf1f82018-04-23 14:39:19 -07001868 if (arm_smmu_has_secure_vmid(smmu_domain) &&
1869 arm_smmu_opt_hibernation(smmu)) {
1870 dev_err(smmu->dev,
1871 "Secure usecases not supported with hibernation\n");
1872 ret = -EPERM;
1873 goto out_unlock;
1874 }
1875
Will Deaconc752ce42014-06-25 22:46:31 +01001876 /*
1877 * Mapping the requested stage onto what we support is surprisingly
1878 * complicated, mainly because the spec allows S1+S2 SMMUs without
1879 * support for nested translation. That means we end up with the
1880 * following table:
1881 *
1882 * Requested Supported Actual
1883 * S1 N S1
1884 * S1 S1+S2 S1
1885 * S1 S2 S2
1886 * S1 S1 S1
1887 * N N N
1888 * N S1+S2 S2
1889 * N S2 S2
1890 * N S1 S1
1891 *
1892 * Note that you can't actually request stage-2 mappings.
1893 */
1894 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1895 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1896 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1897 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1898
Robin Murphy7602b872016-04-28 17:12:09 +01001899 /*
1900 * Choosing a suitable context format is even more fiddly. Until we
1901 * grow some way for the caller to express a preference, and/or move
1902 * the decision into the io-pgtable code where it arguably belongs,
1903 * just aim for the closest thing to the rest of the system, and hope
1904 * that the hardware isn't esoteric enough that we can't assume AArch64
1905 * support to be a superset of AArch32 support...
1906 */
1907 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1908 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001909 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
1910 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
1911 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
1912 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
1913 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +01001914 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1915 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1916 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1917 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1918 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1919
1920 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1921 ret = -EINVAL;
1922 goto out_unlock;
1923 }
1924
Will Deaconc752ce42014-06-25 22:46:31 +01001925 switch (smmu_domain->stage) {
1926 case ARM_SMMU_DOMAIN_S1:
1927 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1928 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001929 ias = smmu->va_size;
1930 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001931 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001932 fmt = ARM_64_LPAE_S1;
Patrick Daly4423d3e2017-05-04 18:17:51 -07001933 if (smmu->options & ARM_SMMU_OPT_3LVL_TABLES)
1934 ias = min(ias, 39UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001935 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +00001936 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001937 ias = min(ias, 32UL);
1938 oas = min(oas, 40UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001939 } else {
1940 fmt = ARM_V7S;
1941 ias = min(ias, 32UL);
1942 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +01001943 }
Will Deaconc752ce42014-06-25 22:46:31 +01001944 break;
1945 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001946 /*
1947 * We will likely want to change this if/when KVM gets
1948 * involved.
1949 */
Will Deaconc752ce42014-06-25 22:46:31 +01001950 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001951 cfg->cbar = CBAR_TYPE_S2_TRANS;
1952 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001953 ias = smmu->ipa_size;
1954 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001955 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001956 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001957 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001958 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001959 ias = min(ias, 40UL);
1960 oas = min(oas, 40UL);
1961 }
Will Deaconc752ce42014-06-25 22:46:31 +01001962 break;
1963 default:
1964 ret = -EINVAL;
1965 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001966 }
1967
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001968 if (is_fast)
1969 fmt = ARM_V8L_FAST;
1970
Patrick Dalyce6786f2016-11-09 14:19:23 -08001971 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
1972 quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
Liam Mark53cf2342016-12-20 11:36:07 -08001973 if (is_iommu_pt_coherent(smmu_domain))
1974 quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
Patrick Daly49ccf332017-09-27 15:10:29 -07001975 if ((quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT) &&
1976 (smmu->model == QCOM_SMMUV500))
1977 quirks |= IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001978
Patrick Dalyda765c62017-09-11 16:31:07 -07001979 tlb = &arm_smmu_gather_ops;
Patrick Daly83174c12017-10-26 12:31:15 -07001980 if (smmu->options & ARM_SMMU_OPT_MMU500_ERRATA1)
Patrick Dalyda765c62017-09-11 16:31:07 -07001981 tlb = &qsmmuv500_errata1_smmu_gather_ops;
1982
Charan Teja Reddy8e4c3bdc2018-03-02 14:15:21 +05301983 if (arm_smmu_is_slave_side_secure(smmu_domain))
1984 tlb = &msm_smmu_gather_ops;
1985
Patrick Dalyda688822017-05-17 20:12:48 -07001986 ret = arm_smmu_alloc_cb(domain, smmu, dev);
1987 if (ret < 0)
1988 goto out_unlock;
1989 cfg->cbndx = ret;
1990
Robin Murphyb7862e32016-04-13 18:13:03 +01001991 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001992 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1993 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001994 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001995 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001996 }
1997
Charan Teja Reddy35144b02017-09-05 16:20:46 +05301998 if (arm_smmu_is_slave_side_secure(smmu_domain)) {
1999 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
2000 .quirks = quirks,
2001 .pgsize_bitmap = smmu->pgsize_bitmap,
2002 .arm_msm_secure_cfg = {
2003 .sec_id = smmu->sec_id,
2004 .cbndx = cfg->cbndx,
2005 },
Charan Teja Reddy8e4c3bdc2018-03-02 14:15:21 +05302006 .tlb = tlb,
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302007 .iommu_dev = smmu->dev,
2008 };
2009 fmt = ARM_MSM_SECURE;
2010 } else {
2011 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
2012 .quirks = quirks,
2013 .pgsize_bitmap = smmu->pgsize_bitmap,
2014 .ias = ias,
2015 .oas = oas,
2016 .tlb = tlb,
2017 .iommu_dev = smmu->dev,
2018 };
2019 }
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002020
Will Deacon518f7132014-11-14 17:17:54 +00002021 smmu_domain->smmu = smmu;
Patrick Dalyea63baa2017-02-13 17:11:33 -08002022 smmu_domain->dev = dev;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07002023 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
2024 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002025 if (!pgtbl_ops) {
2026 ret = -ENOMEM;
2027 goto out_clear_smmu;
2028 }
2029
Patrick Dalyc11d1082016-09-01 15:52:44 -07002030 /*
2031 * assign any page table memory that might have been allocated
2032 * during alloc_io_pgtable_ops
2033 */
Patrick Dalye271f212016-10-04 13:24:49 -07002034 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002035 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002036 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002037
Robin Murphyd5466352016-05-09 17:20:09 +01002038 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07002039 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Robin Murphyd7a8d042016-09-12 17:13:58 +01002040 domain->geometry.aperture_end = (1UL << ias) - 1;
2041 domain->geometry.force_aperture = true;
Will Deacon518f7132014-11-14 17:17:54 +00002042
Patrick Dalyc190d932016-08-30 17:23:28 -07002043 /* Assign an asid */
2044 ret = arm_smmu_init_asid(domain, smmu);
2045 if (ret)
2046 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00002047
Patrick Dalyc190d932016-08-30 17:23:28 -07002048 if (!dynamic) {
2049 /* Initialise the context bank with our page table cfg */
2050 arm_smmu_init_context_bank(smmu_domain,
Robin Murphy6549a1f2017-08-08 14:56:14 +01002051 &smmu_domain->pgtbl_cfg);
Patrick Dalyad521082018-04-06 18:07:13 -07002052 arm_smmu_arch_init_context_bank(smmu_domain, dev);
Robin Murphy6549a1f2017-08-08 14:56:14 +01002053 arm_smmu_write_context_bank(smmu, cfg->cbndx);
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302054 /* for slave side secure, we may have to force the pagetable
2055 * format to V8L.
2056 */
2057 ret = arm_smmu_set_pt_format(smmu_domain,
2058 &smmu_domain->pgtbl_cfg);
2059 if (ret)
2060 goto out_clear_smmu;
Patrick Dalyc190d932016-08-30 17:23:28 -07002061
2062 /*
2063 * Request context fault interrupt. Do this last to avoid the
2064 * handler seeing a half-initialised domain state.
2065 */
2066 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
2067 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08002068 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
2069 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07002070 if (ret < 0) {
2071 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
2072 cfg->irptndx, irq);
2073 cfg->irptndx = INVALID_IRPTNDX;
2074 goto out_clear_smmu;
2075 }
2076 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01002077 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002078 }
Will Deacon518f7132014-11-14 17:17:54 +00002079 mutex_unlock(&smmu_domain->init_mutex);
2080
2081 /* Publish page table ops for map/unmap */
2082 smmu_domain->pgtbl_ops = pgtbl_ops;
Shiraz Hashimeca8c2e2018-01-15 20:08:38 +05302083 if (arm_smmu_is_slave_side_secure(smmu_domain) &&
2084 !arm_smmu_master_attached(smmu, dev->iommu_fwspec))
2085 arm_smmu_restore_sec_cfg(smmu, cfg->cbndx);
2086
Will Deacona9a1b0b2014-05-01 18:05:08 +01002087 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002088
Will Deacon518f7132014-11-14 17:17:54 +00002089out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06002090 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002091 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002092out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00002093 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002094 return ret;
2095}
2096
Patrick Daly77db4f92016-10-14 15:34:10 -07002097static void arm_smmu_domain_reinit(struct arm_smmu_domain *smmu_domain)
2098{
2099 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
2100 smmu_domain->cfg.cbndx = INVALID_CBNDX;
2101 smmu_domain->secure_vmid = VMID_INVAL;
2102}
2103
Will Deacon45ae7cf2013-06-24 18:31:25 +01002104static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
2105{
Joerg Roedel1d672632015-03-26 13:43:10 +01002106 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01002107 struct arm_smmu_device *smmu = smmu_domain->smmu;
2108 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002109 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07002110 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002111 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002112
Robin Murphy7e96c742016-09-14 15:26:46 +01002113 if (!smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002114 return;
2115
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002116 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002117 if (ret) {
 2118		WARN_ONCE(ret, "Whoops, powering on SMMU %p failed. Leaking context bank\n",
2119 smmu);
2120 return;
2121 }
2122
Patrick Dalyc190d932016-08-30 17:23:28 -07002123 dynamic = is_dynamic_domain(domain);
2124 if (dynamic) {
2125 arm_smmu_free_asid(domain);
2126 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002127 arm_smmu_power_off(smmu->pwr);
Patrick Dalye271f212016-10-04 13:24:49 -07002128 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002129 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002130 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002131 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly77db4f92016-10-14 15:34:10 -07002132 arm_smmu_domain_reinit(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07002133 return;
2134 }
2135
Will Deacon518f7132014-11-14 17:17:54 +00002136 /*
2137 * Disable the context bank and free the page tables before freeing
2138 * it.
2139 */
Robin Murphy6549a1f2017-08-08 14:56:14 +01002140 smmu->cbs[cfg->cbndx].cfg = NULL;
2141 arm_smmu_write_context_bank(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01002142
Will Deacon44680ee2014-06-25 11:29:12 +01002143 if (cfg->irptndx != INVALID_IRPTNDX) {
2144 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08002145 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002146 }
2147
Markus Elfring44830b02015-11-06 18:32:41 +01002148 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07002149 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002150 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002151 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002152 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01002153 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Charan Teja Reddy4971ca42018-01-23 18:27:08 +05302154	/* The non-secure context bank index is always set to zero in this
 2155	 * case, so clear the secure context bank bitmap directly.
 2156	 */
2157 if (arm_smmu_is_slave_side_secure(smmu_domain))
2158 __arm_smmu_free_bitmap(smmu->secure_context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002159
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002160 arm_smmu_power_off(smmu->pwr);
Patrick Daly77db4f92016-10-14 15:34:10 -07002161 arm_smmu_domain_reinit(smmu_domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002162}
2163
Joerg Roedel1d672632015-03-26 13:43:10 +01002164static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002165{
2166 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002167
Patrick Daly09801312016-08-29 17:02:52 -07002168 /* Do not support DOMAIN_DMA for now */
2169 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01002170 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002171 /*
2172 * Allocate the domain and initialise some of its data structures.
2173 * We can't really do anything meaningful until we've added a
2174 * master.
2175 */
2176 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
2177 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01002178 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002179
Robin Murphy7e96c742016-09-14 15:26:46 +01002180 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
2181 iommu_get_dma_cookie(&smmu_domain->domain))) {
Robin Murphy9adb9592016-01-26 18:06:36 +00002182 kfree(smmu_domain);
2183 return NULL;
2184 }
2185
Will Deacon518f7132014-11-14 17:17:54 +00002186 mutex_init(&smmu_domain->init_mutex);
2187 spin_lock_init(&smmu_domain->pgtbl_lock);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002188 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
2189 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07002190 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002191 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Patrick Daly2d600832018-02-11 15:12:55 -08002192 INIT_LIST_HEAD(&smmu_domain->nonsecure_pool);
Patrick Daly77db4f92016-10-14 15:34:10 -07002193 arm_smmu_domain_reinit(smmu_domain);
Joerg Roedel1d672632015-03-26 13:43:10 +01002194
2195 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002196}
2197
Joerg Roedel1d672632015-03-26 13:43:10 +01002198static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002199{
Joerg Roedel1d672632015-03-26 13:43:10 +01002200 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01002201
2202 /*
2203 * Free the domain resources. We assume that all devices have
2204 * already been detached.
2205 */
Robin Murphy9adb9592016-01-26 18:06:36 +00002206 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002207 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002208 kfree(smmu_domain);
2209}
2210
Robin Murphy468f4942016-09-12 17:13:49 +01002211static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
2212{
2213 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphyd5b41782016-09-14 15:21:39 +01002214 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
Robin Murphy468f4942016-09-12 17:13:49 +01002215
2216 if (smr->valid)
2217 reg |= SMR_VALID;
2218 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
2219}
2220
Robin Murphya754fd12016-09-12 17:13:50 +01002221static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
2222{
2223 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
2224 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
2225 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
Patrick Daly7f377fe2017-10-06 17:37:10 -07002226 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT |
2227 S2CR_SHCFG_NSH << S2CR_SHCFG_SHIFT;
Robin Murphya754fd12016-09-12 17:13:50 +01002228
2229 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
2230}
2231
2232static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
2233{
2234 arm_smmu_write_s2cr(smmu, idx);
2235 if (smmu->smrs)
2236 arm_smmu_write_smr(smmu, idx);
2237}
2238
Robin Murphy6668f692016-09-12 17:13:54 +01002239static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy468f4942016-09-12 17:13:49 +01002240{
2241 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy6668f692016-09-12 17:13:54 +01002242 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002243
Robin Murphy6668f692016-09-12 17:13:54 +01002244 /* Stream indexing is blissfully easy */
2245 if (!smrs)
2246 return id;
Robin Murphy468f4942016-09-12 17:13:49 +01002247
Robin Murphy6668f692016-09-12 17:13:54 +01002248 /* Validating SMRs is... less so */
2249 for (i = 0; i < smmu->num_mapping_groups; ++i) {
2250 if (!smrs[i].valid) {
2251 /*
2252 * Note the first free entry we come across, which
2253 * we'll claim in the end if nothing else matches.
2254 */
2255 if (free_idx < 0)
2256 free_idx = i;
Robin Murphy468f4942016-09-12 17:13:49 +01002257 continue;
2258 }
Robin Murphy6668f692016-09-12 17:13:54 +01002259 /*
2260 * If the new entry is _entirely_ matched by an existing entry,
2261 * then reuse that, with the guarantee that there also cannot
2262 * be any subsequent conflicting entries. In normal use we'd
2263 * expect simply identical entries for this case, but there's
2264 * no harm in accommodating the generalisation.
2265 */
2266 if ((mask & smrs[i].mask) == mask &&
2267 !((id ^ smrs[i].id) & ~smrs[i].mask))
2268 return i;
2269 /*
2270 * If the new entry has any other overlap with an existing one,
2271 * though, then there always exists at least one stream ID
2272 * which would cause a conflict, and we can't allow that risk.
2273 */
2274 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
2275 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002276 }
2277
Robin Murphy6668f692016-09-12 17:13:54 +01002278 return free_idx;
2279}
2280
2281static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
2282{
2283 if (--smmu->s2crs[idx].count)
2284 return false;
2285
2286 smmu->s2crs[idx] = s2cr_init_val;
2287 if (smmu->smrs)
2288 smmu->smrs[idx].valid = false;
2289
2290 return true;
2291}
2292
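/*
 * Reserve a stream map entry (SMR + S2CR) for each of the master's stream
 * IDs, reusing any existing entry whose SMR already covers the new ID and
 * mask. The hardware registers are deliberately left untouched here; they
 * are only written once the master is actually attached to a domain.
 */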
2293static int arm_smmu_master_alloc_smes(struct device *dev)
2294{
Robin Murphy06e393e2016-09-12 17:13:55 +01002295 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2296 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy6668f692016-09-12 17:13:54 +01002297 struct arm_smmu_device *smmu = cfg->smmu;
2298 struct arm_smmu_smr *smrs = smmu->smrs;
2299 struct iommu_group *group;
2300 int i, idx, ret;
2301
2302 mutex_lock(&smmu->stream_map_mutex);
2303 /* Figure out a viable stream map entry allocation */
Robin Murphy06e393e2016-09-12 17:13:55 +01002304 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002305 u16 sid = fwspec->ids[i];
2306 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
2307
Robin Murphy6668f692016-09-12 17:13:54 +01002308 if (idx != INVALID_SMENDX) {
2309 ret = -EEXIST;
2310 goto out_err;
2311 }
2312
Robin Murphy7e96c742016-09-14 15:26:46 +01002313 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy6668f692016-09-12 17:13:54 +01002314 if (ret < 0)
2315 goto out_err;
2316
2317 idx = ret;
2318 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002319 smrs[idx].id = sid;
2320 smrs[idx].mask = mask;
Robin Murphy6668f692016-09-12 17:13:54 +01002321 smrs[idx].valid = true;
2322 }
2323 smmu->s2crs[idx].count++;
2324 cfg->smendx[i] = (s16)idx;
2325 }
2326
2327 group = iommu_group_get_for_dev(dev);
2328 if (!group)
2329 group = ERR_PTR(-ENOMEM);
2330 if (IS_ERR(group)) {
2331 ret = PTR_ERR(group);
2332 goto out_err;
2333 }
2334 iommu_group_put(group);
Robin Murphy468f4942016-09-12 17:13:49 +01002335
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002336 /* It worked! Don't poke the actual hardware until we've attached */
2337 for_each_cfg_sme(fwspec, i, idx)
Robin Murphy6668f692016-09-12 17:13:54 +01002338 smmu->s2crs[idx].group = group;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002339
Robin Murphy6668f692016-09-12 17:13:54 +01002340 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002341 return 0;
2342
Robin Murphy6668f692016-09-12 17:13:54 +01002343out_err:
Robin Murphy468f4942016-09-12 17:13:49 +01002344 while (i--) {
Robin Murphy6668f692016-09-12 17:13:54 +01002345 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy468f4942016-09-12 17:13:49 +01002346 cfg->smendx[i] = INVALID_SMENDX;
2347 }
Robin Murphy6668f692016-09-12 17:13:54 +01002348 mutex_unlock(&smmu->stream_map_mutex);
2349 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002350}
2351
Robin Murphy06e393e2016-09-12 17:13:55 +01002352static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002353{
Robin Murphy06e393e2016-09-12 17:13:55 +01002354 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
2355 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy1fb519a2016-09-12 17:13:53 +01002356 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01002357
Robin Murphy6668f692016-09-12 17:13:54 +01002358 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002359 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002360 if (arm_smmu_free_sme(smmu, idx))
2361 arm_smmu_write_sme(smmu, idx);
Robin Murphy468f4942016-09-12 17:13:49 +01002362 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002363 }
Robin Murphy6668f692016-09-12 17:13:54 +01002364 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002365}
2366
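/*
 * Drop the attach reference on each of the master's stream map entries
 * and, once an entry's count hits zero, clear its SMR/S2CR registers.
 * Finish with a full TLB invalidation so no stale translations survive for
 * this context bank.
 */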
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002367static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
2368 struct iommu_fwspec *fwspec)
2369{
2370 struct arm_smmu_device *smmu = smmu_domain->smmu;
2371 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2372 int i, idx;
2373 const struct iommu_gather_ops *tlb;
2374
2375 tlb = smmu_domain->pgtbl_cfg.tlb;
2376
2377 mutex_lock(&smmu->stream_map_mutex);
2378 for_each_cfg_sme(fwspec, i, idx) {
2379 WARN_ON(s2cr[idx].attach_count == 0);
2380 s2cr[idx].attach_count -= 1;
2381
2382 if (s2cr[idx].attach_count > 0)
2383 continue;
2384
2385 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
2386 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
2387 }
2388 mutex_unlock(&smmu->stream_map_mutex);
2389
2390 /* Ensure there are no stale mappings for this context bank */
2391 tlb->tlb_flush_all(smmu_domain);
2392}
2393
Will Deacon45ae7cf2013-06-24 18:31:25 +01002394static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphy06e393e2016-09-12 17:13:55 +01002395 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002396{
Will Deacon44680ee2014-06-25 11:29:12 +01002397 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002398 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2399 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
2400 u8 cbndx = smmu_domain->cfg.cbndx;
Robin Murphy6668f692016-09-12 17:13:54 +01002401 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002402
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002403 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002404 for_each_cfg_sme(fwspec, i, idx) {
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002405 if (s2cr[idx].attach_count++ > 0)
Robin Murphy6668f692016-09-12 17:13:54 +01002406 continue;
Robin Murphya754fd12016-09-12 17:13:50 +01002407
2408 s2cr[idx].type = type;
2409 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
2410 s2cr[idx].cbndx = cbndx;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002411 arm_smmu_write_sme(smmu, idx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002412 }
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002413 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002414
2415 return 0;
2416}
2417
Patrick Daly09801312016-08-29 17:02:52 -07002418static void arm_smmu_detach_dev(struct iommu_domain *domain,
2419 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002420{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002421 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07002422 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002423 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly09801312016-08-29 17:02:52 -07002424 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07002425 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07002426
2427 if (dynamic)
2428 return;
2429
Patrick Daly09801312016-08-29 17:02:52 -07002430 if (!smmu) {
2431 dev_err(dev, "Domain not attached; cannot detach!\n");
2432 return;
2433 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002434
Vijayanand Jitta25cd32c2017-11-16 15:14:36 +05302435 if (atomic_domain)
2436 arm_smmu_power_on_atomic(smmu->pwr);
2437 else
2438 arm_smmu_power_on(smmu->pwr);
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002439
Vijayanand Jitta25cd32c2017-11-16 15:14:36 +05302440 arm_smmu_domain_remove_master(smmu_domain, fwspec);
2441 arm_smmu_power_off(smmu->pwr);
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002442}
2443
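/*
 * For master-side secure domains, hand every queued page-table page over
 * to the secure VMID (read-only) while keeping HLOS read/write access,
 * using hyp_assign_phys(). arm_smmu_unassign_table() below performs the
 * reverse operation before the pages are freed.
 */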
Patrick Dalye271f212016-10-04 13:24:49 -07002444static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07002445{
Patrick Dalye271f212016-10-04 13:24:49 -07002446 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002447 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2448 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
2449 int source_vmid = VMID_HLOS;
2450 struct arm_smmu_pte_info *pte_info, *temp;
2451
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302452 if (!arm_smmu_is_master_side_secure(smmu_domain))
Patrick Dalye271f212016-10-04 13:24:49 -07002453 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002454
Patrick Dalye271f212016-10-04 13:24:49 -07002455 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002456 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2457 PAGE_SIZE, &source_vmid, 1,
2458 dest_vmids, dest_perms, 2);
2459 if (WARN_ON(ret))
2460 break;
2461 }
2462
2463 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2464 entry) {
2465 list_del(&pte_info->entry);
2466 kfree(pte_info);
2467 }
Patrick Dalye271f212016-10-04 13:24:49 -07002468 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002469}
2470
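/*
 * Counterpart of arm_smmu_assign_table(): pages queued on unassign_list are
 * returned to exclusive HLOS ownership via hyp_assign_phys() and then freed,
 * along with their tracking entries.
 */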
2471static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2472{
2473 int ret;
2474 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002475 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002476 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2477 struct arm_smmu_pte_info *pte_info, *temp;
2478
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302479 if (!arm_smmu_is_master_side_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002480 return;
2481
2482 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2483 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2484 PAGE_SIZE, source_vmlist, 2,
2485 &dest_vmids, &dest_perms, 1);
2486 if (WARN_ON(ret))
2487 break;
2488 free_pages_exact(pte_info->virt_addr, pte_info->size);
2489 }
2490
2491 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2492 entry) {
2493 list_del(&pte_info->entry);
2494 kfree(pte_info);
2495 }
2496}
2497
2498static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2499{
2500 struct arm_smmu_domain *smmu_domain = cookie;
2501 struct arm_smmu_pte_info *pte_info;
2502
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302503 if (smmu_domain->slave_side_secure ||
2504 !arm_smmu_has_secure_vmid(smmu_domain)) {
2505 if (smmu_domain->slave_side_secure)
2506 WARN(1, "slave side secure is enforced\n");
2507 else
 2508			WARN(1, "Invalid VMID is set!\n");
2509 return;
2510 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002511
2512 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2513 if (!pte_info)
2514 return;
2515
2516 pte_info->virt_addr = addr;
2517 pte_info->size = size;
2518 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2519}
2520
2521static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2522{
2523 struct arm_smmu_domain *smmu_domain = cookie;
2524 struct arm_smmu_pte_info *pte_info;
2525
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302526 if (smmu_domain->slave_side_secure ||
2527 !arm_smmu_has_secure_vmid(smmu_domain)) {
2528 if (smmu_domain->slave_side_secure)
2529 WARN(1, "slave side secure is enforced\n");
2530 else
 2531			WARN(1, "Invalid VMID is set!\n");
2532 return -EINVAL;
2533 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002534
2535 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2536 if (!pte_info)
2537 return -ENOMEM;
2538 pte_info->virt_addr = addr;
2539 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2540 return 0;
2541}
2542
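/*
 * Best-effort preallocation of page-table memory before pgtbl_lock is taken:
 * estimate how many 2nd- and 3rd-level tables a mapping of 'size' bytes may
 * need and stash that many zeroed pages on 'pool'. Atomic and secure domains
 * are skipped; any shortfall is made up later with atomic allocations.
 */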
Patrick Daly2d600832018-02-11 15:12:55 -08002543static void arm_smmu_prealloc_memory(struct arm_smmu_domain *smmu_domain,
Patrick Dalya7414b12018-03-21 14:30:31 -07002544 size_t size, struct list_head *pool)
Patrick Daly2d600832018-02-11 15:12:55 -08002545{
Patrick Daly2d600832018-02-11 15:12:55 -08002546 int i;
Patrick Dalya7414b12018-03-21 14:30:31 -07002547 u32 nr = 0;
Patrick Daly2d600832018-02-11 15:12:55 -08002548 struct page *page;
2549
2550 if ((smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC)) ||
2551 arm_smmu_has_secure_vmid(smmu_domain))
2552 return;
2553
Patrick Daly2d600832018-02-11 15:12:55 -08002554 /* number of 2nd level pagetable entries */
2555 nr += round_up(size, SZ_1G) >> 30;
 2556	/* number of 3rd level pagetable entries */
2557 nr += round_up(size, SZ_2M) >> 21;
2558
2559 /* Retry later with atomic allocation on error */
2560 for (i = 0; i < nr; i++) {
2561 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
2562 if (!page)
2563 break;
2564 list_add(&page->lru, pool);
2565 }
2566}
2567
Patrick Dalya7414b12018-03-21 14:30:31 -07002568static void arm_smmu_prealloc_memory_sg(struct arm_smmu_domain *smmu_domain,
2569 struct scatterlist *sgl, int nents,
2570 struct list_head *pool)
2571{
2572 int i;
2573 size_t size = 0;
2574 struct scatterlist *sg;
2575
2576 if ((smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC)) ||
2577 arm_smmu_has_secure_vmid(smmu_domain))
2578 return;
2579
2580 for_each_sg(sgl, sg, nents, i)
2581 size += sg->length;
2582
2583 arm_smmu_prealloc_memory(smmu_domain, size, pool);
2584}
2585
Patrick Daly2d600832018-02-11 15:12:55 -08002586static void arm_smmu_release_prealloc_memory(
2587 struct arm_smmu_domain *smmu_domain, struct list_head *list)
2588{
2589 struct page *page, *tmp;
Patrick Daly2d600832018-02-11 15:12:55 -08002590
2591 list_for_each_entry_safe(page, tmp, list, lru) {
2592 list_del(&page->lru);
2593 __free_pages(page, 0);
Patrick Daly2d600832018-02-11 15:12:55 -08002594 }
2595}
2596
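/*
 * Attach flow: power on the SMMU, finalise the domain context, check that the
 * device and domain belong to the same SMMU instance, then program the
 * device's stream-match entries. Dynamic domains skip SID programming, and
 * atomic domains keep an extra non-atomic power vote until detach.
 */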
Will Deacon45ae7cf2013-06-24 18:31:25 +01002597static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2598{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002599 int ret;
Robin Murphy06e393e2016-09-12 17:13:55 +01002600 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Will Deacon518f7132014-11-14 17:17:54 +00002601 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +01002602 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly8befb662016-08-17 20:03:28 -07002603 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002604
Robin Murphy06e393e2016-09-12 17:13:55 +01002605 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002606 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2607 return -ENXIO;
2608 }
Robin Murphy06e393e2016-09-12 17:13:55 +01002609
Robin Murphy4f79b142016-10-17 12:06:21 +01002610 /*
2611 * FIXME: The arch/arm DMA API code tries to attach devices to its own
2612 * domains between of_xlate() and add_device() - we have no way to cope
2613 * with that, so until ARM gets converted to rely on groups and default
2614 * domains, just say no (but more politely than by dereferencing NULL).
2615 * This should be at least a WARN_ON once that's sorted.
2616 */
2617 if (!fwspec->iommu_priv)
2618 return -ENODEV;
2619
Robin Murphy06e393e2016-09-12 17:13:55 +01002620 smmu = fwspec_smmu(fwspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002621
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002622 /* Enable Clocks and Power */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002623 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002624 if (ret)
2625 return ret;
2626
Will Deacon518f7132014-11-14 17:17:54 +00002627 /* Ensure that the domain is finalised */
Patrick Dalyea63baa2017-02-13 17:11:33 -08002628 ret = arm_smmu_init_domain_context(domain, smmu, dev);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002629 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002630 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002631
Patrick Dalyc190d932016-08-30 17:23:28 -07002632 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002633 if (is_dynamic_domain(domain)) {
2634 ret = 0;
2635 goto out_power_off;
2636 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002637
Will Deacon45ae7cf2013-06-24 18:31:25 +01002638 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002639 * Sanity check the domain. We don't support domains across
2640 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002641 */
Robin Murphy06e393e2016-09-12 17:13:55 +01002642 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002643 dev_err(dev,
2644 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002645 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002646 ret = -EINVAL;
2647 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002648 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002649
2650 /* Looks ok, so add the device to the domain */
Robin Murphy06e393e2016-09-12 17:13:55 +01002651 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002652
2653out_power_off:
Patrick Daly466237d2016-10-14 15:59:14 -07002654 /*
2655 * Keep an additional vote for non-atomic power until domain is
2656 * detached
2657 */
2658 if (!ret && atomic_domain) {
2659 WARN_ON(arm_smmu_power_on(smmu->pwr));
2660 arm_smmu_power_off_atomic(smmu->pwr);
2661 }
2662
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002663 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002664
Will Deacon45ae7cf2013-06-24 18:31:25 +01002665 return ret;
2666}
2667
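/*
 * Map path for HLOS-managed page tables: preallocate table memory outside the
 * spinlock, lend it to the io-pgtable code via smmu_domain->nonsecure_pool
 * while ops->map() runs, then assign any newly allocated tables to the secure
 * VM and release whatever preallocation was left unused.
 */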
Will Deacon45ae7cf2013-06-24 18:31:25 +01002668static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002669 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002670{
Will Deacon518f7132014-11-14 17:17:54 +00002671 int ret;
2672 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002673 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002674	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Patrick Dalya7414b12018-03-21 14:30:31 -07002675 LIST_HEAD(nonsecure_pool);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002676
Will Deacon518f7132014-11-14 17:17:54 +00002677 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002678 return -ENODEV;
2679
Charan Teja Reddy313991e2018-03-12 12:19:31 +05302680 if (arm_smmu_is_slave_side_secure(smmu_domain))
2681 return msm_secure_smmu_map(domain, iova, paddr, size, prot);
2682
Patrick Dalya7414b12018-03-21 14:30:31 -07002683 arm_smmu_prealloc_memory(smmu_domain, size, &nonsecure_pool);
Patrick Dalye271f212016-10-04 13:24:49 -07002684 arm_smmu_secure_domain_lock(smmu_domain);
2685
Will Deacon518f7132014-11-14 17:17:54 +00002686 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya7414b12018-03-21 14:30:31 -07002687 list_splice_init(&nonsecure_pool, &smmu_domain->nonsecure_pool);
Will Deacon518f7132014-11-14 17:17:54 +00002688 ret = ops->map(ops, iova, paddr, size, prot);
Patrick Dalya7414b12018-03-21 14:30:31 -07002689 list_splice_init(&smmu_domain->nonsecure_pool, &nonsecure_pool);
Will Deacon518f7132014-11-14 17:17:54 +00002690 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002691
2692 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002693 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002694
Patrick Dalya7414b12018-03-21 14:30:31 -07002695 arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);
Will Deacon518f7132014-11-14 17:17:54 +00002696 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002697}
2698
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002699static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
2700 dma_addr_t iova)
2701{
2702 uint64_t ret;
2703 unsigned long flags;
2704 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2705 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2706
2707 if (!ops)
2708 return 0;
2709
2710 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2711 ret = ops->iova_to_pte(ops, iova);
2712 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2713 return ret;
2714}
2715
Will Deacon45ae7cf2013-06-24 18:31:25 +01002716static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2717 size_t size)
2718{
Will Deacon518f7132014-11-14 17:17:54 +00002719 size_t ret;
2720 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002721 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002722	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002723
Will Deacon518f7132014-11-14 17:17:54 +00002724 if (!ops)
2725 return 0;
2726
Charan Teja Reddy313991e2018-03-12 12:19:31 +05302727 if (arm_smmu_is_slave_side_secure(smmu_domain))
2728 return msm_secure_smmu_unmap(domain, iova, size);
2729
Patrick Daly8befb662016-08-17 20:03:28 -07002730 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002731 if (ret)
2732 return ret;
2733
Patrick Dalye271f212016-10-04 13:24:49 -07002734 arm_smmu_secure_domain_lock(smmu_domain);
2735
Will Deacon518f7132014-11-14 17:17:54 +00002736 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2737 ret = ops->unmap(ops, iova, size);
2738 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002739
Patrick Daly8befb662016-08-17 20:03:28 -07002740 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002741 /*
2742 * While splitting up block mappings, we might allocate page table
 2743	 * memory during unmap, so the VMIDs need to be assigned to the
2744 * memory here as well.
2745 */
2746 arm_smmu_assign_table(smmu_domain);
 2747	/* Also unassign any pages that were freed during unmap */
2748 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002749 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002750 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002751}
2752
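/*
 * map_sg splits the scatterlist into batches of at most MAX_MAP_SG_BATCH_SIZE
 * so the page-table spinlock is never held across an arbitrarily large
 * mapping. If any batch fails, everything mapped so far is unmapped and 0 is
 * returned; otherwise the total number of bytes mapped is returned.
 */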
Patrick Daly88d321d2017-02-09 18:02:13 -08002753#define MAX_MAP_SG_BATCH_SIZE (SZ_4M)
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002754static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2755 struct scatterlist *sg, unsigned int nents, int prot)
2756{
2757 int ret;
Patrick Daly88d321d2017-02-09 18:02:13 -08002758 size_t size, batch_size, size_to_unmap = 0;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002759 unsigned long flags;
2760 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2761 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Patrick Daly88d321d2017-02-09 18:02:13 -08002762 unsigned int idx_start, idx_end;
2763 struct scatterlist *sg_start, *sg_end;
2764 unsigned long __saved_iova_start;
Patrick Daly2d600832018-02-11 15:12:55 -08002765 LIST_HEAD(nonsecure_pool);
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002766
2767 if (!ops)
2768 return -ENODEV;
2769
Charan Teja Reddy313991e2018-03-12 12:19:31 +05302770 if (arm_smmu_is_slave_side_secure(smmu_domain))
2771 return msm_secure_smmu_map_sg(domain, iova, sg, nents, prot);
2772
Patrick Dalya7414b12018-03-21 14:30:31 -07002773 arm_smmu_prealloc_memory_sg(smmu_domain, sg, nents, &nonsecure_pool);
Patrick Daly4b9a7ad2017-09-22 17:31:13 -07002774 arm_smmu_secure_domain_lock(smmu_domain);
2775
Patrick Daly88d321d2017-02-09 18:02:13 -08002776 __saved_iova_start = iova;
2777 idx_start = idx_end = 0;
2778 sg_start = sg_end = sg;
2779 while (idx_end < nents) {
2780 batch_size = sg_end->length;
2781 sg_end = sg_next(sg_end);
2782 idx_end++;
2783 while ((idx_end < nents) &&
2784 (batch_size + sg_end->length < MAX_MAP_SG_BATCH_SIZE)) {
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002785
Patrick Daly88d321d2017-02-09 18:02:13 -08002786 batch_size += sg_end->length;
2787 sg_end = sg_next(sg_end);
2788 idx_end++;
2789 }
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002790
Patrick Daly88d321d2017-02-09 18:02:13 -08002791 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Daly2d600832018-02-11 15:12:55 -08002792 list_splice_init(&nonsecure_pool, &smmu_domain->nonsecure_pool);
Patrick Daly88d321d2017-02-09 18:02:13 -08002793 ret = ops->map_sg(ops, iova, sg_start, idx_end - idx_start,
2794 prot, &size);
Patrick Daly2d600832018-02-11 15:12:55 -08002795 list_splice_init(&smmu_domain->nonsecure_pool, &nonsecure_pool);
Patrick Daly88d321d2017-02-09 18:02:13 -08002796 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2797 /* Returns 0 on error */
2798 if (!ret) {
2799 size_to_unmap = iova + size - __saved_iova_start;
2800 goto out;
2801 }
2802
2803 iova += batch_size;
2804 idx_start = idx_end;
2805 sg_start = sg_end;
2806 }
2807
2808out:
Patrick Dalyc11d1082016-09-01 15:52:44 -07002809 arm_smmu_assign_table(smmu_domain);
2810
Patrick Daly88d321d2017-02-09 18:02:13 -08002811 if (size_to_unmap) {
2812 arm_smmu_unmap(domain, __saved_iova_start, size_to_unmap);
2813 iova = __saved_iova_start;
2814 }
Patrick Daly4b9a7ad2017-09-22 17:31:13 -07002815 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly2d600832018-02-11 15:12:55 -08002816 arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);
Patrick Daly88d321d2017-02-09 18:02:13 -08002817 return iova - __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002818}
2819
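/*
 * Hardware address translation: write the page-aligned IOVA to ATS1PR, poll
 * ATSR until the walk completes, then read the result from PAR. On timeout a
 * software table walk is done only to enrich the error message; timeouts and
 * translation faults both return 0.
 */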
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002820static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyad441dd2016-09-15 15:50:46 -07002821 dma_addr_t iova)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002822{
Joerg Roedel1d672632015-03-26 13:43:10 +01002823 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002824 struct arm_smmu_device *smmu = smmu_domain->smmu;
2825 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 2826	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2827 struct device *dev = smmu->dev;
2828 void __iomem *cb_base;
2829 u32 tmp;
2830 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002831 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002832
2833 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2834
Robin Murphy661d9622015-05-27 17:09:34 +01002835 /* ATS1 registers can only be written atomically */
2836 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002837 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002838 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2839 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002840 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002841
2842 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2843 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002844 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002845 dev_err(dev,
2846 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2847 &iova, &phys);
2848 phys = 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002849 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002850 }
2851
Robin Murphyf9a05f02016-04-13 18:13:01 +01002852 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002853 if (phys & CB_PAR_F) {
2854 dev_err(dev, "translation fault!\n");
2855 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002856 phys = 0;
2857 } else {
2858 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002859 }
Patrick Dalyad441dd2016-09-15 15:50:46 -07002860
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002861 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002862}
2863
Will Deacon45ae7cf2013-06-24 18:31:25 +01002864static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002865 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002866{
Will Deacon518f7132014-11-14 17:17:54 +00002867 phys_addr_t ret;
2868 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002869 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002870	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002871
Will Deacon518f7132014-11-14 17:17:54 +00002872 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002873 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002874
Will Deacon518f7132014-11-14 17:17:54 +00002875 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002876 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002877 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002878
Will Deacon518f7132014-11-14 17:17:54 +00002879 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002880}
2881
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002882/*
2883 * This function can sleep, and cannot be called from atomic context. Will
2884 * power on register block if required. This restriction does not apply to the
2885 * original iova_to_phys() op.
2886 */
2887static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2888 dma_addr_t iova)
2889{
2890 phys_addr_t ret = 0;
2891 unsigned long flags;
2892 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly62ba1922017-08-30 16:47:18 -07002893 struct arm_smmu_device *smmu = smmu_domain->smmu;
2894
2895 if (smmu->options & ARM_SMMU_OPT_DISABLE_ATOS)
2896 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002897
Patrick Dalyad441dd2016-09-15 15:50:46 -07002898 if (smmu_domain->smmu->arch_ops &&
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002899 smmu_domain->smmu->arch_ops->iova_to_phys_hard) {
2900 ret = smmu_domain->smmu->arch_ops->iova_to_phys_hard(
Patrick Dalyad441dd2016-09-15 15:50:46 -07002901 domain, iova);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002902 return ret;
2903 }
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002904
2905 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2906 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2907 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyad441dd2016-09-15 15:50:46 -07002908 ret = __arm_smmu_iova_to_phys_hard(domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002909
2910 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2911
2912 return ret;
2913}
2914
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002915static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002916{
Will Deacond0948942014-06-24 17:30:10 +01002917 switch (cap) {
2918 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002919 /*
2920 * Return true here as the SMMU can always send out coherent
2921 * requests.
2922 */
2923 return true;
Will Deacond0948942014-06-24 17:30:10 +01002924 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002925 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002926 case IOMMU_CAP_NOEXEC:
2927 return true;
Will Deacond0948942014-06-24 17:30:10 +01002928 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002929 return false;
Will Deacond0948942014-06-24 17:30:10 +01002930 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002931}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002932
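/*
 * On targets where TrustZone owns the SMMU (CONFIG_MSM_TZ_SMMU with a static
 * context-bank assignment), HLOS must not write the global register space or
 * any secure context bank; arm_smmu_skip_write() tells the register accessors
 * which writes to drop. Slave-side-secure domains likewise route map/unmap
 * through the msm_secure_smmu_* wrappers below instead of the normal path.
 */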
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +05302933#ifdef CONFIG_MSM_TZ_SMMU
2934static struct arm_smmu_device *arm_smmu_get_by_addr(void __iomem *addr)
2935{
2936 struct arm_smmu_device *smmu;
2937 unsigned long flags;
2938
2939 spin_lock_irqsave(&arm_smmu_devices_lock, flags);
2940 list_for_each_entry(smmu, &arm_smmu_devices, list) {
2941 unsigned long base = (unsigned long)smmu->base;
2942 unsigned long mask = ~(smmu->size - 1);
2943
2944 if ((base & mask) == ((unsigned long)addr & mask)) {
2945 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2946 return smmu;
2947 }
2948 }
2949 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2950 return NULL;
2951}
2952
2953bool arm_smmu_skip_write(void __iomem *addr)
2954{
2955 struct arm_smmu_device *smmu;
Charan Teja Reddy4971ca42018-01-23 18:27:08 +05302956 int cb;
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +05302957
2958 smmu = arm_smmu_get_by_addr(addr);
Shiraz Hashima28a4792018-01-13 00:39:52 +05302959
 2960	/* Skip the write if the SMMU is not available yet */
2961 if (!smmu)
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +05302962 return true;
Shiraz Hashima28a4792018-01-13 00:39:52 +05302963
Charan Teja Reddy65ff5e42018-02-19 15:32:28 +05302964 if (!arm_smmu_is_static_cb(smmu))
2965 return false;
2966
Shiraz Hashima28a4792018-01-13 00:39:52 +05302967 /* Do not write to global space */
2968 if (((unsigned long)addr & (smmu->size - 1)) < (smmu->size >> 1))
2969 return true;
2970
2971 /* Finally skip writing to secure CB */
2972 cb = ((unsigned long)addr & ((smmu->size >> 1) - 1)) >> PAGE_SHIFT;
Charan Teja Reddy4971ca42018-01-23 18:27:08 +05302973 if (test_bit(cb, smmu->secure_context_map))
2974 return true;
Shiraz Hashima28a4792018-01-13 00:39:52 +05302975
2976 return false;
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +05302977}
Charan Teja Reddy313991e2018-03-12 12:19:31 +05302978
2979static int msm_secure_smmu_map(struct iommu_domain *domain, unsigned long iova,
2980 phys_addr_t paddr, size_t size, int prot)
2981{
2982 size_t ret;
2983 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2984 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2985
2986 ret = ops->map(ops, iova, paddr, size, prot);
2987
2988 return ret;
2989}
2990
2991static size_t msm_secure_smmu_unmap(struct iommu_domain *domain,
2992 unsigned long iova,
2993 size_t size)
2994{
2995 size_t ret;
2996 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2997 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2998
2999 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
3000 if (ret)
3001 return ret;
3002
3003 ret = ops->unmap(ops, iova, size);
3004
3005 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
3006
3007 return ret;
3008}
3009
3010static size_t msm_secure_smmu_map_sg(struct iommu_domain *domain,
3011 unsigned long iova,
3012 struct scatterlist *sg,
3013 unsigned int nents, int prot)
3014{
3015 int ret;
3016 size_t size;
3017 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3018 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
3019
3020 ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
3021
3022 if (!ret)
3023 msm_secure_smmu_unmap(domain, iova, size);
3024
3025 return ret;
3026}
3027
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +05303028#endif
3029
Patrick Daly8e3371a2017-02-13 22:14:53 -08003030static struct arm_smmu_device *arm_smmu_get_by_list(struct device_node *np)
3031{
3032 struct arm_smmu_device *smmu;
3033 unsigned long flags;
3034
3035 spin_lock_irqsave(&arm_smmu_devices_lock, flags);
3036 list_for_each_entry(smmu, &arm_smmu_devices, list) {
3037 if (smmu->dev->of_node == np) {
3038 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
3039 return smmu;
3040 }
3041 }
3042 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
3043 return NULL;
3044}
3045
Robin Murphy7e96c742016-09-14 15:26:46 +01003046static int arm_smmu_match_node(struct device *dev, void *data)
3047{
3048 return dev->of_node == data;
3049}
3050
3051static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
3052{
3053 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
3054 np, arm_smmu_match_node);
3055 put_device(dev);
Patrick Daly8e3371a2017-02-13 22:14:53 -08003056 return dev ? dev_get_drvdata(dev) : arm_smmu_get_by_list(np);
Robin Murphy7e96c742016-09-14 15:26:46 +01003057}
3058
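/*
 * add_device: resolve the owning SMMU (legacy binding or firmware spec),
 * validate every stream ID and SMR mask against the hardware limits, then
 * allocate the per-master cfg and stream-map entries, all under a temporary
 * power vote on the SMMU.
 */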
Will Deacon03edb222015-01-19 14:27:33 +00003059static int arm_smmu_add_device(struct device *dev)
3060{
Robin Murphy06e393e2016-09-12 17:13:55 +01003061 struct arm_smmu_device *smmu;
Robin Murphyd5b41782016-09-14 15:21:39 +01003062 struct arm_smmu_master_cfg *cfg;
Robin Murphy7e96c742016-09-14 15:26:46 +01003063 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyd5b41782016-09-14 15:21:39 +01003064 int i, ret;
3065
Robin Murphy7e96c742016-09-14 15:26:46 +01003066 if (using_legacy_binding) {
3067 ret = arm_smmu_register_legacy_master(dev, &smmu);
3068 fwspec = dev->iommu_fwspec;
3069 if (ret)
3070 goto out_free;
Robin Murphy22e6f6c2016-11-02 17:31:32 +00003071 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Robin Murphy7e96c742016-09-14 15:26:46 +01003072 smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
3073 if (!smmu)
3074 return -ENODEV;
3075 } else {
3076 return -ENODEV;
3077 }
Robin Murphyd5b41782016-09-14 15:21:39 +01003078
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003079 ret = arm_smmu_power_on(smmu->pwr);
3080 if (ret)
3081 goto out_free;
3082
Robin Murphyd5b41782016-09-14 15:21:39 +01003083 ret = -EINVAL;
Robin Murphy06e393e2016-09-12 17:13:55 +01003084 for (i = 0; i < fwspec->num_ids; i++) {
3085 u16 sid = fwspec->ids[i];
Robin Murphy7e96c742016-09-14 15:26:46 +01003086 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyd5b41782016-09-14 15:21:39 +01003087
Robin Murphy06e393e2016-09-12 17:13:55 +01003088 if (sid & ~smmu->streamid_mask) {
Robin Murphyd5b41782016-09-14 15:21:39 +01003089 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01003090 sid, smmu->streamid_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003091 goto out_pwr_off;
Robin Murphyd5b41782016-09-14 15:21:39 +01003092 }
Robin Murphy7e96c742016-09-14 15:26:46 +01003093 if (mask & ~smmu->smr_mask_mask) {
3094 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
3095 sid, smmu->smr_mask_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003096 goto out_pwr_off;
Robin Murphy7e96c742016-09-14 15:26:46 +01003097 }
Robin Murphyd5b41782016-09-14 15:21:39 +01003098 }
Will Deacon03edb222015-01-19 14:27:33 +00003099
Robin Murphy06e393e2016-09-12 17:13:55 +01003100 ret = -ENOMEM;
3101 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
3102 GFP_KERNEL);
3103 if (!cfg)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003104 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01003105
3106 cfg->smmu = smmu;
3107 fwspec->iommu_priv = cfg;
3108 while (i--)
3109 cfg->smendx[i] = INVALID_SMENDX;
3110
Robin Murphy6668f692016-09-12 17:13:54 +01003111 ret = arm_smmu_master_alloc_smes(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01003112 if (ret)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003113 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01003114
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003115 arm_smmu_power_off(smmu->pwr);
Robin Murphy06e393e2016-09-12 17:13:55 +01003116 return 0;
Robin Murphyd5b41782016-09-14 15:21:39 +01003117
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003118out_pwr_off:
3119 arm_smmu_power_off(smmu->pwr);
Robin Murphyd5b41782016-09-14 15:21:39 +01003120out_free:
Robin Murphy06e393e2016-09-12 17:13:55 +01003121 if (fwspec)
3122 kfree(fwspec->iommu_priv);
3123 iommu_fwspec_free(dev);
Robin Murphyd5b41782016-09-14 15:21:39 +01003124 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00003125}
3126
Will Deacon45ae7cf2013-06-24 18:31:25 +01003127static void arm_smmu_remove_device(struct device *dev)
3128{
Robin Murphy06e393e2016-09-12 17:13:55 +01003129 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003130 struct arm_smmu_device *smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01003131
Robin Murphy06e393e2016-09-12 17:13:55 +01003132 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyd5b41782016-09-14 15:21:39 +01003133 return;
Robin Murphya754fd12016-09-12 17:13:50 +01003134
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003135 smmu = fwspec_smmu(fwspec);
3136 if (arm_smmu_power_on(smmu->pwr)) {
3137 WARN_ON(1);
3138 return;
3139 }
3140
Robin Murphy06e393e2016-09-12 17:13:55 +01003141 arm_smmu_master_free_smes(fwspec);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01003142 iommu_group_remove_device(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01003143 kfree(fwspec->iommu_priv);
3144 iommu_fwspec_free(dev);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003145 arm_smmu_power_off(smmu->pwr);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003146}
3147
Joerg Roedelaf659932015-10-21 23:51:41 +02003148static struct iommu_group *arm_smmu_device_group(struct device *dev)
3149{
Robin Murphy06e393e2016-09-12 17:13:55 +01003150 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
3151 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy6668f692016-09-12 17:13:54 +01003152 struct iommu_group *group = NULL;
3153 int i, idx;
3154
Robin Murphy06e393e2016-09-12 17:13:55 +01003155 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01003156 if (group && smmu->s2crs[idx].group &&
3157 group != smmu->s2crs[idx].group)
3158 return ERR_PTR(-EINVAL);
3159
3160 group = smmu->s2crs[idx].group;
3161 }
3162
Patrick Daly03330cc2017-08-11 14:56:38 -07003163 if (!group) {
3164 if (dev_is_pci(dev))
3165 group = pci_device_group(dev);
3166 else
3167 group = generic_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02003168
Patrick Daly03330cc2017-08-11 14:56:38 -07003169 if (IS_ERR(group))
3170 return NULL;
3171 }
3172
3173 if (arm_smmu_arch_device_group(dev, group)) {
3174 iommu_group_put(group);
3175 return ERR_PTR(-EINVAL);
3176 }
Joerg Roedelaf659932015-10-21 23:51:41 +02003177
Joerg Roedelaf659932015-10-21 23:51:41 +02003178 return group;
3179}
3180
Will Deaconc752ce42014-06-25 22:46:31 +01003181static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
3182 enum iommu_attr attr, void *data)
3183{
Joerg Roedel1d672632015-03-26 13:43:10 +01003184 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06003185 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01003186
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003187 mutex_lock(&smmu_domain->init_mutex);
Will Deaconc752ce42014-06-25 22:46:31 +01003188 switch (attr) {
3189 case DOMAIN_ATTR_NESTING:
3190 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003191 ret = 0;
3192 break;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08003193 case DOMAIN_ATTR_PT_BASE_ADDR:
3194 *((phys_addr_t *)data) =
3195 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003196 ret = 0;
3197 break;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06003198 case DOMAIN_ATTR_CONTEXT_BANK:
3199 /* context bank index isn't valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003200 if (smmu_domain->smmu == NULL) {
3201 ret = -ENODEV;
3202 break;
3203 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06003204 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
3205 ret = 0;
3206 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06003207 case DOMAIN_ATTR_TTBR0: {
3208 u64 val;
3209 struct arm_smmu_device *smmu = smmu_domain->smmu;
3210 /* not valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003211 if (smmu == NULL) {
3212 ret = -ENODEV;
3213 break;
3214 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06003215 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
3216 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
3217 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
3218 << (TTBRn_ASID_SHIFT);
3219 *((u64 *)data) = val;
3220 ret = 0;
3221 break;
3222 }
3223 case DOMAIN_ATTR_CONTEXTIDR:
3224 /* not valid until attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003225 if (smmu_domain->smmu == NULL) {
3226 ret = -ENODEV;
3227 break;
3228 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06003229 *((u32 *)data) = smmu_domain->cfg.procid;
3230 ret = 0;
3231 break;
3232 case DOMAIN_ATTR_PROCID:
3233 *((u32 *)data) = smmu_domain->cfg.procid;
3234 ret = 0;
3235 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07003236 case DOMAIN_ATTR_DYNAMIC:
3237 *((int *)data) = !!(smmu_domain->attributes
3238 & (1 << DOMAIN_ATTR_DYNAMIC));
3239 ret = 0;
3240 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07003241 case DOMAIN_ATTR_NON_FATAL_FAULTS:
3242 *((int *)data) = !!(smmu_domain->attributes
3243 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
3244 ret = 0;
3245 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07003246 case DOMAIN_ATTR_S1_BYPASS:
3247 *((int *)data) = !!(smmu_domain->attributes
3248 & (1 << DOMAIN_ATTR_S1_BYPASS));
3249 ret = 0;
3250 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07003251 case DOMAIN_ATTR_SECURE_VMID:
3252 *((int *)data) = smmu_domain->secure_vmid;
3253 ret = 0;
3254 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08003255 case DOMAIN_ATTR_PGTBL_INFO: {
3256 struct iommu_pgtbl_info *info = data;
3257
3258 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
3259 ret = -ENODEV;
3260 break;
3261 }
3262 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
3263 ret = 0;
3264 break;
3265 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003266 case DOMAIN_ATTR_FAST:
3267 *((int *)data) = !!(smmu_domain->attributes
3268 & (1 << DOMAIN_ATTR_FAST));
3269 ret = 0;
3270 break;
Patrick Daly1e279922017-09-06 15:57:45 -07003271 case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
3272 *((int *)data) = !!(smmu_domain->attributes
3273 & (1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR));
3274 ret = 0;
3275 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08003276 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
3277 *((int *)data) = !!(smmu_domain->attributes &
3278 (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
3279 ret = 0;
3280 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003281 case DOMAIN_ATTR_EARLY_MAP:
3282 *((int *)data) = !!(smmu_domain->attributes
3283 & (1 << DOMAIN_ATTR_EARLY_MAP));
3284 ret = 0;
3285 break;
Mitchel Humpherys05314f32016-06-07 16:04:40 -07003286 case DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003287 if (!smmu_domain->smmu) {
3288 ret = -ENODEV;
3289 break;
3290 }
Liam Mark53cf2342016-12-20 11:36:07 -08003291 *((int *)data) = is_iommu_pt_coherent(smmu_domain);
3292 ret = 0;
3293 break;
3294 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
3295 *((int *)data) = !!(smmu_domain->attributes
3296 & (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
Mitchel Humpherys05314f32016-06-07 16:04:40 -07003297 ret = 0;
3298 break;
Charan Teja Reddyc682e472017-04-20 19:11:20 +05303299 case DOMAIN_ATTR_CB_STALL_DISABLE:
3300 *((int *)data) = !!(smmu_domain->attributes
3301 & (1 << DOMAIN_ATTR_CB_STALL_DISABLE));
3302 ret = 0;
3303 break;
Patrick Daly83174c12017-10-26 12:31:15 -07003304 case DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN:
Patrick Daly23301482017-10-12 16:18:25 -07003305 *((int *)data) = smmu_domain->qsmmuv500_errata2_min_align;
3306 ret = 0;
3307 break;
Prakash Guptac2e909a2018-03-29 11:23:06 +05303308 case DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE:
3309 *((int *)data) = !!(smmu_domain->attributes
3310 & (1 << DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE));
3311 ret = 0;
3312 break;
3313
Will Deaconc752ce42014-06-25 22:46:31 +01003314 default:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003315 ret = -ENODEV;
3316 break;
Will Deaconc752ce42014-06-25 22:46:31 +01003317 }
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003318 mutex_unlock(&smmu_domain->init_mutex);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06003319 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01003320}
3321
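/*
 * Most attributes below can only be set while the domain is detached. A
 * minimal, illustrative sequence for a client (names such as 'dev' are
 * placeholders, not taken from this driver):
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *	int one = 1;
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_ATOMIC, &one);
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_S1_BYPASS, &one);
 *	iommu_attach_device(domain, dev);
 */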
3322static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
3323 enum iommu_attr attr, void *data)
3324{
Will Deacon518f7132014-11-14 17:17:54 +00003325 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01003326 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01003327
Will Deacon518f7132014-11-14 17:17:54 +00003328 mutex_lock(&smmu_domain->init_mutex);
3329
Will Deaconc752ce42014-06-25 22:46:31 +01003330 switch (attr) {
3331 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00003332 if (smmu_domain->smmu) {
3333 ret = -EPERM;
3334 goto out_unlock;
3335 }
3336
Will Deaconc752ce42014-06-25 22:46:31 +01003337 if (*(int *)data)
3338 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
3339 else
3340 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
3341
Will Deacon518f7132014-11-14 17:17:54 +00003342 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06003343 case DOMAIN_ATTR_PROCID:
3344 if (smmu_domain->smmu != NULL) {
3345 dev_err(smmu_domain->smmu->dev,
3346 "cannot change procid attribute while attached\n");
3347 ret = -EBUSY;
3348 break;
3349 }
3350 smmu_domain->cfg.procid = *((u32 *)data);
3351 ret = 0;
3352 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07003353 case DOMAIN_ATTR_DYNAMIC: {
3354 int dynamic = *((int *)data);
3355
3356 if (smmu_domain->smmu != NULL) {
3357 dev_err(smmu_domain->smmu->dev,
3358 "cannot change dynamic attribute while attached\n");
3359 ret = -EBUSY;
3360 break;
3361 }
3362
3363 if (dynamic)
3364 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
3365 else
3366 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
3367 ret = 0;
3368 break;
3369 }
3370 case DOMAIN_ATTR_CONTEXT_BANK:
3371 /* context bank can't be set while attached */
3372 if (smmu_domain->smmu != NULL) {
3373 ret = -EBUSY;
3374 break;
3375 }
3376 /* ... and it can only be set for dynamic contexts. */
3377 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
3378 ret = -EINVAL;
3379 break;
3380 }
3381
3382 /* this will be validated during attach */
3383 smmu_domain->cfg.cbndx = *((unsigned int *)data);
3384 ret = 0;
3385 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07003386 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
3387 u32 non_fatal_faults = *((int *)data);
3388
3389 if (non_fatal_faults)
3390 smmu_domain->attributes |=
3391 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
3392 else
3393 smmu_domain->attributes &=
3394 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
3395 ret = 0;
3396 break;
3397 }
Patrick Dalye62d3362016-03-15 18:58:28 -07003398 case DOMAIN_ATTR_S1_BYPASS: {
3399 int bypass = *((int *)data);
3400
3401 /* bypass can't be changed while attached */
3402 if (smmu_domain->smmu != NULL) {
3403 ret = -EBUSY;
3404 break;
3405 }
3406 if (bypass)
3407 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
3408 else
3409 smmu_domain->attributes &=
3410 ~(1 << DOMAIN_ATTR_S1_BYPASS);
3411
3412 ret = 0;
3413 break;
3414 }
Patrick Daly8befb662016-08-17 20:03:28 -07003415 case DOMAIN_ATTR_ATOMIC:
3416 {
3417 int atomic_ctx = *((int *)data);
3418
3419 /* can't be changed while attached */
3420 if (smmu_domain->smmu != NULL) {
3421 ret = -EBUSY;
3422 break;
3423 }
3424 if (atomic_ctx)
3425 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
3426 else
3427 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
3428 break;
3429 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07003430 case DOMAIN_ATTR_SECURE_VMID:
3431 if (smmu_domain->secure_vmid != VMID_INVAL) {
3432 ret = -ENODEV;
3433 WARN(1, "secure vmid already set!");
3434 break;
3435 }
3436 smmu_domain->secure_vmid = *((int *)data);
3437 break;
Patrick Daly1e279922017-09-06 15:57:45 -07003438 case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
3439 if (*((int *)data))
3440 smmu_domain->attributes |=
3441 1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR;
3442 ret = 0;
3443 break;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003444 /*
3445 * fast_smmu_unmap_page() and fast_smmu_alloc_iova() both
3446 * expect that the bus/clock/regulator are already on. Thus also
 3447	 * force DOMAIN_ATTR_ATOMIC to be set.
3448 */
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003449 case DOMAIN_ATTR_FAST:
Patrick Daly0df84ac2017-10-11 17:32:41 -07003450 {
3451 int fast = *((int *)data);
3452
3453 if (fast) {
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003454 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003455 smmu_domain->attributes |= 1 << DOMAIN_ATTR_ATOMIC;
3456 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003457 ret = 0;
3458 break;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003459 }
Patrick Dalyce6786f2016-11-09 14:19:23 -08003460 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
3461 /* can't be changed while attached */
3462 if (smmu_domain->smmu != NULL) {
3463 ret = -EBUSY;
3464 break;
3465 }
3466 if (*((int *)data))
3467 smmu_domain->attributes |=
3468 1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
3469 ret = 0;
3470 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003471 case DOMAIN_ATTR_EARLY_MAP: {
3472 int early_map = *((int *)data);
3473
3474 ret = 0;
3475 if (early_map) {
3476 smmu_domain->attributes |=
3477 1 << DOMAIN_ATTR_EARLY_MAP;
3478 } else {
3479 if (smmu_domain->smmu)
3480 ret = arm_smmu_enable_s1_translations(
3481 smmu_domain);
3482
3483 if (!ret)
3484 smmu_domain->attributes &=
3485 ~(1 << DOMAIN_ATTR_EARLY_MAP);
3486 }
3487 break;
3488 }
Liam Mark53cf2342016-12-20 11:36:07 -08003489 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT: {
3490 int force_coherent = *((int *)data);
3491
3492 if (smmu_domain->smmu != NULL) {
3493 dev_err(smmu_domain->smmu->dev,
3494 "cannot change force coherent attribute while attached\n");
3495 ret = -EBUSY;
3496 break;
3497 }
3498
3499 if (force_coherent)
3500 smmu_domain->attributes |=
3501 1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
3502 else
3503 smmu_domain->attributes &=
3504 ~(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT);
3505
3506 ret = 0;
3507 break;
3508 }
3509
Charan Teja Reddyc682e472017-04-20 19:11:20 +05303510 case DOMAIN_ATTR_CB_STALL_DISABLE:
3511 if (*((int *)data))
3512 smmu_domain->attributes |=
3513 1 << DOMAIN_ATTR_CB_STALL_DISABLE;
3514 ret = 0;
3515 break;
Prakash Guptac2e909a2018-03-29 11:23:06 +05303516
3517 case DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE: {
3518 int force_iova_guard_page = *((int *)data);
3519
3520 if (smmu_domain->smmu != NULL) {
3521 dev_err(smmu_domain->smmu->dev,
3522 "cannot change force guard page attribute while attached\n");
3523 ret = -EBUSY;
3524 break;
3525 }
3526
3527 if (force_iova_guard_page)
3528 smmu_domain->attributes |=
3529 1 << DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE;
3530 else
3531 smmu_domain->attributes &=
3532 ~(1 << DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE);
3533
3534 ret = 0;
3535 break;
3536 }
3537
Will Deaconc752ce42014-06-25 22:46:31 +01003538 default:
Will Deacon518f7132014-11-14 17:17:54 +00003539 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01003540 }
Will Deacon518f7132014-11-14 17:17:54 +00003541
3542out_unlock:
3543 mutex_unlock(&smmu_domain->init_mutex);
3544 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01003545}
3546
Robin Murphy7e96c742016-09-14 15:26:46 +01003547static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
3548{
3549 u32 fwid = 0;
3550
3551 if (args->args_count > 0)
3552 fwid |= (u16)args->args[0];
3553
3554 if (args->args_count > 1)
3555 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
3556
3557 return iommu_fwspec_add_ids(dev, &fwid, 1);
3558}
3559
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003560static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain)
3561{
3562 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3563 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy6549a1f2017-08-08 14:56:14 +01003564 struct arm_smmu_cb *cb = &smmu->cbs[cfg->cbndx];
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003565 int ret;
3566
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003567 ret = arm_smmu_power_on(smmu->pwr);
3568 if (ret)
3569 return ret;
3570
Robin Murphy6549a1f2017-08-08 14:56:14 +01003571 cb->attributes &= ~(1 << DOMAIN_ATTR_EARLY_MAP);
3572 arm_smmu_write_context_bank(smmu, cfg->cbndx);
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003573
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003574 arm_smmu_power_off(smmu->pwr);
3575 return ret;
3576}
3577
Liam Mark3ba41cf2016-12-09 14:39:04 -08003578static bool arm_smmu_is_iova_coherent(struct iommu_domain *domain,
3579 dma_addr_t iova)
3580{
3581 bool ret;
3582 unsigned long flags;
3583 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3584 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
3585
3586 if (!ops)
3587 return false;
3588
3589 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
3590 ret = ops->is_iova_coherent(ops, iova);
3591 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
3592 return ret;
3593}
3594
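/*
 * Debug helper: write the caller-supplied syndrome bits to FSRRESTORE on the
 * domain's context bank to synthesise a context fault, then sleep for a
 * second to give the fault interrupt time to fire.
 */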
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003595static void arm_smmu_trigger_fault(struct iommu_domain *domain,
3596 unsigned long flags)
3597{
3598 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3599 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3600 struct arm_smmu_device *smmu;
3601 void __iomem *cb_base;
3602
3603 if (!smmu_domain->smmu) {
3604 pr_err("Can't trigger faults on non-attached domains\n");
3605 return;
3606 }
3607
3608 smmu = smmu_domain->smmu;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003609 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003610 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003611
3612 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3613 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
3614 flags, cfg->cbndx);
3615 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07003616 /* give the interrupt time to fire... */
3617 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003618
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003619 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003620}
3621
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003622static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
3623{
Patrick Dalyda765c62017-09-11 16:31:07 -07003624 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3625 const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;
3626
3627 tlb->tlb_flush_all(smmu_domain);
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003628}
3629
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003630static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
3631{
3632 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3633
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003634 return arm_smmu_power_on(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003635}
3636
3637static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
3638{
3639 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3640
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003641 arm_smmu_power_off(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003642}
3643
Will Deacon518f7132014-11-14 17:17:54 +00003644static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01003645 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01003646 .domain_alloc = arm_smmu_domain_alloc,
3647 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01003648 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07003649 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01003650 .map = arm_smmu_map,
3651 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07003652 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01003653 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07003654 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01003655 .add_device = arm_smmu_add_device,
3656 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02003657 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01003658 .domain_get_attr = arm_smmu_domain_get_attr,
3659 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy7e96c742016-09-14 15:26:46 +01003660 .of_xlate = arm_smmu_of_xlate,
Will Deacon518f7132014-11-14 17:17:54 +00003661 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003662 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003663 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003664 .enable_config_clocks = arm_smmu_enable_config_clocks,
3665 .disable_config_clocks = arm_smmu_disable_config_clocks,
Liam Mark3ba41cf2016-12-09 14:39:04 -08003666 .is_iova_coherent = arm_smmu_is_iova_coherent,
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07003667 .iova_to_pte = arm_smmu_iova_to_pte,
Will Deacon45ae7cf2013-06-24 18:31:25 +01003668};
3669
Patrick Dalyad441dd2016-09-15 15:50:46 -07003670#define IMPL_DEF1_MICRO_MMU_CTRL 0
3671#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
3672#define MICRO_MMU_CTRL_IDLE (1 << 3)
3673
3674/* Definitions for implementation-defined registers */
3675#define ACTLR_QCOM_OSH_SHIFT 28
3676#define ACTLR_QCOM_OSH 1
3677
3678#define ACTLR_QCOM_ISH_SHIFT 29
3679#define ACTLR_QCOM_ISH 1
3680
3681#define ACTLR_QCOM_NSH_SHIFT 30
3682#define ACTLR_QCOM_NSH 1
3683
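/*
 * QSMMUv2 halt/resume: the SMMU is quiesced by setting LOCAL_HALT_REQ in the
 * implementation-defined MICRO_MMU_CTRL register and polling for the IDLE
 * bit. When the context banks are statically owned by TZ, the register is
 * written through scm_io_write() rather than a direct MMIO access.
 */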
3684static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003685{
3686 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003687 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003688
3689 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
3690 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
3691 0, 30000)) {
3692 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
3693 return -EBUSY;
3694 }
3695
3696 return 0;
3697}
3698
Patrick Dalyad441dd2016-09-15 15:50:46 -07003699static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003700{
3701 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3702 u32 reg;
3703
3704 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3705 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
Charan Teja Reddy35144b02017-09-05 16:20:46 +05303706
3707 if (arm_smmu_is_static_cb(smmu)) {
3708 phys_addr_t impl_def1_base_phys = impl_def1_base - smmu->base +
3709 smmu->phys_addr;
3710
3711 if (scm_io_write(impl_def1_base_phys +
3712 IMPL_DEF1_MICRO_MMU_CTRL, reg)) {
3713 dev_err(smmu->dev,
 3714				"scm_io_write failed; SMMU might not be halted");
3715 return -EINVAL;
3716 }
3717 } else {
3718 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3719 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003720
Patrick Dalyad441dd2016-09-15 15:50:46 -07003721 return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003722}
3723
Patrick Dalyad441dd2016-09-15 15:50:46 -07003724static int qsmmuv2_halt(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003725{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003726 return __qsmmuv2_halt(smmu, true);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003727}
3728
Patrick Dalyad441dd2016-09-15 15:50:46 -07003729static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003730{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003731 return __qsmmuv2_halt(smmu, false);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003732}
3733
Patrick Dalyad441dd2016-09-15 15:50:46 -07003734static void qsmmuv2_resume(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003735{
3736 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3737 u32 reg;
3738
3739 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3740 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
Charan Teja Reddy35144b02017-09-05 16:20:46 +05303741
3742 if (arm_smmu_is_static_cb(smmu)) {
3743 phys_addr_t impl_def1_base_phys = impl_def1_base - smmu->base +
3744 smmu->phys_addr;
3745
3746 if (scm_io_write(impl_def1_base_phys +
3747 IMPL_DEF1_MICRO_MMU_CTRL, reg))
3748 dev_err(smmu->dev,
 3749				"scm_io_write failed; SMMU might not be resumed");
3750 } else {
3751 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3752 }
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003753}
3754
Patrick Dalyad441dd2016-09-15 15:50:46 -07003755static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003756{
3757 int i;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003758 u32 val;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003759 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003760 /*
3761 * SCTLR.M must be disabled here per ARM SMMUv2 spec
3762 * to prevent table walks with an inconsistent state.
3763 */
3764 for (i = 0; i < smmu->num_context_banks; ++i) {
Patrick Dalyad521082018-04-06 18:07:13 -07003765 struct arm_smmu_cb *cb = &smmu->cbs[i];
3766
Patrick Dalyad441dd2016-09-15 15:50:46 -07003767 val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
3768 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
3769 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
Patrick Dalyad521082018-04-06 18:07:13 -07003770 cb->actlr = val;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003771 }
3772
3773 /* Program implementation defined registers */
3774 qsmmuv2_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003775 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
3776 writel_relaxed(regs[i].value,
3777 ARM_SMMU_GR0(smmu) + regs[i].offset);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003778 qsmmuv2_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003779}
3780
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003781static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
3782 dma_addr_t iova)
Patrick Dalyad441dd2016-09-15 15:50:46 -07003783{
3784 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3785 struct arm_smmu_device *smmu = smmu_domain->smmu;
3786 int ret;
3787 phys_addr_t phys = 0;
3788 unsigned long flags;
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003789 u32 sctlr, sctlr_orig, fsr;
3790 void __iomem *cb_base;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003791
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003792 ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003793 if (ret)
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003794 return ret;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003795
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003796 spin_lock_irqsave(&smmu->atos_lock, flags);
3797 cb_base = ARM_SMMU_CB_BASE(smmu) +
3798 ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003799
3800 qsmmuv2_halt_nowait(smmu);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003801 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003802 qsmmuv2_wait_for_halt(smmu);
3803
3804 /* clear FSR to allow ATOS to log any faults */
3805 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
3806 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
3807
3808 /* disable stall mode momentarily */
3809 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3810 sctlr = sctlr_orig & ~SCTLR_CFCFG;
3811 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
3812
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003813 phys = __arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003814
3815 /* restore SCTLR */
3816 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
3817
3818 qsmmuv2_resume(smmu);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003819 spin_unlock_irqrestore(&smmu->atos_lock, flags);
3820
3821 arm_smmu_power_off(smmu_domain->smmu->pwr);
3822 return phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003823}
3824
3825struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
3826 .device_reset = qsmmuv2_device_reset,
3827 .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
Patrick Dalyad441dd2016-09-15 15:50:46 -07003828};
3829
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003830static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003831{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003832 int i;
3833 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003834 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003835
Peng Fan3ca37122016-05-03 21:50:30 +08003836 /*
3837	 * Before clearing ARM_MMU500_ACTLR_CPRE, the CACHE_LOCK bit of ACR
3838	 * must be cleared first; note that the CACHE_LOCK bit is only
3839	 * present in MMU-500 r2 onwards.
3840 */
3841 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
3842 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
3843 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
3844 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
3845 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
3846 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
3847 }
3848
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003849 /* Make sure all context banks are disabled and clear CB_FSR */
3850 for (i = 0; i < smmu->num_context_banks; ++i) {
Robin Murphy6549a1f2017-08-08 14:56:14 +01003851 void __iomem *cb_base = ARM_SMMU_CB_BASE(smmu) +
3852 ARM_SMMU_CB(smmu, i);
3853
3854 arm_smmu_write_context_bank(smmu, i);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003855 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003856 /*
3857 * Disable MMU-500's not-particularly-beneficial next-page
3858 * prefetcher for the sake of errata #841119 and #826419.
3859 */
3860 if (smmu->model == ARM_MMU500) {
3861 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
3862 reg &= ~ARM_MMU500_ACTLR_CPRE;
3863 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
3864 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003865 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003866}
3867
3868static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
3869{
3870 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy468f4942016-09-12 17:13:49 +01003871 int i;
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003872 u32 reg;
3873
3874 /* clear global FSR */
3875 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3876 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3877
Robin Murphy468f4942016-09-12 17:13:49 +01003878 /*
3879 * Reset stream mapping groups: Initial values mark all SMRn as
3880 * invalid and all S2CRn as bypass unless overridden.
3881 */
Patrick Daly59b6d202017-06-12 13:12:15 -07003882 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
3883 for (i = 0; i < smmu->num_mapping_groups; ++i)
3884 arm_smmu_write_sme(smmu, i);
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003885
Patrick Daly59b6d202017-06-12 13:12:15 -07003886 arm_smmu_context_bank_reset(smmu);
3887 }
Will Deacon1463fe42013-07-31 19:21:27 +01003888
Will Deacon45ae7cf2013-06-24 18:31:25 +01003889 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003890 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
3891 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
3892
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003893 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003894
Will Deacon45ae7cf2013-06-24 18:31:25 +01003895 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003896 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003897
3898 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003899 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003900
Robin Murphy25a1c962016-02-10 14:25:33 +00003901 /* Enable client access, handling unmatched streams as appropriate */
3902 reg &= ~sCR0_CLIENTPD;
3903 if (disable_bypass)
3904 reg |= sCR0_USFCFG;
3905 else
3906 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003907
3908 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003909 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003910
3911 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003912 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003913
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003914 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3915 reg |= sCR0_VMID16EN;
3916
Patrick Daly7f377fe2017-10-06 17:37:10 -07003917	/* Force bypass transactions to be Non-Shareable and not IO-coherent */
3918 reg &= ~(sCR0_SHCFG_MASK << sCR0_SHCFG_SHIFT);
Prakash Gupta673a79f2017-11-16 18:07:00 +05303919 reg |= sCR0_SHCFG_NSH << sCR0_SHCFG_SHIFT;
Patrick Daly7f377fe2017-10-06 17:37:10 -07003920
Will Deacon45ae7cf2013-06-24 18:31:25 +01003921 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003922 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003923 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Dalyd7476202016-09-08 18:23:28 -07003924
3925 /* Manage any implementation defined features */
3926 arm_smmu_arch_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003927}
3928
3929static int arm_smmu_id_size_to_bits(int size)
3930{
3931 switch (size) {
3932 case 0:
3933 return 32;
3934 case 1:
3935 return 36;
3936 case 2:
3937 return 40;
3938 case 3:
3939 return 42;
3940 case 4:
3941 return 44;
3942 case 5:
3943 default:
3944 return 48;
3945 }
3946}
3947
Patrick Dalyda688822017-05-17 20:12:48 -07003948
3949/*
3950 * Some context banks need to be transferred from the bootloader to HLOS in a
3951 * way that allows ongoing traffic. The current expectation is that these
3952 * context banks operate in bypass mode.
3953 * Additionally, there must be exactly one device in the devicetree with
3954 * stream-ids overlapping those used by the bootloader.
3955 */
3956static int arm_smmu_alloc_cb(struct iommu_domain *domain,
3957 struct arm_smmu_device *smmu,
3958 struct device *dev)
3959{
3960 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Dalye72526b2017-07-18 16:21:44 -07003961 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Dalyda688822017-05-17 20:12:48 -07003962 u32 i, idx;
3963 int cb = -EINVAL;
3964 bool dynamic;
3965
Patrick Dalye72526b2017-07-18 16:21:44 -07003966 /*
3967 * Dynamic domains have already set cbndx through domain attribute.
3968 * Verify that they picked a valid value.
3969 */
Patrick Dalyda688822017-05-17 20:12:48 -07003970 dynamic = is_dynamic_domain(domain);
Patrick Dalye72526b2017-07-18 16:21:44 -07003971 if (dynamic) {
3972 cb = smmu_domain->cfg.cbndx;
3973 if (cb < smmu->num_context_banks)
3974 return cb;
3975 else
3976 return -EINVAL;
3977 }
Patrick Dalyda688822017-05-17 20:12:48 -07003978
3979 mutex_lock(&smmu->stream_map_mutex);
3980 for_each_cfg_sme(fwspec, i, idx) {
3981 if (smmu->s2crs[idx].cb_handoff)
3982 cb = smmu->s2crs[idx].cbndx;
3983 }
3984
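	/*
	 * A context bank handed off by the bootloader is reused as-is.  On a
	 * static-cb (TZ-managed) SMMU it remains under secure-world control,
	 * so record it in the secure context map.
	 */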
Shiraz Hashima28a4792018-01-13 00:39:52 +05303985 if (cb >= 0 && arm_smmu_is_static_cb(smmu)) {
Charan Teja Reddy35144b02017-09-05 16:20:46 +05303986 smmu_domain->slave_side_secure = true;
3987
Shiraz Hashima28a4792018-01-13 00:39:52 +05303988 if (arm_smmu_is_slave_side_secure(smmu_domain))
Charan Teja Reddy4971ca42018-01-23 18:27:08 +05303989 bitmap_set(smmu->secure_context_map, cb, 1);
Shiraz Hashima28a4792018-01-13 00:39:52 +05303990 }
3991
Charan Teja Reddyf0758df2017-09-04 18:52:07 +05303992 if (cb < 0 && !arm_smmu_is_static_cb(smmu)) {
Patrick Dalyda688822017-05-17 20:12:48 -07003993 mutex_unlock(&smmu->stream_map_mutex);
3994 return __arm_smmu_alloc_bitmap(smmu->context_map,
3995 smmu->num_s2_context_banks,
3996 smmu->num_context_banks);
3997 }
3998
3999 for (i = 0; i < smmu->num_mapping_groups; i++) {
Patrick Daly2eb31362017-06-14 18:29:36 -07004000 if (smmu->s2crs[i].cb_handoff && smmu->s2crs[i].cbndx == cb) {
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304001 if (!arm_smmu_is_static_cb(smmu))
4002 smmu->s2crs[i].cb_handoff = false;
Patrick Dalyda688822017-05-17 20:12:48 -07004003 smmu->s2crs[i].count -= 1;
4004 }
4005 }
4006 mutex_unlock(&smmu->stream_map_mutex);
4007
4008 return cb;
4009}
4010
Patrick Dalyaddf1f82018-04-23 14:39:19 -07004011static void parse_static_cb_cfg(struct arm_smmu_device *smmu)
4012{
4013 u32 idx = 0;
4014 u32 val;
4015 int ret;
4016
4017 if (!(arm_smmu_is_static_cb(smmu) &&
4018 arm_smmu_opt_hibernation(smmu)))
4019 return;
4020
4021 /*
4022 * Context banks may be xpu-protected. Require a devicetree property to
4023 * indicate which context banks HLOS has access to.
4024 */
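	/*
	 * All context banks start out marked secure; each index listed in the
	 * property clears the corresponding bit.  An illustrative (made-up)
	 * fragment:
	 *
	 *     qcom,static-ns-cbs = <2 5 7>;
	 *
	 * would expose context banks 2, 5 and 7 to HLOS.
	 */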
4025 bitmap_set(smmu->secure_context_map, 0, ARM_SMMU_MAX_CBS);
4026 while (idx < ARM_SMMU_MAX_CBS) {
4027 ret = of_property_read_u32_index(
4028 smmu->dev->of_node, "qcom,static-ns-cbs",
4029 idx++, &val);
4030 if (ret)
4031 break;
4032
4033 bitmap_clear(smmu->secure_context_map, val, 1);
4034		dev_dbg(smmu->dev, "Detected NS context bank: %d\n", val);
4035 }
4036}
4037
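/*
 * Snapshot the stream-mapping state programmed by the bootloader: every
 * valid SMR with a translating S2CR is recorded (with cb_handoff set) and
 * its context bank reserved, so ongoing traffic keeps working until the
 * matching devicetree master attaches.
 */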
Patrick Dalyda688822017-05-17 20:12:48 -07004038static int arm_smmu_handoff_cbs(struct arm_smmu_device *smmu)
4039{
4040 u32 i, raw_smr, raw_s2cr;
4041 struct arm_smmu_smr smr;
4042 struct arm_smmu_s2cr s2cr;
4043
4044 for (i = 0; i < smmu->num_mapping_groups; i++) {
4045 raw_smr = readl_relaxed(ARM_SMMU_GR0(smmu) +
4046 ARM_SMMU_GR0_SMR(i));
4047 if (!(raw_smr & SMR_VALID))
4048 continue;
4049
4050 smr.mask = (raw_smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
4051 smr.id = (u16)raw_smr;
4052 smr.valid = true;
4053
4054 raw_s2cr = readl_relaxed(ARM_SMMU_GR0(smmu) +
4055 ARM_SMMU_GR0_S2CR(i));
Patrick Daly4ea4bdf2017-08-29 19:24:49 -07004056 memset(&s2cr, 0, sizeof(s2cr));
Patrick Dalyda688822017-05-17 20:12:48 -07004057 s2cr.group = NULL;
4058 s2cr.count = 1;
4059 s2cr.type = (raw_s2cr >> S2CR_TYPE_SHIFT) & S2CR_TYPE_MASK;
4060 s2cr.privcfg = (raw_s2cr >> S2CR_PRIVCFG_SHIFT) &
4061 S2CR_PRIVCFG_MASK;
4062 s2cr.cbndx = (u8)raw_s2cr;
4063 s2cr.cb_handoff = true;
4064
4065 if (s2cr.type != S2CR_TYPE_TRANS)
4066 continue;
4067
4068 smmu->smrs[i] = smr;
4069 smmu->s2crs[i] = s2cr;
4070 bitmap_set(smmu->context_map, s2cr.cbndx, 1);
4071 dev_dbg(smmu->dev, "Handoff smr: %x s2cr: %x cb: %d\n",
4072 raw_smr, raw_s2cr, s2cr.cbndx);
4073 }
4074
4075 return 0;
4076}
4077
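/*
 * "attach-impl-defs" is a flat list of <offset value> pairs written to the
 * global register space at device reset.  An illustrative (made-up)
 * fragment:
 *
 *     attach-impl-defs = <0x6000 0x270>,
 *                        <0x6060 0x1055>;
 *
 * would write 0x270 to GR0 + 0x6000 and 0x1055 to GR0 + 0x6060.
 */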
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004078static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
4079{
4080 struct device *dev = smmu->dev;
4081 int i, ntuples, ret;
4082 u32 *tuples;
4083 struct arm_smmu_impl_def_reg *regs, *regit;
4084
4085 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
4086 return 0;
4087
4088 ntuples /= sizeof(u32);
4089 if (ntuples % 2) {
4090 dev_err(dev,
4091 "Invalid number of attach-impl-defs registers: %d\n",
4092 ntuples);
4093 return -EINVAL;
4094 }
4095
4096 regs = devm_kmalloc(
4097 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
4098 GFP_KERNEL);
4099 if (!regs)
4100 return -ENOMEM;
4101
4102 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
4103 if (!tuples)
4104 return -ENOMEM;
4105
4106 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
4107 tuples, ntuples);
4108 if (ret)
4109 return ret;
4110
4111 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
4112 regit->offset = tuples[i];
4113 regit->value = tuples[i + 1];
4114 }
4115
4116 devm_kfree(dev, tuples);
4117
4118 smmu->impl_def_attach_registers = regs;
4119 smmu->num_impl_def_attach_registers = ntuples / 2;
4120
4121 return 0;
4122}
4123
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004124
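/*
 * Look up every clock listed in "clock-names".  A clock reporting a rate of
 * zero is assigned the nearest supported rate for 1000 Hz so it has a valid
 * rate before it is ever enabled.
 */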
4125static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004126{
4127 const char *cname;
4128 struct property *prop;
4129 int i;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004130 struct device *dev = pwr->dev;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004131
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004132 pwr->num_clocks =
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004133 of_property_count_strings(dev->of_node, "clock-names");
4134
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004135 if (pwr->num_clocks < 1) {
4136 pwr->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004137 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07004138 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004139
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004140 pwr->clocks = devm_kzalloc(
4141 dev, sizeof(*pwr->clocks) * pwr->num_clocks,
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004142 GFP_KERNEL);
4143
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004144 if (!pwr->clocks)
4145 return -ENOMEM;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004146
4147 i = 0;
4148 of_property_for_each_string(dev->of_node, "clock-names",
4149 prop, cname) {
4150 struct clk *c = devm_clk_get(dev, cname);
4151
4152 if (IS_ERR(c)) {
4153			dev_err(dev, "Couldn't get clock: %s\n",
4154 cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07004155 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004156 }
4157
4158 if (clk_get_rate(c) == 0) {
4159 long rate = clk_round_rate(c, 1000);
4160
4161 clk_set_rate(c, rate);
4162 }
4163
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004164 pwr->clocks[i] = c;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004165
4166 ++i;
4167 }
4168 return 0;
4169}
4170
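/*
 * GDSC regulator notifier: halt the SMMU just before its power domain is
 * disabled, and restore the secure configuration and resume it once the
 * domain comes back up.  Clocks are enabled around the register accesses.
 */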
Charan Teja Reddyf8464882017-12-05 20:29:05 +05304171static int regulator_notifier(struct notifier_block *nb,
4172 unsigned long event, void *data)
4173{
4174 int ret = 0;
4175 struct arm_smmu_device *smmu = container_of(nb, struct arm_smmu_device,
4176 regulator_nb);
4177
4178 if (event != REGULATOR_EVENT_PRE_DISABLE &&
4179 event != REGULATOR_EVENT_ENABLE)
4180 return NOTIFY_OK;
4181
4182 ret = arm_smmu_prepare_clocks(smmu->pwr);
4183 if (ret)
4184 goto out;
4185
4186 ret = arm_smmu_power_on_atomic(smmu->pwr);
4187 if (ret)
4188 goto unprepare_clock;
4189
4190 if (event == REGULATOR_EVENT_PRE_DISABLE)
4191 qsmmuv2_halt(smmu);
4192 else if (event == REGULATOR_EVENT_ENABLE) {
Charan Teja Reddyec6f7822018-01-10 17:32:52 +05304193 if (arm_smmu_restore_sec_cfg(smmu, 0))
Charan Teja Reddyf8464882017-12-05 20:29:05 +05304194 goto power_off;
4195 qsmmuv2_resume(smmu);
4196 }
4197power_off:
4198 arm_smmu_power_off_atomic(smmu->pwr);
4199unprepare_clock:
4200 arm_smmu_unprepare_clocks(smmu->pwr);
4201out:
4202 return NOTIFY_OK;
4203}
4204
4205static int register_regulator_notifier(struct arm_smmu_device *smmu)
4206{
4207 struct device *dev = smmu->dev;
4208 struct regulator_bulk_data *consumers;
4209 int ret = 0, num_consumers;
4210 struct arm_smmu_power_resources *pwr = smmu->pwr;
4211
4212 if (!(smmu->options & ARM_SMMU_OPT_HALT))
4213 goto out;
4214
4215 num_consumers = pwr->num_gdscs;
4216 consumers = pwr->gdscs;
4217
4218 if (!num_consumers) {
4219		dev_info(dev, "no regulator info exists for %s\n",
4220 dev_name(dev));
4221 goto out;
4222 }
4223
4224 smmu->regulator_nb.notifier_call = regulator_notifier;
4225 /* registering the notifier against one gdsc is sufficient as
4226	/* Registering the notifier against one GDSC is sufficient, as the
4227	 * regulators are enabled and disabled as a group.
4228 ret = regulator_register_notifier(consumers[0].consumer,
4229 &smmu->regulator_nb);
4230 if (ret)
4231 dev_err(dev, "Regulator notifier request failed\n");
4232out:
4233 return ret;
4234}
4235
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004236static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004237{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004238 const char *cname;
4239 struct property *prop;
4240 int i, ret = 0;
4241 struct device *dev = pwr->dev;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004242
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004243 pwr->num_gdscs =
4244 of_property_count_strings(dev->of_node, "qcom,regulator-names");
4245
4246 if (pwr->num_gdscs < 1) {
4247 pwr->num_gdscs = 0;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004248 return 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004249 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004250
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004251 pwr->gdscs = devm_kzalloc(
4252 dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
4253
4254 if (!pwr->gdscs)
4255 return -ENOMEM;
4256
Prakash Guptafad87ca2017-05-16 12:13:02 +05304257 if (!of_property_read_u32(dev->of_node,
4258 "qcom,deferred-regulator-disable-delay",
4259 &(pwr->regulator_defer)))
4260 dev_info(dev, "regulator defer delay %d\n",
4261 pwr->regulator_defer);
4262
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004263 i = 0;
4264 of_property_for_each_string(dev->of_node, "qcom,regulator-names",
4265 prop, cname)
Patrick Daly86396be2017-04-17 18:08:45 -07004266 pwr->gdscs[i++].supply = cname;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004267
4268 ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
4269 return ret;
4270}
4271
4272static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
4273{
4274 struct device *dev = pwr->dev;
4275
4276 /* We don't want the bus APIs to print an error message */
4277 if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
4278 dev_dbg(dev, "No bus scaling info\n");
4279 return 0;
4280 }
4281
4282 pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
4283 if (!pwr->bus_dt_data) {
4284 dev_err(dev, "Unable to read bus-scaling from devicetree\n");
4285 return -EINVAL;
4286 }
4287
4288 pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
4289 if (!pwr->bus_client) {
4290 dev_err(dev, "Bus client registration failed\n");
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004291 return -EINVAL;
4292 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004293
4294 return 0;
4295}
4296
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004297/*
4298 * Cleanup is handled by devm; any non-devm resources must be released explicitly.
4299 */
4300static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
4301 struct platform_device *pdev)
Patrick Daly2764f952016-09-06 19:22:44 -07004302{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004303 struct arm_smmu_power_resources *pwr;
4304 int ret;
Patrick Daly2764f952016-09-06 19:22:44 -07004305
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004306 pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
4307 if (!pwr)
4308 return ERR_PTR(-ENOMEM);
Patrick Daly2764f952016-09-06 19:22:44 -07004309
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004310 pwr->dev = &pdev->dev;
4311 pwr->pdev = pdev;
4312 mutex_init(&pwr->power_lock);
4313 spin_lock_init(&pwr->clock_refs_lock);
Patrick Daly2764f952016-09-06 19:22:44 -07004314
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004315 ret = arm_smmu_init_clocks(pwr);
4316 if (ret)
4317 return ERR_PTR(ret);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004318
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004319 ret = arm_smmu_init_regulators(pwr);
4320 if (ret)
4321 return ERR_PTR(ret);
4322
4323 ret = arm_smmu_init_bus_scaling(pwr);
4324 if (ret)
4325 return ERR_PTR(ret);
4326
4327 return pwr;
Patrick Daly2764f952016-09-06 19:22:44 -07004328}
4329
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004330/*
Patrick Dalyabeee952017-04-13 18:14:59 -07004331 * Bus APIs are devm-safe.
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004332 */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004333static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004334{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004335 msm_bus_scale_unregister_client(pwr->bus_client);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004336}
4337
Will Deacon45ae7cf2013-06-24 18:31:25 +01004338static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
4339{
4340 unsigned long size;
4341 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
4342 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01004343 bool cttw_dt, cttw_reg;
Robin Murphya754fd12016-09-12 17:13:50 +01004344 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004345
Charan Teja Reddyec6f7822018-01-10 17:32:52 +05304346 if (arm_smmu_restore_sec_cfg(smmu, 0))
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304347 return -ENODEV;
4348
Mitchel Humpherysba822582015-10-20 11:37:41 -07004349 dev_dbg(smmu->dev, "probing hardware configuration...\n");
4350 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01004351 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004352
4353 /* ID0 */
4354 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01004355
4356 /* Restrict available stages based on module parameter */
4357 if (force_stage == 1)
4358 id &= ~(ID0_S2TS | ID0_NTS);
4359 else if (force_stage == 2)
4360 id &= ~(ID0_S1TS | ID0_NTS);
4361
Will Deacon45ae7cf2013-06-24 18:31:25 +01004362 if (id & ID0_S1TS) {
4363 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004364 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004365 }
4366
4367 if (id & ID0_S2TS) {
4368 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004369 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004370 }
4371
4372 if (id & ID0_NTS) {
4373 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004374 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004375 }
4376
4377 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01004378 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004379 dev_err(smmu->dev, "\tno translation support!\n");
4380 return -ENODEV;
4381 }
4382
Robin Murphyb7862e32016-04-13 18:13:03 +01004383 if ((id & ID0_S1TS) &&
4384 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00004385 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004386 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00004387 }
4388
Robin Murphybae2c2d2015-07-29 19:46:05 +01004389 /*
4390 * In order for DMA API calls to work properly, we must defer to what
4391 * the DT says about coherency, regardless of what the hardware claims.
4392 * Fortunately, this also opens up a workaround for systems where the
4393 * ID register value has ended up configured incorrectly.
4394 */
4395 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
4396 cttw_reg = !!(id & ID0_CTTW);
4397 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01004398 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01004399 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004400 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01004401 cttw_dt ? "" : "non-");
4402 if (cttw_dt != cttw_reg)
4403 dev_notice(smmu->dev,
4404 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004405
Robin Murphy53867802016-09-12 17:13:48 +01004406 /* Max. number of entries we have for stream matching/indexing */
4407 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
4408 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004409 if (id & ID0_SMS) {
Robin Murphy53867802016-09-12 17:13:48 +01004410 u32 smr;
Patrick Daly937de532016-12-12 18:44:09 -08004411 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004412
4413 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy53867802016-09-12 17:13:48 +01004414 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
4415 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004416 dev_err(smmu->dev,
4417 "stream-matching supported, but no SMRs present!\n");
4418 return -ENODEV;
4419 }
4420
Robin Murphy53867802016-09-12 17:13:48 +01004421 /*
4422 * SMR.ID bits may not be preserved if the corresponding MASK
4423 * bits are set, so check each one separately. We can reject
4424 * masters later if they try to claim IDs outside these masks.
4425 */
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304426 if (!arm_smmu_is_static_cb(smmu)) {
4427 for (i = 0; i < size; i++) {
4428 smr = readl_relaxed(
4429 gr0_base + ARM_SMMU_GR0_SMR(i));
4430 if (!(smr & SMR_VALID))
4431 break;
4432 }
4433 if (i == size) {
4434 dev_err(smmu->dev,
4435 "Unable to compute streamid_masks\n");
4436 return -ENODEV;
4437 }
4438
4439 smr = smmu->streamid_mask << SMR_ID_SHIFT;
4440 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
Patrick Daly937de532016-12-12 18:44:09 -08004441 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304442 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
Patrick Daly937de532016-12-12 18:44:09 -08004443
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304444 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
4445 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
4446 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
4447 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
4448 } else {
4449 smmu->smr_mask_mask = SMR_MASK_MASK;
4450 smmu->streamid_mask = SID_MASK;
4451 }
Dhaval Patel031d7462015-05-09 14:47:29 -07004452
Robin Murphy468f4942016-09-12 17:13:49 +01004453 /* Zero-initialised to mark as invalid */
4454 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
4455 GFP_KERNEL);
4456 if (!smmu->smrs)
4457 return -ENOMEM;
4458
Robin Murphy53867802016-09-12 17:13:48 +01004459 dev_notice(smmu->dev,
4460 "\tstream matching with %lu register groups, mask 0x%x",
4461 size, smmu->smr_mask_mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004462 }
Robin Murphya754fd12016-09-12 17:13:50 +01004463 /* s2cr->type == 0 means translation, so initialise explicitly */
4464 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
4465 GFP_KERNEL);
4466 if (!smmu->s2crs)
4467 return -ENOMEM;
4468 for (i = 0; i < size; i++)
4469 smmu->s2crs[i] = s2cr_init_val;
4470
Robin Murphy53867802016-09-12 17:13:48 +01004471 smmu->num_mapping_groups = size;
Robin Murphy6668f692016-09-12 17:13:54 +01004472 mutex_init(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004473
Robin Murphy7602b872016-04-28 17:12:09 +01004474 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
4475 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
4476 if (!(id & ID0_PTFS_NO_AARCH32S))
4477 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
4478 }
4479
Will Deacon45ae7cf2013-06-24 18:31:25 +01004480 /* ID1 */
4481 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01004482 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004483
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01004484 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00004485 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01004486 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01004487 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07004488 dev_warn(smmu->dev,
4489 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
4490 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004491
Will Deacon518f7132014-11-14 17:17:54 +00004492 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004493 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
4494 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
4495 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
4496 return -ENODEV;
4497 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07004498 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01004499 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01004500 /*
4501 * Cavium CN88xx erratum #27704.
4502 * Ensure ASID and VMID allocation is unique across all SMMUs in
4503 * the system.
4504 */
4505 if (smmu->model == CAVIUM_SMMUV2) {
4506 smmu->cavium_id_base =
4507 atomic_add_return(smmu->num_context_banks,
4508 &cavium_smmu_context_count);
4509 smmu->cavium_id_base -= smmu->num_context_banks;
4510 }
Robin Murphy6549a1f2017-08-08 14:56:14 +01004511 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
4512 sizeof(*smmu->cbs), GFP_KERNEL);
4513 if (!smmu->cbs)
4514 return -ENOMEM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004515
4516 /* ID2 */
4517 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
4518 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00004519 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004520
Will Deacon518f7132014-11-14 17:17:54 +00004521 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01004522 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00004523 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004524
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08004525 if (id & ID2_VMID16)
4526 smmu->features |= ARM_SMMU_FEAT_VMID16;
4527
Robin Murphyf1d84542015-03-04 16:41:05 +00004528 /*
4529 * What the page table walker can address actually depends on which
4530 * descriptor format is in use, but since a) we don't know that yet,
4531 * and b) it can vary per context bank, this will have to do...
4532 */
4533 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
4534 dev_warn(smmu->dev,
4535 "failed to set DMA mask for table walker\n");
4536
Robin Murphyb7862e32016-04-13 18:13:03 +01004537 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00004538 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01004539 if (smmu->version == ARM_SMMU_V1_64K)
4540 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004541 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004542 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00004543 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00004544 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01004545 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00004546 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01004547 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00004548 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01004549 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004550 }
4551
Robin Murphy7602b872016-04-28 17:12:09 +01004552 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01004553 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01004554 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01004555 if (smmu->features &
4556 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01004557 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01004558 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01004559 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01004560 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01004561 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01004562
Robin Murphyd5466352016-05-09 17:20:09 +01004563 if (arm_smmu_ops.pgsize_bitmap == -1UL)
4564 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
4565 else
4566 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004567 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01004568 smmu->pgsize_bitmap);
4569
Will Deacon518f7132014-11-14 17:17:54 +00004570
Will Deacon28d60072014-09-01 16:24:48 +01004571 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004572 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
4573 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01004574
4575 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004576 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
4577 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01004578
Will Deacon45ae7cf2013-06-24 18:31:25 +01004579 return 0;
4580}
4581
Robin Murphy67b65a32016-04-13 18:12:57 +01004582struct arm_smmu_match_data {
4583 enum arm_smmu_arch_version version;
4584 enum arm_smmu_implementation model;
Patrick Dalyd7476202016-09-08 18:23:28 -07004585 struct arm_smmu_arch_ops *arch_ops;
Robin Murphy67b65a32016-04-13 18:12:57 +01004586};
4587
Patrick Dalyd7476202016-09-08 18:23:28 -07004588#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops) \
4589static struct arm_smmu_match_data name = { \
4590.version = ver, \
4591.model = imp, \
4592.arch_ops = ops, \
4593} \
Robin Murphy67b65a32016-04-13 18:12:57 +01004594
Patrick Daly1f8a2882016-09-12 17:32:05 -07004595struct arm_smmu_arch_ops qsmmuv500_arch_ops;
4596
Patrick Dalyd7476202016-09-08 18:23:28 -07004597ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
4598ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
4599ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
4600ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
4601ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
Patrick Dalyad441dd2016-09-15 15:50:46 -07004602ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004603ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
4604 &qsmmuv500_arch_ops);
Robin Murphy67b65a32016-04-13 18:12:57 +01004605
Joerg Roedel09b52692014-10-02 12:24:45 +02004606static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01004607 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
4608 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
4609 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01004610 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01004611 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01004612 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07004613 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Patrick Daly1f8a2882016-09-12 17:32:05 -07004614 { .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
Robin Murphy09360402014-08-28 17:51:59 +01004615 { },
4616};
4617MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
4618
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304619#ifdef CONFIG_MSM_TZ_SMMU
4620int register_iommu_sec_ptbl(void)
4621{
4622 struct device_node *np;
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004623
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304624 for_each_matching_node(np, arm_smmu_of_match)
4625 if (of_find_property(np, "qcom,tz-device-id", NULL) &&
4626 of_device_is_available(np))
4627 break;
4628 if (!np)
4629 return -ENODEV;
4630
4631 of_node_put(np);
4632
4633 return msm_iommu_sec_pgtbl_init();
4634}
4635#endif
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004636static int arm_smmu_of_iommu_configure_fixup(struct device *dev, void *data)
4637{
4638 if (!dev->iommu_fwspec)
4639 of_iommu_configure(dev, dev->of_node);
4640 return 0;
4641}
4642
Patrick Daly000a2f22017-02-13 22:18:12 -08004643static int arm_smmu_add_device_fixup(struct device *dev, void *data)
4644{
4645 struct iommu_ops *ops = data;
4646
4647 ops->add_device(dev);
4648 return 0;
4649}
4650
Patrick Daly1f8a2882016-09-12 17:32:05 -07004651static int qsmmuv500_tbu_register(struct device *dev, void *data);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004652static int arm_smmu_device_dt_probe(struct platform_device *pdev)
4653{
Robin Murphy67b65a32016-04-13 18:12:57 +01004654 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004655 struct resource *res;
4656 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004657 struct device *dev = &pdev->dev;
Robin Murphyd5b41782016-09-14 15:21:39 +01004658 int num_irqs, i, err;
Robin Murphy7e96c742016-09-14 15:26:46 +01004659 bool legacy_binding;
4660
4661 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
4662 if (legacy_binding && !using_generic_binding) {
4663 if (!using_legacy_binding)
4664 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
4665 using_legacy_binding = true;
4666 } else if (!legacy_binding && !using_legacy_binding) {
4667 using_generic_binding = true;
4668 } else {
4669 dev_err(dev, "not probing due to mismatched DT properties\n");
4670 return -ENODEV;
4671 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004672
4673 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
4674 if (!smmu) {
4675 dev_err(dev, "failed to allocate arm_smmu_device\n");
4676 return -ENOMEM;
4677 }
4678 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08004679 spin_lock_init(&smmu->atos_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07004680 idr_init(&smmu->asid_idr);
4681 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004682
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004683 data = of_device_get_match_data(dev);
Robin Murphy67b65a32016-04-13 18:12:57 +01004684 smmu->version = data->version;
4685 smmu->model = data->model;
Patrick Dalyd7476202016-09-08 18:23:28 -07004686 smmu->arch_ops = data->arch_ops;
Robin Murphy09360402014-08-28 17:51:59 +01004687
Will Deacon45ae7cf2013-06-24 18:31:25 +01004688 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Prakash Guptaa87818d2018-02-09 19:24:02 +05304689 if (res == NULL) {
4690 dev_err(dev, "no MEM resource info\n");
4691 return -EINVAL;
4692 }
4693
4694 smmu->phys_addr = res->start;
Julia Lawall8a7f4312013-08-19 12:20:37 +01004695 smmu->base = devm_ioremap_resource(dev, res);
4696 if (IS_ERR(smmu->base))
4697 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004698 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004699
4700 if (of_property_read_u32(dev->of_node, "#global-interrupts",
4701 &smmu->num_global_irqs)) {
4702 dev_err(dev, "missing #global-interrupts property\n");
4703 return -ENODEV;
4704 }
4705
4706 num_irqs = 0;
4707 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
4708 num_irqs++;
4709 if (num_irqs > smmu->num_global_irqs)
4710 smmu->num_context_irqs++;
4711 }
4712
Andreas Herrmann44a08de2013-10-01 13:39:07 +01004713 if (!smmu->num_context_irqs) {
4714 dev_err(dev, "found %d interrupts but expected at least %d\n",
4715 num_irqs, smmu->num_global_irqs + 1);
4716 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004717 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004718
4719 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
4720 GFP_KERNEL);
4721 if (!smmu->irqs) {
4722 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
4723 return -ENOMEM;
4724 }
4725
4726 for (i = 0; i < num_irqs; ++i) {
4727 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07004728
Will Deacon45ae7cf2013-06-24 18:31:25 +01004729 if (irq < 0) {
4730 dev_err(dev, "failed to get irq index %d\n", i);
4731 return -ENODEV;
4732 }
4733 smmu->irqs[i] = irq;
4734 }
4735
Dhaval Patel031d7462015-05-09 14:47:29 -07004736 parse_driver_options(smmu);
Patrick Dalyaddf1f82018-04-23 14:39:19 -07004737 parse_static_cb_cfg(smmu);
Dhaval Patel031d7462015-05-09 14:47:29 -07004738
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004739 smmu->pwr = arm_smmu_init_power_resources(pdev);
4740 if (IS_ERR(smmu->pwr))
4741 return PTR_ERR(smmu->pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004742
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004743 err = arm_smmu_power_on(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004744 if (err)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004745 goto out_exit_power_resources;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004746
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304747 smmu->sec_id = msm_dev_to_device_id(dev);
Charan Teja Reddy65ff5e42018-02-19 15:32:28 +05304748 INIT_LIST_HEAD(&smmu->list);
4749 spin_lock(&arm_smmu_devices_lock);
4750 list_add(&smmu->list, &arm_smmu_devices);
4751 spin_unlock(&arm_smmu_devices_lock);
4752
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004753 err = arm_smmu_device_cfg_probe(smmu);
4754 if (err)
4755 goto out_power_off;
4756
Patrick Dalyda688822017-05-17 20:12:48 -07004757 err = arm_smmu_handoff_cbs(smmu);
4758 if (err)
4759 goto out_power_off;
4760
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004761 err = arm_smmu_parse_impl_def_registers(smmu);
4762 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004763 goto out_power_off;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004764
Robin Murphyb7862e32016-04-13 18:13:03 +01004765 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01004766 smmu->num_context_banks != smmu->num_context_irqs) {
4767 dev_err(dev,
Mitchel Humpherys73230ce2014-12-11 17:18:02 -08004768 "found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
4769 smmu->num_context_irqs, smmu->num_context_banks,
4770 smmu->num_context_banks);
4771 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004772 }
4773
Will Deacon45ae7cf2013-06-24 18:31:25 +01004774 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08004775 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
4776 NULL, arm_smmu_global_fault,
4777 IRQF_ONESHOT | IRQF_SHARED,
4778 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004779 if (err) {
4780 dev_err(dev, "failed to request global IRQ %d (%u)\n",
4781 i, smmu->irqs[i]);
Robin Murphyd5b41782016-09-14 15:21:39 +01004782 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004783 }
4784 }
4785
Patrick Dalyd7476202016-09-08 18:23:28 -07004786 err = arm_smmu_arch_init(smmu);
4787 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004788 goto out_power_off;
Patrick Dalyd7476202016-09-08 18:23:28 -07004789
Robin Murphy06e393e2016-09-12 17:13:55 +01004790 of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004791 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01004792 arm_smmu_device_reset(smmu);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004793 arm_smmu_power_off(smmu->pwr);
Patrick Dalyd7476202016-09-08 18:23:28 -07004794
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004795 /* bus_set_iommu depends on this. */
4796 bus_for_each_dev(&platform_bus_type, NULL, NULL,
4797 arm_smmu_of_iommu_configure_fixup);
4798
Robin Murphy7e96c742016-09-14 15:26:46 +01004799 /* Oh, for a proper bus abstraction */
4800 if (!iommu_present(&platform_bus_type))
4801 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
Patrick Daly000a2f22017-02-13 22:18:12 -08004802 else
4803 bus_for_each_dev(&platform_bus_type, NULL, &arm_smmu_ops,
4804 arm_smmu_add_device_fixup);
Charan Teja Reddyf8464882017-12-05 20:29:05 +05304805
4806 err = register_regulator_notifier(smmu);
4807 if (err)
4808 goto out_power_off;
4809
Robin Murphy7e96c742016-09-14 15:26:46 +01004810#ifdef CONFIG_ARM_AMBA
4811 if (!iommu_present(&amba_bustype))
4812 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
4813#endif
4814#ifdef CONFIG_PCI
4815 if (!iommu_present(&pci_bus_type)) {
4816 pci_request_acs();
4817 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
4818 }
4819#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01004820 return 0;
4821
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004822out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004823 arm_smmu_power_off(smmu->pwr);
Charan Teja Reddy65ff5e42018-02-19 15:32:28 +05304824 spin_lock(&arm_smmu_devices_lock);
4825 list_del(&smmu->list);
4826 spin_unlock(&arm_smmu_devices_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004827
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004828out_exit_power_resources:
4829 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004830
Will Deacon45ae7cf2013-06-24 18:31:25 +01004831 return err;
4832}
4833
4834static int arm_smmu_device_remove(struct platform_device *pdev)
4835{
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004836 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004837
4838 if (!smmu)
4839 return -ENODEV;
4840
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004841 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07004842 return -EINVAL;
4843
Patrick Dalyaddf1f82018-04-23 14:39:19 -07004844 if (!(bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS) &&
4845 (bitmap_empty(smmu->secure_context_map, ARM_SMMU_MAX_CBS) ||
4846 arm_smmu_opt_hibernation(smmu))))
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004847 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004848
Patrick Dalyc190d932016-08-30 17:23:28 -07004849 idr_destroy(&smmu->asid_idr);
4850
Will Deacon45ae7cf2013-06-24 18:31:25 +01004851 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07004852 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004853 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004854
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004855 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004856
Will Deacon45ae7cf2013-06-24 18:31:25 +01004857 return 0;
4858}
4859
Patrick Dalyaddf1f82018-04-23 14:39:19 -07004860static int arm_smmu_pm_freeze(struct device *dev)
4861{
4862 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
4863
4864 if (!arm_smmu_opt_hibernation(smmu)) {
4865 dev_err(smmu->dev, "Aborting: Hibernation not supported\n");
4866 return -EINVAL;
4867 }
4868 return 0;
4869}
4870
4871static int arm_smmu_pm_restore(struct device *dev)
4872{
4873 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
4874 int ret;
4875
4876 ret = arm_smmu_power_on(smmu->pwr);
4877 if (ret)
4878 return ret;
4879
4880 arm_smmu_device_reset(smmu);
4881 arm_smmu_power_off(smmu->pwr);
4882 return 0;
4883}
4884
4885static const struct dev_pm_ops arm_smmu_pm_ops = {
4886#ifdef CONFIG_PM_SLEEP
4887 .freeze = arm_smmu_pm_freeze,
4888 .restore = arm_smmu_pm_restore,
4889#endif
4890};
4891
Will Deacon45ae7cf2013-06-24 18:31:25 +01004892static struct platform_driver arm_smmu_driver = {
4893 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004894 .name = "arm-smmu",
4895 .of_match_table = of_match_ptr(arm_smmu_of_match),
Patrick Dalyaddf1f82018-04-23 14:39:19 -07004896 .pm = &arm_smmu_pm_ops,
Will Deacon45ae7cf2013-06-24 18:31:25 +01004897 },
4898 .probe = arm_smmu_device_dt_probe,
4899 .remove = arm_smmu_device_remove,
4900};
4901
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004902static struct platform_driver qsmmuv500_tbu_driver;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004903static int __init arm_smmu_init(void)
4904{
Robin Murphy7e96c742016-09-14 15:26:46 +01004905 static bool registered;
4906 int ret = 0;
Patrick Dalyaddf1f82018-04-23 14:39:19 -07004907 struct device_node *node;
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004908 ktime_t cur;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004909
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004910 if (registered)
4911 return 0;
4912
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004913 cur = ktime_get();
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004914 ret = platform_driver_register(&qsmmuv500_tbu_driver);
4915 if (ret)
4916 return ret;
4917
4918 ret = platform_driver_register(&arm_smmu_driver);
Patrick Dalyaddf1f82018-04-23 14:39:19 -07004919	/* Disable secure use cases if hibernation support is enabled */
4920 node = of_find_compatible_node(NULL, NULL, "qcom,qsmmu-v500");
4921 if (IS_ENABLED(CONFIG_MSM_TZ_SMMU) && node &&
4922 !of_find_property(node, "qcom,hibernation-support", NULL))
4923 ret = register_iommu_sec_ptbl();
4924
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004925 registered = !ret;
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004926 trace_smmu_init(ktime_us_delta(ktime_get(), cur));
4927
Robin Murphy7e96c742016-09-14 15:26:46 +01004928 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004929}
4930
4931static void __exit arm_smmu_exit(void)
4932{
4933 return platform_driver_unregister(&arm_smmu_driver);
4934}
4935
Andreas Herrmannb1950b22013-10-01 13:39:05 +01004936subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004937module_exit(arm_smmu_exit);
4938
Robin Murphy7e96c742016-09-14 15:26:46 +01004939static int __init arm_smmu_of_init(struct device_node *np)
4940{
4941 int ret = arm_smmu_init();
4942
4943 if (ret)
4944 return ret;
4945
4946 if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
4947 return -ENODEV;
4948
4949 return 0;
4950}
4951IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
4952IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
4953IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
4954IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
4955IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
4956IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
Robin Murphy7e96c742016-09-14 15:26:46 +01004957
Patrick Dalya0fddb62017-03-27 19:26:59 -07004958#define TCU_HW_VERSION_HLOS1 (0x18)
4959
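/*
 * Implementation-defined TBU debug registers (offsets relative to the TBU
 * register space), used by the qsmmuv500 code below to halt a TBU and to
 * run software-triggered address translations (ECATS).
 */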
Patrick Daly1f8a2882016-09-12 17:32:05 -07004960#define DEBUG_SID_HALT_REG 0x0
4961#define DEBUG_SID_HALT_VAL (0x1 << 16)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004962#define DEBUG_SID_HALT_SID_MASK 0x3ff
4963
4964#define DEBUG_VA_ADDR_REG 0x8
4965
4966#define DEBUG_TXN_TRIGG_REG 0x18
4967#define DEBUG_TXN_AXPROT_SHIFT 6
4968#define DEBUG_TXN_AXCACHE_SHIFT 2
4969#define DEBUG_TRX_WRITE (0x1 << 1)
4970#define DEBUG_TXN_READ (0x0 << 1)
4971#define DEBUG_TXN_TRIGGER 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004972
4973#define DEBUG_SR_HALT_ACK_REG 0x20
4974#define DEBUG_SR_HALT_ACK_VAL (0x1 << 1)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004975#define DEBUG_SR_ECATS_RUNNING_VAL (0x1 << 0)
4976
4977#define DEBUG_PAR_REG 0x28
4978#define DEBUG_PAR_PA_MASK ((0x1ULL << 36) - 1)
4979#define DEBUG_PAR_PA_SHIFT 12
4980#define DEBUG_PAR_FAULT_VAL 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004981
Patrick Daly8c1202b2017-05-10 15:42:30 -07004982#define TBU_DBG_TIMEOUT_US 100
Patrick Daly1f8a2882016-09-12 17:32:05 -07004983
Patrick Daly23301482017-10-12 16:18:25 -07004984#define QSMMUV500_ACTLR_DEEP_PREFETCH_MASK 0x3
4985#define QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT 0x8
4986
Patrick Daly03330cc2017-08-11 14:56:38 -07004987
4988struct actlr_setting {
4989 struct arm_smmu_smr smr;
4990 u32 actlr;
4991};
4992
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004993struct qsmmuv500_archdata {
4994 struct list_head tbus;
Patrick Dalya0fddb62017-03-27 19:26:59 -07004995 void __iomem *tcu_base;
4996 u32 version;
Patrick Dalyda765c62017-09-11 16:31:07 -07004997
4998 struct actlr_setting *actlrs;
4999 u32 actlr_tbl_size;
5000
5001 struct arm_smmu_smr *errata1_clients;
5002 u32 num_errata1_clients;
5003 remote_spinlock_t errata1_lock;
5004 ktime_t last_tlbi_ktime;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005005};
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005006#define get_qsmmuv500_archdata(smmu) \
5007 ((struct qsmmuv500_archdata *)(smmu->archdata))
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005008
Patrick Daly1f8a2882016-09-12 17:32:05 -07005009struct qsmmuv500_tbu_device {
5010 struct list_head list;
5011 struct device *dev;
5012 struct arm_smmu_device *smmu;
5013 void __iomem *base;
5014 void __iomem *status_reg;
5015
5016 struct arm_smmu_power_resources *pwr;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005017 u32 sid_start;
5018 u32 num_sids;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005019
5020 /* Protects halt count */
5021 spinlock_t halt_lock;
5022 u32 halt_count;
5023};
5024
Patrick Daly03330cc2017-08-11 14:56:38 -07005025struct qsmmuv500_group_iommudata {
5026 bool has_actlr;
5027 u32 actlr;
5028};
5029#define to_qsmmuv500_group_iommudata(group) \
5030 ((struct qsmmuv500_group_iommudata *) \
5031 (iommu_group_get_iommudata(group)))
5032
5033
5034static bool arm_smmu_fwspec_match_smr(struct iommu_fwspec *fwspec,
Patrick Dalyda765c62017-09-11 16:31:07 -07005035 struct arm_smmu_smr *smr)
5036{
5037 struct arm_smmu_smr *smr2;
Patrick Daly03330cc2017-08-11 14:56:38 -07005038 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Patrick Dalyda765c62017-09-11 16:31:07 -07005039 int i, idx;
5040
Patrick Daly03330cc2017-08-11 14:56:38 -07005041 for_each_cfg_sme(fwspec, i, idx) {
5042 smr2 = &smmu->smrs[idx];
Patrick Dalyda765c62017-09-11 16:31:07 -07005043 /* Continue if table entry does not match */
5044 if ((smr->id ^ smr2->id) & ~(smr->mask | smr2->mask))
5045 continue;
5046 return true;
5047 }
5048 return false;
5049}
5050
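/*
 * Errata workaround for affected clients: full TLB invalidations are
 * serialised against remote masters with a remote spinlock and spaced at
 * least ERRATA1_TLBI_INTERVAL_US apart; on a TLBSYNC timeout the NoC is
 * throttled and TZ is asked to toggle the errata configuration.
 */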
5051#define ERRATA1_REMOTE_SPINLOCK "S:6"
5052#define ERRATA1_TLBI_INTERVAL_US 10
5053static bool
5054qsmmuv500_errata1_required(struct arm_smmu_domain *smmu_domain,
5055 struct qsmmuv500_archdata *data)
5056{
5057 bool ret = false;
5058 int j;
5059 struct arm_smmu_smr *smr;
Patrick Daly03330cc2017-08-11 14:56:38 -07005060 struct iommu_fwspec *fwspec;
Patrick Dalyda765c62017-09-11 16:31:07 -07005061
5062 if (smmu_domain->qsmmuv500_errata1_init)
5063 return smmu_domain->qsmmuv500_errata1_client;
5064
Patrick Daly03330cc2017-08-11 14:56:38 -07005065 fwspec = smmu_domain->dev->iommu_fwspec;
Patrick Dalyda765c62017-09-11 16:31:07 -07005066 for (j = 0; j < data->num_errata1_clients; j++) {
5067 smr = &data->errata1_clients[j];
Patrick Daly03330cc2017-08-11 14:56:38 -07005068 if (arm_smmu_fwspec_match_smr(fwspec, smr)) {
Patrick Dalyda765c62017-09-11 16:31:07 -07005069 ret = true;
5070 break;
5071 }
5072 }
5073
5074 smmu_domain->qsmmuv500_errata1_init = true;
5075 smmu_domain->qsmmuv500_errata1_client = ret;
5076 return ret;
5077}
5078
Patrick Daly86960052017-12-04 18:53:13 -08005079#define SCM_CONFIG_ERRATA1_CLIENT_ALL 0x2
5080#define SCM_CONFIG_ERRATA1 0x3
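/*
 * Errata1 TLBIALL sequence: issue TLBIALL + TLBSYNC and give the sync a short
 * window to complete. If it does not, ask TZ to disable the errata1
 * configuration, throttle the NoC while waiting for the sync to finish, then
 * restore both. A failure at any point leaves the IOMMU hardware in an
 * unknown state, hence the BUG() calls.
 */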
Patrick Dalyda765c62017-09-11 16:31:07 -07005081static void __qsmmuv500_errata1_tlbiall(struct arm_smmu_domain *smmu_domain)
5082{
5083 struct arm_smmu_device *smmu = smmu_domain->smmu;
5084 struct device *dev = smmu_domain->dev;
5085 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
5086 void __iomem *base;
Patrick Daly86960052017-12-04 18:53:13 -08005087 int ret;
Patrick Dalyda765c62017-09-11 16:31:07 -07005088 ktime_t cur;
5089 u32 val;
Patrick Daly86960052017-12-04 18:53:13 -08005090 struct scm_desc desc = {
5091 .args[0] = SCM_CONFIG_ERRATA1_CLIENT_ALL,
5092 .args[1] = false,
5093 .arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
5094 };
Patrick Dalyda765c62017-09-11 16:31:07 -07005095
5096 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
5097 writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
5098 writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
Patrick Daly86960052017-12-04 18:53:13 -08005099 if (!readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
5100 !(val & TLBSTATUS_SACTIVE), 0, 100))
5101 return;
5102
5103 ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_SMMU_PROGRAM,
5104 SCM_CONFIG_ERRATA1),
5105 &desc);
5106 if (ret) {
5107 dev_err(smmu->dev, "Calling into TZ to disable ERRATA1 failed - IOMMU hardware in bad state\n");
5108 BUG();
5109 return;
5110 }
5111
5112 cur = ktime_get();
5113 trace_tlbi_throttle_start(dev, 0);
5114 msm_bus_noc_throttle_wa(true);
5115
Patrick Dalyda765c62017-09-11 16:31:07 -07005116 if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
Patrick Daly86960052017-12-04 18:53:13 -08005117 !(val & TLBSTATUS_SACTIVE), 0, 10000)) {
5118		dev_err(smmu->dev, "ERRATA1 TLBSYNC timeout - IOMMU hardware in bad state\n");
5119 trace_tlbsync_timeout(dev, 0);
5120 BUG();
5121 }
Patrick Dalyda765c62017-09-11 16:31:07 -07005122
Patrick Daly86960052017-12-04 18:53:13 -08005123 msm_bus_noc_throttle_wa(false);
5124 trace_tlbi_throttle_end(dev, ktime_us_delta(ktime_get(), cur));
Patrick Dalyda765c62017-09-11 16:31:07 -07005125
Patrick Daly86960052017-12-04 18:53:13 -08005126 desc.args[1] = true;
5127 ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_SMMU_PROGRAM,
5128 SCM_CONFIG_ERRATA1),
5129 &desc);
5130 if (ret) {
5131 dev_err(smmu->dev, "Calling into TZ to reenable ERRATA1 failed - IOMMU hardware in bad state\n");
5132 BUG();
Patrick Dalyda765c62017-09-11 16:31:07 -07005133 }
5134}
5135
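/*
 * Invalidations are serialized under the errata1 remote spinlock; for
 * errata1 clients, successive TLBIALL operations are additionally spaced at
 * least ERRATA1_TLBI_INTERVAL_US apart.
 */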
5136/* Must be called with clocks/regulators enabled */
5137static void qsmmuv500_errata1_tlb_inv_context(void *cookie)
5138{
5139 struct arm_smmu_domain *smmu_domain = cookie;
5140 struct device *dev = smmu_domain->dev;
5141 struct qsmmuv500_archdata *data =
5142 get_qsmmuv500_archdata(smmu_domain->smmu);
5143 ktime_t cur;
Patrick Daly1faa3112017-10-31 16:40:40 -07005144 unsigned long flags;
Patrick Dalyda765c62017-09-11 16:31:07 -07005145 bool errata;
5146
5147 cur = ktime_get();
Prakash Gupta25f90512017-11-20 14:56:54 +05305148 trace_tlbi_start(dev, 0);
Patrick Dalyda765c62017-09-11 16:31:07 -07005149
5150 errata = qsmmuv500_errata1_required(smmu_domain, data);
Patrick Daly1faa3112017-10-31 16:40:40 -07005151 remote_spin_lock_irqsave(&data->errata1_lock, flags);
Patrick Dalyda765c62017-09-11 16:31:07 -07005152 if (errata) {
5153 s64 delta;
5154
5155 delta = ktime_us_delta(ktime_get(), data->last_tlbi_ktime);
5156 if (delta < ERRATA1_TLBI_INTERVAL_US)
5157 udelay(ERRATA1_TLBI_INTERVAL_US - delta);
5158
5159 __qsmmuv500_errata1_tlbiall(smmu_domain);
5160
5161 data->last_tlbi_ktime = ktime_get();
5162 } else {
5163 __qsmmuv500_errata1_tlbiall(smmu_domain);
5164 }
Patrick Daly1faa3112017-10-31 16:40:40 -07005165 remote_spin_unlock_irqrestore(&data->errata1_lock, flags);
Patrick Dalyda765c62017-09-11 16:31:07 -07005166
Prakash Gupta25f90512017-11-20 14:56:54 +05305167 trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
Patrick Dalyda765c62017-09-11 16:31:07 -07005168}
5169
5170static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops = {
5171 .tlb_flush_all = qsmmuv500_errata1_tlb_inv_context,
5172 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
5173 .free_pages_exact = arm_smmu_free_pages_exact,
5174};
5175
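/*
 * Halt all traffic through a TBU. The halt request only completes once
 * outstanding transactions have drained, so if a context fault is stalling
 * the pipeline the fault is terminated first (with fault interrupts
 * temporarily disabled) and the halt acknowledgement is polled again.
 * Nested calls are reference counted via halt_count.
 */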
Patrick Daly8c1202b2017-05-10 15:42:30 -07005176static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu,
5177 struct arm_smmu_domain *smmu_domain)
Patrick Daly1f8a2882016-09-12 17:32:05 -07005178{
5179 unsigned long flags;
Patrick Daly8c1202b2017-05-10 15:42:30 -07005180 u32 halt, fsr, sctlr_orig, sctlr, status;
5181 void __iomem *base, *cb_base;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005182
5183 spin_lock_irqsave(&tbu->halt_lock, flags);
5184 if (tbu->halt_count) {
5185 tbu->halt_count++;
5186 spin_unlock_irqrestore(&tbu->halt_lock, flags);
5187 return 0;
5188 }
5189
Patrick Daly8c1202b2017-05-10 15:42:30 -07005190 cb_base = ARM_SMMU_CB_BASE(smmu_domain->smmu) +
5191 ARM_SMMU_CB(smmu_domain->smmu, smmu_domain->cfg.cbndx);
Patrick Daly1f8a2882016-09-12 17:32:05 -07005192 base = tbu->base;
Patrick Daly8c1202b2017-05-10 15:42:30 -07005193 halt = readl_relaxed(base + DEBUG_SID_HALT_REG);
5194 halt |= DEBUG_SID_HALT_VAL;
5195 writel_relaxed(halt, base + DEBUG_SID_HALT_REG);
Patrick Daly1f8a2882016-09-12 17:32:05 -07005196
Patrick Daly8c1202b2017-05-10 15:42:30 -07005197 if (!readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
5198 (status & DEBUG_SR_HALT_ACK_VAL),
5199 0, TBU_DBG_TIMEOUT_US))
5200 goto out;
5201
5202 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
5203 if (!(fsr & FSR_FAULT)) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07005204 dev_err(tbu->dev, "Couldn't halt TBU!\n");
5205 spin_unlock_irqrestore(&tbu->halt_lock, flags);
5206 return -ETIMEDOUT;
5207 }
5208
Patrick Daly8c1202b2017-05-10 15:42:30 -07005209 /*
5210 * We are in a fault; Our request to halt the bus will not complete
5211 * until transactions in front of us (such as the fault itself) have
5212 * completed. Disable iommu faults and terminate any existing
5213 * transactions.
5214 */
5215 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
5216 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
5217 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
5218
5219 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
5220 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
5221
5222 if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
5223 (status & DEBUG_SR_HALT_ACK_VAL),
5224 0, TBU_DBG_TIMEOUT_US)) {
5225 dev_err(tbu->dev, "Couldn't halt TBU from fault context!\n");
5226 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
5227 spin_unlock_irqrestore(&tbu->halt_lock, flags);
5228 return -ETIMEDOUT;
5229 }
5230
5231 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
5232out:
Patrick Daly1f8a2882016-09-12 17:32:05 -07005233 tbu->halt_count = 1;
5234 spin_unlock_irqrestore(&tbu->halt_lock, flags);
5235 return 0;
5236}
5237
5238static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
5239{
5240 unsigned long flags;
5241 u32 val;
5242 void __iomem *base;
5243
5244 spin_lock_irqsave(&tbu->halt_lock, flags);
5245 if (!tbu->halt_count) {
5246 WARN(1, "%s: bad tbu->halt_count", dev_name(tbu->dev));
5247 spin_unlock_irqrestore(&tbu->halt_lock, flags);
5248 return;
5249
5250 } else if (tbu->halt_count > 1) {
5251 tbu->halt_count--;
5252 spin_unlock_irqrestore(&tbu->halt_lock, flags);
5253 return;
5254 }
5255
5256 base = tbu->base;
5257 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
5258 val &= ~DEBUG_SID_HALT_VAL;
5259 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
5260
5261 tbu->halt_count = 0;
5262 spin_unlock_irqrestore(&tbu->halt_lock, flags);
5263}
5264
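/* Map a stream ID to the TBU whose "qcom,stream-id-range" covers it. */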
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005265static struct qsmmuv500_tbu_device *qsmmuv500_find_tbu(
5266 struct arm_smmu_device *smmu, u32 sid)
5267{
5268 struct qsmmuv500_tbu_device *tbu = NULL;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005269 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005270
5271 list_for_each_entry(tbu, &data->tbus, list) {
5272 if (tbu->sid_start <= sid &&
5273 sid < tbu->sid_start + tbu->num_sids)
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07005274 return tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005275 }
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07005276 return NULL;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005277}
5278
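/*
 * Serialize ECATS usage: take the atos_lock and, on hardware newer than
 * version 1.0, wait for the TBU status register to indicate that the ECATS
 * interface can be used.
 */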
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005279static int qsmmuv500_ecats_lock(struct arm_smmu_domain *smmu_domain,
5280 struct qsmmuv500_tbu_device *tbu,
5281 unsigned long *flags)
5282{
5283 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005284 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005285 u32 val;
5286
5287 spin_lock_irqsave(&smmu->atos_lock, *flags);
5288 /* The status register is not accessible on version 1.0 */
5289 if (data->version == 0x01000000)
5290 return 0;
5291
5292 if (readl_poll_timeout_atomic(tbu->status_reg,
5293 val, (val == 0x1), 0,
5294 TBU_DBG_TIMEOUT_US)) {
5295 dev_err(tbu->dev, "ECATS hw busy!\n");
5296 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
5297 return -ETIMEDOUT;
5298 }
5299
5300 return 0;
5301}
5302
5303static void qsmmuv500_ecats_unlock(struct arm_smmu_domain *smmu_domain,
5304 struct qsmmuv500_tbu_device *tbu,
5305 unsigned long *flags)
5306{
5307 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005308 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005309
5310 /* The status register is not accessible on version 1.0 */
5311 if (data->version != 0x01000000)
5312 writel_relaxed(0, tbu->status_reg);
5313 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
5314}
5315
5316/*
5317 * Returns the translated physical address; zero means failure.
5318 */
5319static phys_addr_t qsmmuv500_iova_to_phys(
5320 struct iommu_domain *domain, dma_addr_t iova, u32 sid)
5321{
5322 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
5323 struct arm_smmu_device *smmu = smmu_domain->smmu;
5324 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
5325 struct qsmmuv500_tbu_device *tbu;
5326 int ret;
5327 phys_addr_t phys = 0;
5328 u64 val, fsr;
5329 unsigned long flags;
5330 void __iomem *cb_base;
5331 u32 sctlr_orig, sctlr;
5332 int needs_redo = 0;
Patrick Daly8c1202b2017-05-10 15:42:30 -07005333 ktime_t timeout;
5334
5335	/* Only 36-bit IOVAs are supported */
5336 if (iova >= (1ULL << 36)) {
5337 dev_err_ratelimited(smmu->dev, "ECATS: address too large: %pad\n",
5338 &iova);
5339 return 0;
5340 }
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005341
5342 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
5343 tbu = qsmmuv500_find_tbu(smmu, sid);
5344 if (!tbu)
5345 return 0;
5346
5347 ret = arm_smmu_power_on(tbu->pwr);
5348 if (ret)
5349 return 0;
5350
Patrick Daly8c1202b2017-05-10 15:42:30 -07005351 ret = qsmmuv500_tbu_halt(tbu, smmu_domain);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005352 if (ret)
5353 goto out_power_off;
5354
Patrick Daly8c1202b2017-05-10 15:42:30 -07005355 /*
5356 * ECATS can trigger the fault interrupt, so disable it temporarily
5357 * and check for an interrupt manually.
5358 */
5359 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
5360 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
5361 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
5362
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005363 /* Only one concurrent atos operation */
5364 ret = qsmmuv500_ecats_lock(smmu_domain, tbu, &flags);
5365 if (ret)
5366 goto out_resume;
5367
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005368redo:
5369 /* Set address and stream-id */
5370 val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
5371 val |= sid & DEBUG_SID_HALT_SID_MASK;
5372 writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
5373 writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
5374
5375 /*
5376 * Write-back Read and Write-Allocate
5377	 * Privileged, non-secure, data transaction
5378 * Read operation.
5379 */
5380 val = 0xF << DEBUG_TXN_AXCACHE_SHIFT;
5381 val |= 0x3 << DEBUG_TXN_AXPROT_SHIFT;
5382 val |= DEBUG_TXN_TRIGGER;
5383 writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);
5384
5385 ret = 0;
Patrick Daly8c1202b2017-05-10 15:42:30 -07005386	/* Based on readx_poll_timeout_atomic() */
5387 timeout = ktime_add_us(ktime_get(), TBU_DBG_TIMEOUT_US);
5388 for (;;) {
5389 val = readl_relaxed(tbu->base + DEBUG_SR_HALT_ACK_REG);
5390 if (!(val & DEBUG_SR_ECATS_RUNNING_VAL))
5391 break;
5392 val = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
5393 if (val & FSR_FAULT)
5394 break;
5395 if (ktime_compare(ktime_get(), timeout) > 0) {
5396 dev_err(tbu->dev, "ECATS translation timed out!\n");
5397 ret = -ETIMEDOUT;
5398 break;
5399 }
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005400 }
5401
5402 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
5403 if (fsr & FSR_FAULT) {
5404 dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx\n",
Patrick Daly8c1202b2017-05-10 15:42:30 -07005405 fsr);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005406 ret = -EINVAL;
5407
5408		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
5409 /*
5410 * Clear pending interrupts
5411 * Barrier required to ensure that the FSR is cleared
5412 * before resuming SMMU operation
5413 */
5414 wmb();
5415 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
5416 }
5417
5418 val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
5419 if (val & DEBUG_PAR_FAULT_VAL) {
5420 dev_err(tbu->dev, "ECATS translation failed! PAR = %llx\n",
5421 val);
5422 ret = -EINVAL;
5423 }
5424
5425 phys = (val >> DEBUG_PAR_PA_SHIFT) & DEBUG_PAR_PA_MASK;
5426 if (ret < 0)
5427 phys = 0;
5428
5429 /* Reset hardware */
5430 writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
5431 writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
5432
5433 /*
5434 * After a failed translation, the next successful translation will
5435 * incorrectly be reported as a failure.
5436 */
5437 if (!phys && needs_redo++ < 2)
5438 goto redo;
5439
5440 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
5441 qsmmuv500_ecats_unlock(smmu_domain, tbu, &flags);
5442
5443out_resume:
5444 qsmmuv500_tbu_resume(tbu);
5445
5446out_power_off:
5447 arm_smmu_power_off(tbu->pwr);
5448
5449 return phys;
5450}
5451
5452static phys_addr_t qsmmuv500_iova_to_phys_hard(
5453 struct iommu_domain *domain, dma_addr_t iova)
5454{
5455 u16 sid;
5456 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
5457 struct iommu_fwspec *fwspec;
5458
5459 /* Select a sid */
5460 fwspec = smmu_domain->dev->iommu_fwspec;
5461 sid = (u16)fwspec->ids[0];
5462
5463 return qsmmuv500_iova_to_phys(domain, iova, sid);
5464}
5465
Patrick Daly03330cc2017-08-11 14:56:38 -07005466static void qsmmuv500_release_group_iommudata(void *data)
5467{
5468 kfree(data);
5469}
5470
5471/* All devices in a group that have an ACTLR setting must use the same value */
5472static int qsmmuv500_device_group(struct device *dev,
5473 struct iommu_group *group)
5474{
5475 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
5476 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
5477 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
5478 struct qsmmuv500_group_iommudata *iommudata;
5479 u32 actlr, i;
5480 struct arm_smmu_smr *smr;
5481
5482 iommudata = to_qsmmuv500_group_iommudata(group);
5483 if (!iommudata) {
5484 iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
5485 if (!iommudata)
5486 return -ENOMEM;
5487
5488 iommu_group_set_iommudata(group, iommudata,
5489 qsmmuv500_release_group_iommudata);
5490 }
5491
5492 for (i = 0; i < data->actlr_tbl_size; i++) {
5493 smr = &data->actlrs[i].smr;
5494 actlr = data->actlrs[i].actlr;
5495
5496 if (!arm_smmu_fwspec_match_smr(fwspec, smr))
5497 continue;
5498
5499 if (!iommudata->has_actlr) {
5500 iommudata->actlr = actlr;
5501 iommudata->has_actlr = true;
5502 } else if (iommudata->actlr != actlr) {
5503 return -EINVAL;
5504 }
5505 }
5506
5507 return 0;
5508}
5509
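/*
 * Record the group's ACTLR override in the cached context-bank state so it
 * is applied when the context bank is programmed. If the deep-prefetch bits
 * are set, mark the domain as needing the errata2 minimum (16 KB) alignment.
 */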
5510static void qsmmuv500_init_cb(struct arm_smmu_domain *smmu_domain,
5511 struct device *dev)
5512{
5513 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyad521082018-04-06 18:07:13 -07005514 struct arm_smmu_cb *cb = &smmu->cbs[smmu_domain->cfg.cbndx];
Patrick Daly03330cc2017-08-11 14:56:38 -07005515 struct qsmmuv500_group_iommudata *iommudata =
5516 to_qsmmuv500_group_iommudata(dev->iommu_group);
Patrick Daly03330cc2017-08-11 14:56:38 -07005517
5518 if (!iommudata->has_actlr)
5519 return;
5520
Patrick Dalyad521082018-04-06 18:07:13 -07005521 cb->actlr = iommudata->actlr;
Patrick Daly03330cc2017-08-11 14:56:38 -07005522 /*
Patrick Daly23301482017-10-12 16:18:25 -07005523 * Prefetch only works properly if the start and end of all
5524	 * buffers in the page table are aligned to 16 KB.
5525 */
Patrick Daly27bd9292017-11-22 13:59:59 -08005526 if ((iommudata->actlr >> QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT) &
Patrick Daly23301482017-10-12 16:18:25 -07005527 QSMMUV500_ACTLR_DEEP_PREFETCH_MASK)
5528 smmu_domain->qsmmuv500_errata2_min_align = true;
Patrick Daly03330cc2017-08-11 14:56:38 -07005529}
5530
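/*
 * Called via device_for_each_child() once the child TBU platform devices
 * have been populated; links each probed TBU to its parent SMMU.
 */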
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005531static int qsmmuv500_tbu_register(struct device *dev, void *cookie)
Patrick Daly1f8a2882016-09-12 17:32:05 -07005532{
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005533 struct arm_smmu_device *smmu = cookie;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005534 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005535 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07005536
5537 if (!dev->driver) {
5538 dev_err(dev, "TBU failed probe, QSMMUV500 cannot continue!\n");
5539 return -EINVAL;
5540 }
5541
5542 tbu = dev_get_drvdata(dev);
5543
5544 INIT_LIST_HEAD(&tbu->list);
5545 tbu->smmu = smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005546 list_add(&tbu->list, &data->tbus);
Patrick Daly1f8a2882016-09-12 17:32:05 -07005547 return 0;
5548}
5549
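/*
 * Parse the optional "qcom,mmu500-errata-1" property: a list of
 * (stream ID, mask) pairs identifying clients that need the errata1
 * TLB invalidation workaround.
 */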
Patrick Dalyda765c62017-09-11 16:31:07 -07005550static int qsmmuv500_parse_errata1(struct arm_smmu_device *smmu)
5551{
5552 int len, i;
5553 struct device *dev = smmu->dev;
5554 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
5555 struct arm_smmu_smr *smrs;
5556 const __be32 *cell;
5557
5558 cell = of_get_property(dev->of_node, "qcom,mmu500-errata-1", NULL);
5559 if (!cell)
5560 return 0;
5561
5562 remote_spin_lock_init(&data->errata1_lock, ERRATA1_REMOTE_SPINLOCK);
5563 len = of_property_count_elems_of_size(
5564 dev->of_node, "qcom,mmu500-errata-1", sizeof(u32) * 2);
5565 if (len < 0)
5566 return 0;
5567
5568 smrs = devm_kzalloc(dev, sizeof(*smrs) * len, GFP_KERNEL);
5569 if (!smrs)
5570 return -ENOMEM;
5571
5572 for (i = 0; i < len; i++) {
5573 smrs[i].id = of_read_number(cell++, 1);
5574 smrs[i].mask = of_read_number(cell++, 1);
5575 }
5576
5577 data->errata1_clients = smrs;
5578 data->num_errata1_clients = len;
5579 return 0;
5580}
5581
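/*
 * Parse the optional "qcom,actlr" property: a list of
 * (stream ID, mask, ACTLR value) triplets describing per-client ACTLR
 * overrides.
 */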
Patrick Daly03330cc2017-08-11 14:56:38 -07005582static int qsmmuv500_read_actlr_tbl(struct arm_smmu_device *smmu)
5583{
5584 int len, i;
5585 struct device *dev = smmu->dev;
5586 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
5587 struct actlr_setting *actlrs;
5588 const __be32 *cell;
5589
5590 cell = of_get_property(dev->of_node, "qcom,actlr", NULL);
5591 if (!cell)
5592 return 0;
5593
5594 len = of_property_count_elems_of_size(dev->of_node, "qcom,actlr",
5595 sizeof(u32) * 3);
5596 if (len < 0)
5597 return 0;
5598
5599 actlrs = devm_kzalloc(dev, sizeof(*actlrs) * len, GFP_KERNEL);
5600 if (!actlrs)
5601 return -ENOMEM;
5602
5603 for (i = 0; i < len; i++) {
5604 actlrs[i].smr.id = of_read_number(cell++, 1);
5605 actlrs[i].smr.mask = of_read_number(cell++, 1);
5606 actlrs[i].actlr = of_read_number(cell++, 1);
5607 }
5608
5609 data->actlrs = actlrs;
5610 data->actlr_tbl_size = len;
5611 return 0;
5612}
5613
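/*
 * QSMMUV500 probe-time setup: map the TCU register space, read the hardware
 * version, parse the errata1 and ACTLR tables, clear the sACR cache-lock bit
 * (when the secure world permits it) and register the child TBU devices.
 */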
Patrick Daly1f8a2882016-09-12 17:32:05 -07005614static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
5615{
Patrick Dalya0fddb62017-03-27 19:26:59 -07005616 struct resource *res;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005617 struct device *dev = smmu->dev;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005618 struct qsmmuv500_archdata *data;
Patrick Dalya0fddb62017-03-27 19:26:59 -07005619 struct platform_device *pdev;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005620 int ret;
Patrick Daly03330cc2017-08-11 14:56:38 -07005621 u32 val;
5622 void __iomem *reg;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005623
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005624 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
5625 if (!data)
Patrick Daly1f8a2882016-09-12 17:32:05 -07005626 return -ENOMEM;
5627
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005628 INIT_LIST_HEAD(&data->tbus);
Patrick Dalya0fddb62017-03-27 19:26:59 -07005629
5630 pdev = container_of(dev, struct platform_device, dev);
5631 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcu-base");
Charan Teja Reddy97fb6c52018-03-20 15:55:37 +05305632 if (!res) {
5633 dev_err(dev, "Unable to get the tcu-base\n");
5634 return -EINVAL;
5635 }
5636 data->tcu_base = devm_ioremap(dev, res->start, resource_size(res));
Patrick Dalya0fddb62017-03-27 19:26:59 -07005637 if (IS_ERR(data->tcu_base))
5638 return PTR_ERR(data->tcu_base);
5639
5640 data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005641 smmu->archdata = data;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005642
Charan Teja Reddy424ed342018-01-18 12:25:06 +05305643 if (arm_smmu_is_static_cb(smmu))
5644 return 0;
5645
Patrick Dalyda765c62017-09-11 16:31:07 -07005646 ret = qsmmuv500_parse_errata1(smmu);
5647 if (ret)
5648 return ret;
5649
Patrick Daly03330cc2017-08-11 14:56:38 -07005650 ret = qsmmuv500_read_actlr_tbl(smmu);
5651 if (ret)
5652 return ret;
5653
5654 reg = ARM_SMMU_GR0(smmu);
5655 val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
5656 val &= ~ARM_MMU500_ACR_CACHE_LOCK;
5657 writel_relaxed(val, reg + ARM_SMMU_GR0_sACR);
5658 val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
5659 /*
5660	 * Modifying the non-secure copy of the sACR register is only
5661 * allowed if permission is given in the secure sACR register.
5662 * Attempt to detect if we were able to update the value.
5663 */
5664 WARN_ON(val & ARM_MMU500_ACR_CACHE_LOCK);
5665
Patrick Daly1f8a2882016-09-12 17:32:05 -07005666 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
5667 if (ret)
5668 return ret;
5669
5670 /* Attempt to register child devices */
5671 ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
5672 if (ret)
Patrick Daly6ce54262017-04-12 21:24:06 -07005673 return -EPROBE_DEFER;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005674
5675 return 0;
5676}
5677
5678struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
5679 .init = qsmmuv500_arch_init,
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005680 .iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
Patrick Daly03330cc2017-08-11 14:56:38 -07005681 .init_context_bank = qsmmuv500_init_cb,
5682 .device_group = qsmmuv500_device_group,
Patrick Daly1f8a2882016-09-12 17:32:05 -07005683};
5684
5685static const struct of_device_id qsmmuv500_tbu_of_match[] = {
5686 {.compatible = "qcom,qsmmuv500-tbu"},
5687 {}
5688};
5689
5690static int qsmmuv500_tbu_probe(struct platform_device *pdev)
5691{
5692 struct resource *res;
5693 struct device *dev = &pdev->dev;
5694 struct qsmmuv500_tbu_device *tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005695 const __be32 *cell;
5696 int len;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005697
5698 tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
5699 if (!tbu)
5700 return -ENOMEM;
5701
5702 INIT_LIST_HEAD(&tbu->list);
5703 tbu->dev = dev;
5704 spin_lock_init(&tbu->halt_lock);
5705
5706 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
5707 tbu->base = devm_ioremap_resource(dev, res);
5708 if (IS_ERR(tbu->base))
5709 return PTR_ERR(tbu->base);
5710
5711 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "status-reg");
5712 tbu->status_reg = devm_ioremap_resource(dev, res);
5713 if (IS_ERR(tbu->status_reg))
5714 return PTR_ERR(tbu->status_reg);
5715
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005716 cell = of_get_property(dev->of_node, "qcom,stream-id-range", &len);
5717 if (!cell || len < 8)
5718 return -EINVAL;
5719
5720 tbu->sid_start = of_read_number(cell, 1);
5721 tbu->num_sids = of_read_number(cell + 1, 1);
5722
Patrick Daly1f8a2882016-09-12 17:32:05 -07005723 tbu->pwr = arm_smmu_init_power_resources(pdev);
5724 if (IS_ERR(tbu->pwr))
5725 return PTR_ERR(tbu->pwr);
5726
5727 dev_set_drvdata(dev, tbu);
5728 return 0;
5729}
5730
5731static struct platform_driver qsmmuv500_tbu_driver = {
5732 .driver = {
5733 .name = "qsmmuv500-tbu",
5734 .of_match_table = of_match_ptr(qsmmuv500_tbu_of_match),
5735 },
5736 .probe = qsmmuv500_tbu_probe,
5737};
5738
Will Deacon45ae7cf2013-06-24 18:31:25 +01005739MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
5740MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
5741MODULE_LICENSE("GPL v2");