/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/of_platform.h>
#include <linux/msm-bus.h>
#include <dt-bindings/msm/msm-bus-ids.h>
#include <linux/remote_spinlock.h>
#include <linux/ktime.h>
#include <trace/events/iommu.h>
#include <linux/notifier.h>
#include <dt-bindings/arm/arm-smmu.h>

#include <linux/amba/bus.h>
#include <soc/qcom/msm_tz_smmu.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))
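/*
 * For example (illustrative only), a global fault status read that must go
 * through the secure alias when ARM_SMMU_OPT_SECURE_CFG_ACCESS is set:
 *
 *	gfsr = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
 */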

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif
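/*
 * Illustrative use (see arm_smmu_tlb_inv_range_nosync() below): the stage 2
 * TLBI-by-IPA register is 64 bits wide on AArch64, but only the low word
 * matters for AArch32 formats, so a single call covers both cases:
 *
 *	smmu_write_atomic_lq(iova, reg);
 */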

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3
#define sCR0_SHCFG_SHIFT		22
#define sCR0_SHCFG_MASK			0x3
#define sCR0_SHCFG_NSH			3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		500000	/* 500ms */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7FFF
#define SID_MASK			0x7FFF
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_SHCFG_SHIFT		8
#define S2CR_SHCFG_MASK			0x3
#define S2CR_SHCFG_NSH			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FSRRESTORE		0x5c
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIALL		0x618
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define TLBSTATUS_SACTIVE		(1 << 0)
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_SHCFG_SHIFT		22
#define SCTLR_SHCFG_MASK		0x3
#define SCTLR_SHCFG_NSH			0x3
#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_HUPCF			(1 << 8)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)
#define TTBCR2_AS			(1 << 4)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
	QCOM_SMMUV500,
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

/*
 * attach_count
 *	The SMR and S2CR registers are only programmed when the number of
 *	devices attached to the iommu using these registers is > 0. This
 *	is required for the "SID switch" use case for secure display.
 *	Protected by stream_map_mutex.
 */
struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	int				attach_count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
	bool				cb_handoff;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
	.cb_handoff = false,						\
}
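/*
 * Illustrative use: elsewhere in this driver each stream-to-context register
 * entry is reset to its default disposition with a plain assignment, e.g.
 *
 *	smmu->s2crs[i] = s2cr_init_val;
 *
 * so an unclaimed S2CR either bypasses or faults depending on disable_bypass.
 */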

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_cb {
	u64				ttbr[2];
	u32				tcr[2];
	u32				mair[2];
	struct arm_smmu_cfg		*cfg;
	u32				actlr;
	bool				has_actlr;
	u32				attributes;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
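/*
 * Typical (illustrative) iteration over a master's stream map entries:
 *
 *	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
 *	int i, idx;
 *
 *	for_each_cfg_sme(fwspec, i, idx) {
 *		if (idx == INVALID_SMENDX)
 *			continue;
 *		...
 *	}
 */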

/*
 * Describes resources required for on/off power operation.
 * Separate reference count is provided for atomic/nonatomic
 * operations.
 */
struct arm_smmu_power_resources {
	struct platform_device		*pdev;
	struct device			*dev;

	struct clk			**clocks;
	int				num_clocks;

	struct regulator_bulk_data	*gdscs;
	int				num_gdscs;

	uint32_t			bus_client;
	struct msm_bus_scale_pdata	*bus_dt_data;

	/* Protects power_count */
	struct mutex			power_lock;
	int				power_count;

	/* Protects clock_refs_count */
	spinlock_t			clock_refs_lock;
	int				clock_refs_count;
	int				regulator_defer;
};

struct arm_smmu_arch_ops;
struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	phys_addr_t			phys_addr;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	DECLARE_BITMAP(secure_context_map, ARM_SMMU_MAX_CBS);
	struct arm_smmu_cb		*cbs;
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;

	u32				cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
	unsigned int			num_impl_def_attach_registers;

	struct arm_smmu_power_resources *pwr;
	struct notifier_block		regulator_nb;

	spinlock_t			atos_lock;

	/* protects idr */
	struct mutex			idr_mutex;
	struct idr			asid_idr;

	struct arm_smmu_arch_ops	*arch_ops;
	void				*archdata;

	enum tz_smmu_device_id		sec_id;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	u32				procid;
	u16				asid;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff
#define INVALID_CBNDX			0xff
#define INVALID_ASID			0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_pte_info {
	void *virt_addr;
	size_t size;
	struct list_head entry;
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct device			*dev;
	struct io_pgtable_ops		*pgtbl_ops;
	struct io_pgtable_cfg		pgtbl_cfg;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	u32				attributes;
	bool				slave_side_secure;
	u32				secure_vmid;
	struct list_head		pte_info_list;
	struct list_head		unassign_list;
	struct mutex			assign_lock;
	struct list_head		secure_pool_list;
	/* nonsecure pool protected by pgtbl_lock */
	struct list_head		nonsecure_pool;
	struct iommu_domain		domain;

	bool				qsmmuv500_errata1_init;
	bool				qsmmuv500_errata1_client;
	bool				qsmmuv500_errata2_min_align;
	bool				is_force_guard_page;
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
	{ ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
	{ ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" },
	{ ARM_SMMU_OPT_MMU500_ERRATA1, "qcom,mmu500-errata-1" },
	{ ARM_SMMU_OPT_STATIC_CB, "qcom,enable-static-cb"},
	{ ARM_SMMU_OPT_HALT, "qcom,enable-smmu-halt"},
	{ 0, NULL},
};

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);

static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
				     dma_addr_t iova);

static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);

static int arm_smmu_alloc_cb(struct iommu_domain *domain,
			     struct arm_smmu_device *smmu,
			     struct device *dev);
static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops;

static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu);
static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain);
static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain);
static bool arm_smmu_opt_hibernation(struct arm_smmu_device *smmu);

static int msm_secure_smmu_map(struct iommu_domain *domain, unsigned long iova,
			       phys_addr_t paddr, size_t size, int prot);
static size_t msm_secure_smmu_unmap(struct iommu_domain *domain,
				    unsigned long iova,
				    size_t size);
static size_t msm_secure_smmu_map_sg(struct iommu_domain *domain,
				     unsigned long iova,
				     struct scatterlist *sg,
				     unsigned int nents, int prot);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_dbg(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);

	if (arm_smmu_opt_hibernation(smmu) &&
	    (smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
		dev_info(smmu->dev,
			 "Disabling incompatible option: skip-init\n");
		smmu->options &= ~ARM_SMMU_OPT_SKIP_INIT;
	}
}

static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

static int arm_smmu_restore_sec_cfg(struct arm_smmu_device *smmu, u32 cb)
{
	int ret;
	int scm_ret = 0;

	if (!arm_smmu_is_static_cb(smmu))
		return 0;

	ret = scm_restore_sec_cfg(smmu->sec_id, cb, &scm_ret);
	if (ret || scm_ret) {
		pr_err("scm call IOMMU_SECURE_CFG failed\n");
		return -EINVAL;
	}

	return 0;
}
static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
{
	if (smmu_domain->attributes &
			(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT))
		return true;
	else if (smmu_domain->smmu && smmu_domain->smmu->dev)
		return smmu_domain->smmu->dev->archdata.dma_coherent;
	else
		return false;
}

static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu)
{
	return smmu->options & ARM_SMMU_OPT_STATIC_CB;
}

static bool arm_smmu_has_secure_vmid(struct arm_smmu_domain *smmu_domain)
{
	return (smmu_domain->secure_vmid != VMID_INVAL);
}

static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain)
{
	return arm_smmu_has_secure_vmid(smmu_domain) &&
			smmu_domain->slave_side_secure;
}

static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain)
{
	return arm_smmu_has_secure_vmid(smmu_domain)
			&& !smmu_domain->slave_side_secure;
}

static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_master_side_secure(smmu_domain))
		mutex_lock(&smmu_domain->assign_lock);
}

static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_master_side_secure(smmu_domain))
		mutex_unlock(&smmu_domain->assign_lock);
}

static bool arm_smmu_opt_hibernation(struct arm_smmu_device *smmu)
{
	return IS_ENABLED(CONFIG_HIBERNATION);
}

/*
 * init()
 * Hook for additional device tree parsing at probe time.
 *
 * device_reset()
 * Hook for one-time architecture-specific register settings.
 *
 * iova_to_phys_hard()
 * Provides debug information. May be called from the context fault irq handler.
 *
 * init_context_bank()
 * Hook for architecture-specific settings which require knowledge of the
 * dynamically allocated context bank number.
 *
 * device_group()
 * Hook for checking whether a device is compatible with a given group.
 */
struct arm_smmu_arch_ops {
	int (*init)(struct arm_smmu_device *smmu);
	void (*device_reset)(struct arm_smmu_device *smmu);
	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
					 dma_addr_t iova);
	void (*init_context_bank)(struct arm_smmu_domain *smmu_domain,
				  struct device *dev);
	int (*device_group)(struct device *dev, struct iommu_group *group);
};
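/*
 * Minimal illustrative sketch (not a real implementation in this file): an
 * SMMU variant could hook only the callbacks it needs and leave the rest
 * NULL, e.g.
 *
 *	static int example_arch_init(struct arm_smmu_device *smmu)
 *	{
 *		return 0;
 *	}
 *
 *	static struct arm_smmu_arch_ops example_arch_ops = {
 *		.init = example_arch_init,
 *	};
 *
 * The wrappers below tolerate a NULL arch_ops pointer as well as NULL
 * individual hooks.
 */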

static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
{
	if (!smmu->arch_ops)
		return 0;
	if (!smmu->arch_ops->init)
		return 0;
	return smmu->arch_ops->init(smmu);
}

static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
{
	if (!smmu->arch_ops)
		return;
	if (!smmu->arch_ops->device_reset)
		return;
	return smmu->arch_ops->device_reset(smmu);
}

static void arm_smmu_arch_init_context_bank(
		struct arm_smmu_domain *smmu_domain, struct device *dev)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (!smmu->arch_ops)
		return;
	if (!smmu->arch_ops->init_context_bank)
		return;
	return smmu->arch_ops->init_context_bank(smmu_domain, dev);
}

static int arm_smmu_arch_device_group(struct device *dev,
					struct iommu_group *group)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);

	if (!smmu->arch_ops)
		return 0;
	if (!smmu->arch_ops->device_group)
		return 0;
	return smmu->arch_ops->device_group(dev, group);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err = 0;

	memset(&it, 0, sizeof(it));
	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
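/*
 * Illustrative use (see the context-bank allocation paths later in this
 * driver): a free context bank index is claimed from smmu->context_map with
 *
 *	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
 *				      smmu->num_context_banks);
 *
 * and released with __arm_smmu_free_bitmap() when the domain is torn down.
 */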

static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_prepare(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
			while (i--)
				clk_unprepare(pwr->clocks[i]);
			break;
		}
	}
	return ret;
}

static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_unprepare(pwr->clocks[i - 1]);
}

static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_enable(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
			while (i--)
				clk_disable(pwr->clocks[i]);
			break;
		}
	}

	return ret;
}

static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_disable(pwr->clocks[i - 1]);
}

static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return 0;
	return msm_bus_scale_client_update_request(pwr->bus_client, 1);
}

static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return;
	WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
}

static int arm_smmu_enable_regulators(struct arm_smmu_power_resources *pwr)
{
	struct regulator_bulk_data *consumers;
	int num_consumers, ret;
	int i;

	num_consumers = pwr->num_gdscs;
	consumers = pwr->gdscs;
	for (i = 0; i < num_consumers; i++) {
		ret = regulator_enable(consumers[i].consumer);
		if (ret)
			goto out;
	}
	return 0;

out:
	i -= 1;
	for (; i >= 0; i--)
		regulator_disable(consumers[i].consumer);
	return ret;
}

static int arm_smmu_disable_regulators(struct arm_smmu_power_resources *pwr)
{
	struct regulator_bulk_data *consumers;
	int i;
	int num_consumers, ret, r;

	num_consumers = pwr->num_gdscs;
	consumers = pwr->gdscs;
	for (i = num_consumers - 1; i >= 0; --i) {
		ret = regulator_disable_deferred(consumers[i].consumer,
						 pwr->regulator_defer);
		if (ret != 0)
			goto err;
	}

	return 0;

err:
	pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret);
	for (++i; i < num_consumers; ++i) {
		r = regulator_enable(consumers[i].consumer);
		if (r != 0)
			pr_err("Failed to re-enable %s: %d\n",
			       consumers[i].supply, r);
	}

	return ret;
}

/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count > 0) {
		pwr->clock_refs_count++;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return 0;
	}

	ret = arm_smmu_enable_clocks(pwr);
	if (!ret)
		pwr->clock_refs_count = 1;

	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
	return ret;
}

/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
{
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count == 0) {
		WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;

	} else if (pwr->clock_refs_count > 1) {
		pwr->clock_refs_count--;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;
	}

	arm_smmu_disable_clocks(pwr);

	pwr->clock_refs_count = 0;
	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
}

static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
{
	int ret;

	mutex_lock(&pwr->power_lock);
	if (pwr->power_count > 0) {
		pwr->power_count += 1;
		mutex_unlock(&pwr->power_lock);
		return 0;
	}

	ret = arm_smmu_request_bus(pwr);
	if (ret)
		goto out_unlock;

	ret = arm_smmu_enable_regulators(pwr);
	if (ret)
		goto out_disable_bus;

	ret = arm_smmu_prepare_clocks(pwr);
	if (ret)
		goto out_disable_regulators;

	pwr->power_count = 1;
	mutex_unlock(&pwr->power_lock);
	return 0;

out_disable_regulators:
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
out_disable_bus:
	arm_smmu_unrequest_bus(pwr);
out_unlock:
	mutex_unlock(&pwr->power_lock);
	return ret;
}

static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
{
	mutex_lock(&pwr->power_lock);
	if (pwr->power_count == 0) {
		WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
		mutex_unlock(&pwr->power_lock);
		return;

	} else if (pwr->power_count > 1) {
		pwr->power_count--;
		mutex_unlock(&pwr->power_lock);
		return;
	}

	arm_smmu_unprepare_clocks(pwr);
	arm_smmu_disable_regulators(pwr);
	arm_smmu_unrequest_bus(pwr);
	pwr->power_count = 0;
	mutex_unlock(&pwr->power_lock);
}

static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
{
	int ret;

	ret = arm_smmu_power_on_slow(pwr);
	if (ret)
		return ret;

	ret = arm_smmu_power_on_atomic(pwr);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	arm_smmu_power_off_slow(pwr);
	return ret;
}

static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
{
	arm_smmu_power_off_atomic(pwr);
	arm_smmu_power_off_slow(pwr);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static int arm_smmu_domain_power_on(struct iommu_domain *domain,
				    struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain)
		return arm_smmu_power_on_atomic(smmu->pwr);

	return arm_smmu_power_on(smmu->pwr);
}

/*
 * Must be used instead of arm_smmu_power_off if it may be called from
 * atomic context
 */
static void arm_smmu_domain_power_off(struct iommu_domain *domain,
				      struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain) {
		arm_smmu_power_off_atomic(smmu->pwr);
		return;
	}

	arm_smmu_power_off(smmu->pwr);
}

/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
				 int cbndx)
{
	void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
	u32 val;

	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
				      !(val & TLBSTATUS_SACTIVE),
				      0, TLB_LOOP_TIMEOUT)) {
		trace_tlbsync_timeout(smmu->dev, 0);
		dev_err(smmu->dev, "TLBSYNC timeout!\n");
	}
}

static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
}

/* Must be called with clocks/regulators enabled */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct device *dev = smmu_domain->dev;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;
	bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
	ktime_t cur = ktime_get();

	trace_tlbi_start(dev, 0);

	if (stage1 && !use_tlbiall) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else if (stage1 && use_tlbiall) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
		__arm_smmu_tlb_sync(smmu);
	}

	trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;
	bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;

	if (stage1 && !use_tlbiall) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (stage1 && use_tlbiall) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += ARM_SMMU_CB_S1_TLBIALL;
		writel_relaxed(0, reg);
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

struct arm_smmu_secure_pool_chunk {
	void *addr;
	size_t size;
	struct list_head list;
};

static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
					size_t size)
{
	struct arm_smmu_secure_pool_chunk *it;

	list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
		if (it->size == size) {
			void *addr = it->addr;

			list_del(&it->list);
			kfree(it);
			return addr;
		}
	}

	return NULL;
}

static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
				    void *addr, size_t size)
{
	struct arm_smmu_secure_pool_chunk *chunk;

	chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
	if (!chunk)
		return -ENOMEM;

	chunk->addr = addr;
	chunk->size = size;
	memset(addr, 0, size);
1330 list_add(&chunk->list, &smmu_domain->secure_pool_list);
1331
1332 return 0;
1333}
1334
1335static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
1336{
1337 struct arm_smmu_secure_pool_chunk *it, *i;
1338
1339 list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
1340 arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
1341 /* pages will be freed later (after being unassigned) */
Prakash Gupta8e827be2017-10-04 12:37:11 +05301342 list_del(&it->list);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001343 kfree(it);
1344 }
1345}
1346
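/*
 * Note on the helpers below: for master-side secure domains, page-table
 * pages handed back through free_pages_exact() are parked in the secure
 * pool rather than released, and alloc_pages_exact() reuses them first.
 * This presumably avoids a hyp assign/unassign round trip for every
 * page-table allocation; pages only leave the pool (and get unassigned)
 * when the domain is destroyed.
 */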
Patrick Dalyc11d1082016-09-01 15:52:44 -07001347static void *arm_smmu_alloc_pages_exact(void *cookie,
1348 size_t size, gfp_t gfp_mask)
1349{
1350 int ret;
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001351 void *page;
1352 struct arm_smmu_domain *smmu_domain = cookie;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001353
Patrick Daly2d600832018-02-11 15:12:55 -08001354 if (!arm_smmu_is_master_side_secure(smmu_domain)) {
1355 struct page *pg;
1356 /* size is expected to be 4K with current configuration */
1357 if (size == PAGE_SIZE) {
1358 pg = list_first_entry_or_null(
1359 &smmu_domain->nonsecure_pool, struct page, lru);
1360 if (pg) {
1361 list_del_init(&pg->lru);
1362 return page_address(pg);
1363 }
1364 }
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001365 return alloc_pages_exact(size, gfp_mask);
Patrick Daly2d600832018-02-11 15:12:55 -08001366 }
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001367
1368 page = arm_smmu_secure_pool_remove(smmu_domain, size);
1369 if (page)
1370 return page;
1371
1372 page = alloc_pages_exact(size, gfp_mask);
1373 if (page) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07001374 ret = arm_smmu_prepare_pgtable(page, cookie);
1375 if (ret) {
1376 free_pages_exact(page, size);
1377 return NULL;
1378 }
1379 }
1380
1381 return page;
1382}
1383
1384static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
1385{
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001386 struct arm_smmu_domain *smmu_domain = cookie;
1387
Charan Teja Reddy35144b02017-09-05 16:20:46 +05301388 if (!arm_smmu_is_master_side_secure(smmu_domain)) {
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001389 free_pages_exact(virt, size);
1390 return;
1391 }
1392
1393 if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
1394 arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001395}
1396
Will Deacon518f7132014-11-14 17:17:54 +00001397static struct iommu_gather_ops arm_smmu_gather_ops = {
1398 .tlb_flush_all = arm_smmu_tlb_inv_context,
1399 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
1400 .tlb_sync = arm_smmu_tlb_sync,
Patrick Dalyc11d1082016-09-01 15:52:44 -07001401 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
1402 .free_pages_exact = arm_smmu_free_pages_exact,
Will Deacon518f7132014-11-14 17:17:54 +00001403};
1404
Charan Teja Reddy8e4c3bdc2018-03-02 14:15:21 +05301405static void msm_smmu_tlb_inv_context(void *cookie)
1406{
1407}
1408
1409static void msm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
1410 size_t granule, bool leaf,
1411 void *cookie)
1412{
1413}
1414
1415static void msm_smmu_tlb_sync(void *cookie)
1416{
1417}
1418
1419static struct iommu_gather_ops msm_smmu_gather_ops = {
1420 .tlb_flush_all = msm_smmu_tlb_inv_context,
1421 .tlb_add_flush = msm_smmu_tlb_inv_range_nosync,
1422 .tlb_sync = msm_smmu_tlb_sync,
1423 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
1424 .free_pages_exact = arm_smmu_free_pages_exact,
1425};
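
/*
 * The msm_smmu_* TLB callbacks above are deliberate no-ops. They are only
 * selected for slave-side secure domains (see arm_smmu_init_domain_context),
 * where the page tables live behind the secure world and TLB maintenance is
 * presumably performed there rather than by this driver.
 */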
1426
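/*
 * Diagnostic helper used on unhandled context faults: translate the faulting
 * IOVA via ATOS, invalidate the context bank's TLB, then translate again. A
 * result that changes across the invalidate suggests the fault was caused by
 * a stale TLB entry rather than a genuinely missing mapping.
 */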
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001427static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
1428 dma_addr_t iova, u32 fsr)
1429{
1430 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001431 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyda765c62017-09-11 16:31:07 -07001432 const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001433 phys_addr_t phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07001434 phys_addr_t phys_post_tlbiall;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001435
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001436 phys = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyda765c62017-09-11 16:31:07 -07001437 tlb->tlb_flush_all(smmu_domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001438 phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001439
Patrick Dalyad441dd2016-09-15 15:50:46 -07001440 if (phys != phys_post_tlbiall) {
1441 dev_err(smmu->dev,
1442 "ATOS results differed across TLBIALL...\n"
1443 "Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
1444 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001445
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001446 return (phys == 0 ? phys_post_tlbiall : phys);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001447}
1448
Will Deacon45ae7cf2013-06-24 18:31:25 +01001449static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
1450{
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001451 int flags, ret, tmp;
Patrick Daly5ba28112016-08-30 19:18:52 -07001452 u32 fsr, fsynr, resume;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001453 unsigned long iova;
1454 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +01001455 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001456 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1457 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001458 void __iomem *cb_base;
Shalaj Jain04059c52015-03-03 13:34:59 -08001459 void __iomem *gr1_base;
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001460 bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001461 phys_addr_t phys_soft;
Shalaj Jain04059c52015-03-03 13:34:59 -08001462 u32 frsynra;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001463 bool non_fatal_fault = !!(smmu_domain->attributes &
Sudarshan Rajagopalanf4464e02017-08-10 14:30:39 -07001464 (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001465
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001466 static DEFINE_RATELIMIT_STATE(_rs,
1467 DEFAULT_RATELIMIT_INTERVAL,
1468 DEFAULT_RATELIMIT_BURST);
1469
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001470 ret = arm_smmu_power_on(smmu->pwr);
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001471 if (ret)
1472 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001473
Shalaj Jain04059c52015-03-03 13:34:59 -08001474 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001475 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001476 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
1477
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001478 if (!(fsr & FSR_FAULT)) {
1479 ret = IRQ_NONE;
1480 goto out_power_off;
1481 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001482
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001483 if (fatal_asf && (fsr & FSR_ASF)) {
1484 dev_err(smmu->dev,
1485 "Took an address size fault. Refusing to recover.\n");
1486 BUG();
1487 }
1488
Will Deacon45ae7cf2013-06-24 18:31:25 +01001489 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
Patrick Daly5ba28112016-08-30 19:18:52 -07001490 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001491 if (fsr & FSR_TF)
1492 flags |= IOMMU_FAULT_TRANSLATION;
1493 if (fsr & FSR_PF)
1494 flags |= IOMMU_FAULT_PERMISSION;
Mitchel Humpherysdd75e332015-08-19 11:02:33 -07001495 if (fsr & FSR_EF)
1496 flags |= IOMMU_FAULT_EXTERNAL;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001497 if (fsr & FSR_SS)
1498 flags |= IOMMU_FAULT_TRANSACTION_STALLED;
Patrick Daly5ba28112016-08-30 19:18:52 -07001499
Robin Murphyf9a05f02016-04-13 18:13:01 +01001500 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001501 phys_soft = arm_smmu_iova_to_phys(domain, iova);
Shalaj Jain04059c52015-03-03 13:34:59 -08001502 frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
1503 frsynra &= CBFRSYNRA_SID_MASK;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001504 tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
1505 if (!tmp || (tmp == -EBUSY)) {
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001506 dev_dbg(smmu->dev,
1507 "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1508 iova, fsr, fsynr, cfg->cbndx);
1509 dev_dbg(smmu->dev,
1510 "soft iova-to-phys=%pa\n", &phys_soft);
Patrick Daly5ba28112016-08-30 19:18:52 -07001511 ret = IRQ_HANDLED;
Shrenuj Bansald5083c02015-09-18 14:59:09 -07001512 resume = RESUME_TERMINATE;
Patrick Daly5ba28112016-08-30 19:18:52 -07001513 } else {
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001514 phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
1515 fsr);
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001516 if (__ratelimit(&_rs)) {
1517 dev_err(smmu->dev,
1518 "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1519 iova, fsr, fsynr, cfg->cbndx);
1520 dev_err(smmu->dev, "FAR = %016lx\n",
1521 (unsigned long)iova);
1522 dev_err(smmu->dev,
1523 "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
1524 fsr,
1525 (fsr & 0x02) ? "TF " : "",
1526 (fsr & 0x04) ? "AFF " : "",
1527 (fsr & 0x08) ? "PF " : "",
1528 (fsr & 0x10) ? "EF " : "",
1529 (fsr & 0x20) ? "TLBMCF " : "",
1530 (fsr & 0x40) ? "TLBLKF " : "",
1531 (fsr & 0x80) ? "MHF " : "",
1532 (fsr & 0x40000000) ? "SS " : "",
1533 (fsr & 0x80000000) ? "MULTI " : "");
1534 dev_err(smmu->dev,
1535 "soft iova-to-phys=%pa\n", &phys_soft);
Mitchel Humpherysd03b65d2015-11-05 11:50:29 -08001536 if (!phys_soft)
1537 dev_err(smmu->dev,
1538 "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
1539 dev_name(smmu->dev));
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001540 if (phys_atos)
1541 dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
1542 &phys_atos);
1543 else
1544 dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001545 dev_err(smmu->dev, "SID=0x%x\n", frsynra);
1546 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001547 ret = IRQ_NONE;
1548 resume = RESUME_TERMINATE;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001549 if (!non_fatal_fault) {
1550 dev_err(smmu->dev,
1551 "Unhandled arm-smmu context fault!\n");
1552 BUG();
1553 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001554 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001555
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001556 /*
1557 * If the client returns -EBUSY, do not clear FSR and do not RESUME
1558 * if stalled. This is required to keep the IOMMU client stalled on
1559 * the outstanding fault. This gives the client a chance to take any
1560 * debug action and then terminate the stalled transaction.
1561 * So, the sequence in case of stall on fault should be:
1562 * 1) Do not clear FSR or write to RESUME here
1563 * 2) Client takes any debug action
1564 * 3) Client terminates the stalled transaction and resumes the IOMMU
1565 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1566 * not before so that the fault remains outstanding. This ensures
1567 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1568 * need to be terminated.
1569 */
1570 if (tmp != -EBUSY) {
1571 /* Clear the faulting FSR */
1572 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001573
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001574 /*
1575 * Barrier required to ensure that the FSR is cleared
1576 * before resuming SMMU operation
1577 */
1578 wmb();
1579
1580 /* Retry or terminate any stalled transactions */
1581 if (fsr & FSR_SS)
1582 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1583 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001584
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001585out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001586 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001587
Patrick Daly5ba28112016-08-30 19:18:52 -07001588 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001589}
1590
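/*
 * A minimal sketch (not part of this driver) of a client fault handler that
 * relies on the -EBUSY contract described above. The handler name and token
 * are illustrative; such a handler would be registered with
 * iommu_set_fault_handler().
 */
#if 0
static int example_smmu_fault_handler(struct iommu_domain *domain,
				      struct device *dev, unsigned long iova,
				      int flags, void *token)
{
	/*
	 * Returning -EBUSY leaves FSR uncleared and the transaction stalled,
	 * so the client can inspect state and later terminate or resume the
	 * transaction and clear FSR itself.
	 */
	return -EBUSY;
}
#endif
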
1591static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1592{
1593 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1594 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001595 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001596
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001597 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001598 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001599
Will Deacon45ae7cf2013-06-24 18:31:25 +01001600 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1601 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1602 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1603 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1604
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001605 if (!gfsr) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001606 arm_smmu_power_off(smmu->pwr);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001607 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001608 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001609
Will Deacon45ae7cf2013-06-24 18:31:25 +01001610 dev_err_ratelimited(smmu->dev,
1611 "Unexpected global fault, this could be serious\n");
1612 dev_err_ratelimited(smmu->dev,
1613 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1614 gfsr, gfsynr0, gfsynr1, gfsynr2);
1615
1616 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001617 arm_smmu_power_off(smmu->pwr);
Will Deaconadaba322013-07-31 19:21:26 +01001618 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001619}
1620
Shiraz Hashimeca8c2e2018-01-15 20:08:38 +05301621static bool arm_smmu_master_attached(struct arm_smmu_device *smmu,
1622 struct iommu_fwspec *fwspec)
1623{
1624 int i, idx;
1625
1626 for_each_cfg_sme(fwspec, i, idx) {
1627 if (smmu->s2crs[idx].attach_count)
1628 return true;
1629 }
1630
1631 return false;
1632}
1633
Charan Teja Reddy35144b02017-09-05 16:20:46 +05301634static int arm_smmu_set_pt_format(struct arm_smmu_domain *smmu_domain,
1635 struct io_pgtable_cfg *pgtbl_cfg)
1636{
1637 struct arm_smmu_device *smmu = smmu_domain->smmu;
1638 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1639 int ret = 0;
1640
1641 if ((smmu->version > ARM_SMMU_V1) &&
1642 (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) &&
1643 !arm_smmu_has_secure_vmid(smmu_domain) &&
1644 arm_smmu_is_static_cb(smmu)) {
1645 ret = msm_tz_set_cb_format(smmu->sec_id, cfg->cbndx);
1646 }
1647 return ret;
1648}
1649
Will Deacon518f7132014-11-14 17:17:54 +00001650static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1651 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001652{
Will Deacon44680ee2014-06-25 11:29:12 +01001653 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Robin Murphy6549a1f2017-08-08 14:56:14 +01001654 struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
1655 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1656
1657 cb->cfg = cfg;
1658
1659 /* TTBCR */
1660 if (stage1) {
1661 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1662 cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
1663 } else {
1664 cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1665 cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
1666 cb->tcr[1] |= TTBCR2_SEP_UPSTREAM;
1667 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1668 cb->tcr[1] |= TTBCR2_AS;
1669 }
1670 } else {
1671 cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
1672 }
1673
1674 /* TTBRs */
1675 if (stage1) {
1676 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1677 cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
1678 cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
1679 } else {
1680 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1681 cb->ttbr[0] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
1682 cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
1683 cb->ttbr[1] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
1684 }
1685 } else {
1686 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
1687 }
1688
1689 /* MAIRs (stage-1 only) */
1690 if (stage1) {
1691 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1692 cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
1693 cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
1694 } else {
1695 cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1696 cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1697 }
1698 }
1699
1700 cb->attributes = smmu_domain->attributes;
1701}
1702
1703static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
1704{
1705 u32 reg;
1706 bool stage1;
1707 struct arm_smmu_cb *cb = &smmu->cbs[idx];
1708 struct arm_smmu_cfg *cfg = cb->cfg;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001709 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001710
Robin Murphy6549a1f2017-08-08 14:56:14 +01001711 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, idx);
1712
1713 /* Unassigned context banks only need disabling */
1714 if (!cfg) {
1715 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
1716 return;
1717 }
1718
Will Deacon45ae7cf2013-06-24 18:31:25 +01001719 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001720 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001721
Robin Murphy6549a1f2017-08-08 14:56:14 +01001722 /* CBA2R */
Will Deacon4a1c93c2015-03-04 12:21:03 +00001723 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001724 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1725 reg = CBA2R_RW64_64BIT;
1726 else
1727 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001728 /* 16-bit VMIDs live in CBA2R */
1729 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001730 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001731
Robin Murphy6549a1f2017-08-08 14:56:14 +01001732 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(idx));
Will Deacon4a1c93c2015-03-04 12:21:03 +00001733 }
1734
Will Deacon45ae7cf2013-06-24 18:31:25 +01001735 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001736 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001737 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001738 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001739
Will Deacon57ca90f2014-02-06 14:59:05 +00001740 /*
1741 * Use the weakest shareability/memory types, so they are
1742 * overridden by the ttbcr/pte.
1743 */
1744 if (stage1) {
1745 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1746 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001747 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1748 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001749 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001750 }
Robin Murphy6549a1f2017-08-08 14:56:14 +01001751 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(idx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001752
Sunil Gouthamf0b0a2a2017-03-28 16:11:12 +05301753 /*
1754 * TTBCR
1755 * We must write this before the TTBRs, since it determines the
1756 * access behaviour of some fields (in particular, ASID[15:8]).
1757 */
Robin Murphy6549a1f2017-08-08 14:56:14 +01001758 if (stage1 && smmu->version > ARM_SMMU_V1)
1759 writel_relaxed(cb->tcr[1], cb_base + ARM_SMMU_CB_TTBCR2);
1760 writel_relaxed(cb->tcr[0], cb_base + ARM_SMMU_CB_TTBCR);
Sunil Gouthamf0b0a2a2017-03-28 16:11:12 +05301761
Will Deacon518f7132014-11-14 17:17:54 +00001762 /* TTBRs */
Robin Murphy6549a1f2017-08-08 14:56:14 +01001763 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1764 writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
1765 writel_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
1766 writel_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
Will Deacon518f7132014-11-14 17:17:54 +00001767 } else {
Robin Murphy6549a1f2017-08-08 14:56:14 +01001768 writeq_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
1769 if (stage1)
1770 writeq_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
Will Deacon518f7132014-11-14 17:17:54 +00001771 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001772
Will Deacon518f7132014-11-14 17:17:54 +00001773 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001774 if (stage1) {
Robin Murphy6549a1f2017-08-08 14:56:14 +01001775 writel_relaxed(cb->mair[0], cb_base + ARM_SMMU_CB_S1_MAIR0);
1776 writel_relaxed(cb->mair[1], cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001777 }
1778
Patrick Dalyad521082018-04-06 18:07:13 -07001779 /* ACTLR (implementation defined) */
Patrick Daly25317e82018-05-07 12:35:29 -07001780 if (cb->has_actlr)
1781 writel_relaxed(cb->actlr, cb_base + ARM_SMMU_CB_ACTLR);
Patrick Dalyad521082018-04-06 18:07:13 -07001782
Will Deacon45ae7cf2013-06-24 18:31:25 +01001783 /* SCTLR */
Robin Murphyb94df6f2016-08-11 17:44:06 +01001784 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001785
Patrick Daly7f377fe2017-10-06 17:37:10 -07001786 /* Ensure bypass transactions are Non-shareable */
1787 reg |= SCTLR_SHCFG_NSH << SCTLR_SHCFG_SHIFT;
1788
Robin Murphy6549a1f2017-08-08 14:56:14 +01001789 if (cb->attributes & (1 << DOMAIN_ATTR_CB_STALL_DISABLE)) {
Charan Teja Reddyc682e472017-04-20 19:11:20 +05301790 reg &= ~SCTLR_CFCFG;
1791 reg |= SCTLR_HUPCF;
1792 }
1793
Robin Murphy6549a1f2017-08-08 14:56:14 +01001794 if ((!(cb->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
1795 !(cb->attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) ||
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001796 !stage1)
Patrick Dalye62d3362016-03-15 18:58:28 -07001797 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001798 if (stage1)
1799 reg |= SCTLR_S1_ASIDPNE;
Robin Murphy6549a1f2017-08-08 14:56:14 +01001800 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1801 reg |= SCTLR_E;
1802
Will Deacon25724842013-08-21 13:49:53 +01001803 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001804}
1805
Patrick Dalyc190d932016-08-30 17:23:28 -07001806static int arm_smmu_init_asid(struct iommu_domain *domain,
1807 struct arm_smmu_device *smmu)
1808{
1809 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1810 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1811 bool dynamic = is_dynamic_domain(domain);
1812 int ret;
1813
1814 if (!dynamic) {
1815 cfg->asid = cfg->cbndx + 1;
1816 } else {
1817 mutex_lock(&smmu->idr_mutex);
1818 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1819 smmu->num_context_banks + 2,
1820 MAX_ASID + 1, GFP_KERNEL);
1821
1822 mutex_unlock(&smmu->idr_mutex);
1823 if (ret < 0) {
1824 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1825 ret);
1826 return ret;
1827 }
1828 cfg->asid = ret;
1829 }
1830 return 0;
1831}
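
/*
 * ASID allocation note: static (non-dynamic) domains derive their ASID from
 * the context bank index (cbndx + 1), i.e. the range [1, num_context_banks],
 * while dynamic domains allocate from the IDR starting at
 * num_context_banks + 2, so the two ranges cannot collide.
 */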
1832
1833static void arm_smmu_free_asid(struct iommu_domain *domain)
1834{
1835 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1836 struct arm_smmu_device *smmu = smmu_domain->smmu;
1837 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1838 bool dynamic = is_dynamic_domain(domain);
1839
1840 if (cfg->asid == INVALID_ASID || !dynamic)
1841 return;
1842
1843 mutex_lock(&smmu->idr_mutex);
1844 idr_remove(&smmu->asid_idr, cfg->asid);
1845 mutex_unlock(&smmu->idr_mutex);
1846}
1847
Will Deacon45ae7cf2013-06-24 18:31:25 +01001848static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Patrick Dalyea63baa2017-02-13 17:11:33 -08001849 struct arm_smmu_device *smmu,
1850 struct device *dev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001851{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001852 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001853 unsigned long ias, oas;
1854 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001855 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001856 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001857 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001858 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyce6786f2016-11-09 14:19:23 -08001859 unsigned long quirks = 0;
Patrick Dalyc190d932016-08-30 17:23:28 -07001860 bool dynamic;
Patrick Dalyda765c62017-09-11 16:31:07 -07001861 const struct iommu_gather_ops *tlb;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001862
Will Deacon518f7132014-11-14 17:17:54 +00001863 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001864 if (smmu_domain->smmu)
1865 goto out_unlock;
1866
Patrick Dalyc190d932016-08-30 17:23:28 -07001867 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1868 smmu_domain->cfg.asid = INVALID_ASID;
1869
Patrick Dalyc190d932016-08-30 17:23:28 -07001870 dynamic = is_dynamic_domain(domain);
1871 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1872 dev_err(smmu->dev, "dynamic domains not supported\n");
1873 ret = -EPERM;
1874 goto out_unlock;
1875 }
1876
Patrick Dalyaddf1f82018-04-23 14:39:19 -07001877 if (arm_smmu_has_secure_vmid(smmu_domain) &&
1878 arm_smmu_opt_hibernation(smmu)) {
1879 dev_err(smmu->dev,
1880 "Secure usecases not supported with hibernation\n");
1881 ret = -EPERM;
1882 goto out_unlock;
1883 }
1884
Will Deaconc752ce42014-06-25 22:46:31 +01001885 /*
1886 * Mapping the requested stage onto what we support is surprisingly
1887 * complicated, mainly because the spec allows S1+S2 SMMUs without
1888 * support for nested translation. That means we end up with the
1889 * following table:
1890 *
1891 * Requested Supported Actual
1892 * S1 N S1
1893 * S1 S1+S2 S1
1894 * S1 S2 S2
1895 * S1 S1 S1
1896 * N N N
1897 * N S1+S2 S2
1898 * N S2 S2
1899 * N S1 S1
1900 *
1901 * Note that you can't actually request stage-2 mappings.
1902 */
1903 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1904 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1905 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1906 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1907
Robin Murphy7602b872016-04-28 17:12:09 +01001908 /*
1909 * Choosing a suitable context format is even more fiddly. Until we
1910 * grow some way for the caller to express a preference, and/or move
1911 * the decision into the io-pgtable code where it arguably belongs,
1912 * just aim for the closest thing to the rest of the system, and hope
1913 * that the hardware isn't esoteric enough that we can't assume AArch64
1914 * support to be a superset of AArch32 support...
1915 */
1916 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1917 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001918 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
1919 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
1920 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
1921 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
1922 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +01001923 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1924 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1925 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1926 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1927 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1928
1929 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1930 ret = -EINVAL;
1931 goto out_unlock;
1932 }
1933
Will Deaconc752ce42014-06-25 22:46:31 +01001934 switch (smmu_domain->stage) {
1935 case ARM_SMMU_DOMAIN_S1:
1936 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1937 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001938 ias = smmu->va_size;
1939 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001940 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001941 fmt = ARM_64_LPAE_S1;
Patrick Daly4423d3e2017-05-04 18:17:51 -07001942 if (smmu->options & ARM_SMMU_OPT_3LVL_TABLES)
1943 ias = min(ias, 39UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001944 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +00001945 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001946 ias = min(ias, 32UL);
1947 oas = min(oas, 40UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001948 } else {
1949 fmt = ARM_V7S;
1950 ias = min(ias, 32UL);
1951 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +01001952 }
Will Deaconc752ce42014-06-25 22:46:31 +01001953 break;
1954 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001955 /*
1956 * We will likely want to change this if/when KVM gets
1957 * involved.
1958 */
Will Deaconc752ce42014-06-25 22:46:31 +01001959 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001960 cfg->cbar = CBAR_TYPE_S2_TRANS;
1961 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001962 ias = smmu->ipa_size;
1963 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001964 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001965 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001966 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001967 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001968 ias = min(ias, 40UL);
1969 oas = min(oas, 40UL);
1970 }
Will Deaconc752ce42014-06-25 22:46:31 +01001971 break;
1972 default:
1973 ret = -EINVAL;
1974 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001975 }
1976
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001977 if (is_fast)
1978 fmt = ARM_V8L_FAST;
1979
Patrick Dalyce6786f2016-11-09 14:19:23 -08001980 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
1981 quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
Liam Mark53cf2342016-12-20 11:36:07 -08001982 if (is_iommu_pt_coherent(smmu_domain))
1983 quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
Patrick Daly49ccf332017-09-27 15:10:29 -07001984 if ((quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT) &&
1985 (smmu->model == QCOM_SMMUV500))
1986 quirks |= IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001987
Patrick Dalyda765c62017-09-11 16:31:07 -07001988 tlb = &arm_smmu_gather_ops;
Patrick Daly83174c12017-10-26 12:31:15 -07001989 if (smmu->options & ARM_SMMU_OPT_MMU500_ERRATA1)
Patrick Dalyda765c62017-09-11 16:31:07 -07001990 tlb = &qsmmuv500_errata1_smmu_gather_ops;
1991
Charan Teja Reddy8e4c3bdc2018-03-02 14:15:21 +05301992 if (arm_smmu_is_slave_side_secure(smmu_domain))
1993 tlb = &msm_smmu_gather_ops;
1994
Patrick Dalyda688822017-05-17 20:12:48 -07001995 ret = arm_smmu_alloc_cb(domain, smmu, dev);
1996 if (ret < 0)
1997 goto out_unlock;
1998 cfg->cbndx = ret;
1999
Robin Murphyb7862e32016-04-13 18:13:03 +01002000 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01002001 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
2002 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002003 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01002004 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002005 }
2006
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302007 if (arm_smmu_is_slave_side_secure(smmu_domain)) {
2008 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
2009 .quirks = quirks,
2010 .pgsize_bitmap = smmu->pgsize_bitmap,
2011 .arm_msm_secure_cfg = {
2012 .sec_id = smmu->sec_id,
2013 .cbndx = cfg->cbndx,
2014 },
Charan Teja Reddy8e4c3bdc2018-03-02 14:15:21 +05302015 .tlb = tlb,
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302016 .iommu_dev = smmu->dev,
2017 };
2018 fmt = ARM_MSM_SECURE;
2019 } else {
2020 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
2021 .quirks = quirks,
2022 .pgsize_bitmap = smmu->pgsize_bitmap,
2023 .ias = ias,
2024 .oas = oas,
2025 .tlb = tlb,
2026 .iommu_dev = smmu->dev,
2027 };
2028 }
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002029
Will Deacon518f7132014-11-14 17:17:54 +00002030 smmu_domain->smmu = smmu;
Patrick Dalyea63baa2017-02-13 17:11:33 -08002031 smmu_domain->dev = dev;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07002032 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
2033 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002034 if (!pgtbl_ops) {
2035 ret = -ENOMEM;
2036 goto out_clear_smmu;
2037 }
2038
Patrick Dalyc11d1082016-09-01 15:52:44 -07002039 /*
2040 * assign any page table memory that might have been allocated
2041 * during alloc_io_pgtable_ops
2042 */
Patrick Dalye271f212016-10-04 13:24:49 -07002043 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002044 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002045 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002046
Robin Murphyd5466352016-05-09 17:20:09 +01002047 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07002048 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Robin Murphyd7a8d042016-09-12 17:13:58 +01002049 domain->geometry.aperture_end = (1UL << ias) - 1;
2050 domain->geometry.force_aperture = true;
Will Deacon518f7132014-11-14 17:17:54 +00002051
Patrick Dalyc190d932016-08-30 17:23:28 -07002052 /* Assign an asid */
2053 ret = arm_smmu_init_asid(domain, smmu);
2054 if (ret)
2055 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00002056
Patrick Dalyc190d932016-08-30 17:23:28 -07002057 if (!dynamic) {
2058 /* Initialise the context bank with our page table cfg */
2059 arm_smmu_init_context_bank(smmu_domain,
Robin Murphy6549a1f2017-08-08 14:56:14 +01002060 &smmu_domain->pgtbl_cfg);
Patrick Dalyad521082018-04-06 18:07:13 -07002061 arm_smmu_arch_init_context_bank(smmu_domain, dev);
Robin Murphy6549a1f2017-08-08 14:56:14 +01002062 arm_smmu_write_context_bank(smmu, cfg->cbndx);
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302063 /* for slave side secure, we may have to force the pagetable
2064 * format to V8L.
2065 */
2066 ret = arm_smmu_set_pt_format(smmu_domain,
2067 &smmu_domain->pgtbl_cfg);
2068 if (ret)
2069 goto out_clear_smmu;
Patrick Dalyc190d932016-08-30 17:23:28 -07002070
2071 /*
2072 * Request context fault interrupt. Do this last to avoid the
2073 * handler seeing a half-initialised domain state.
2074 */
2075 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
2076 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08002077 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
2078 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07002079 if (ret < 0) {
2080 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
2081 cfg->irptndx, irq);
2082 cfg->irptndx = INVALID_IRPTNDX;
2083 goto out_clear_smmu;
2084 }
2085 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01002086 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002087 }
Will Deacon518f7132014-11-14 17:17:54 +00002088 mutex_unlock(&smmu_domain->init_mutex);
2089
2090 /* Publish page table ops for map/unmap */
2091 smmu_domain->pgtbl_ops = pgtbl_ops;
Shiraz Hashimeca8c2e2018-01-15 20:08:38 +05302092 if (arm_smmu_is_slave_side_secure(smmu_domain) &&
2093 !arm_smmu_master_attached(smmu, dev->iommu_fwspec))
2094 arm_smmu_restore_sec_cfg(smmu, cfg->cbndx);
2095
Will Deacona9a1b0b2014-05-01 18:05:08 +01002096 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002097
Will Deacon518f7132014-11-14 17:17:54 +00002098out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06002099 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002100 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002101out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00002102 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002103 return ret;
2104}
2105
Patrick Daly77db4f92016-10-14 15:34:10 -07002106static void arm_smmu_domain_reinit(struct arm_smmu_domain *smmu_domain)
2107{
2108 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
2109 smmu_domain->cfg.cbndx = INVALID_CBNDX;
2110 smmu_domain->secure_vmid = VMID_INVAL;
2111}
2112
Will Deacon45ae7cf2013-06-24 18:31:25 +01002113static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
2114{
Joerg Roedel1d672632015-03-26 13:43:10 +01002115 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01002116 struct arm_smmu_device *smmu = smmu_domain->smmu;
2117 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002118 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07002119 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002120 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002121
Robin Murphy7e96c742016-09-14 15:26:46 +01002122 if (!smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002123 return;
2124
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002125 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002126 if (ret) {
	2127		WARN_ONCE(ret, "Whoops, powering on smmu %p failed. Leaking context bank\n",
2128 smmu);
2129 return;
2130 }
2131
Patrick Dalyc190d932016-08-30 17:23:28 -07002132 dynamic = is_dynamic_domain(domain);
2133 if (dynamic) {
2134 arm_smmu_free_asid(domain);
2135 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002136 arm_smmu_power_off(smmu->pwr);
Patrick Dalye271f212016-10-04 13:24:49 -07002137 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002138 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002139 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002140 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly77db4f92016-10-14 15:34:10 -07002141 arm_smmu_domain_reinit(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07002142 return;
2143 }
2144
Will Deacon518f7132014-11-14 17:17:54 +00002145 /*
2146 * Disable the context bank and free the page tables before freeing
2147 * it.
2148 */
Robin Murphy6549a1f2017-08-08 14:56:14 +01002149 smmu->cbs[cfg->cbndx].cfg = NULL;
2150 arm_smmu_write_context_bank(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01002151
Will Deacon44680ee2014-06-25 11:29:12 +01002152 if (cfg->irptndx != INVALID_IRPTNDX) {
2153 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08002154 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002155 }
2156
Markus Elfring44830b02015-11-06 18:32:41 +01002157 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07002158 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002159 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002160 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002161 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01002162 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Charan Teja Reddy4971ca42018-01-23 18:27:08 +05302163	/* Since the non-secure context bank index is anyway set to zero,
	2164	 * directly clear the secure CB bitmap here.
	2165	 */
2166 if (arm_smmu_is_slave_side_secure(smmu_domain))
2167 __arm_smmu_free_bitmap(smmu->secure_context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002168
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002169 arm_smmu_power_off(smmu->pwr);
Patrick Daly77db4f92016-10-14 15:34:10 -07002170 arm_smmu_domain_reinit(smmu_domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002171}
2172
Joerg Roedel1d672632015-03-26 13:43:10 +01002173static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002174{
2175 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002176
Patrick Daly09801312016-08-29 17:02:52 -07002177 /* Do not support DOMAIN_DMA for now */
2178 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01002179 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002180 /*
2181 * Allocate the domain and initialise some of its data structures.
2182 * We can't really do anything meaningful until we've added a
2183 * master.
2184 */
2185 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
2186 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01002187 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002188
Robin Murphy7e96c742016-09-14 15:26:46 +01002189 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
2190 iommu_get_dma_cookie(&smmu_domain->domain))) {
Robin Murphy9adb9592016-01-26 18:06:36 +00002191 kfree(smmu_domain);
2192 return NULL;
2193 }
2194
Will Deacon518f7132014-11-14 17:17:54 +00002195 mutex_init(&smmu_domain->init_mutex);
2196 spin_lock_init(&smmu_domain->pgtbl_lock);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002197 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
2198 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07002199 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002200 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Patrick Daly2d600832018-02-11 15:12:55 -08002201 INIT_LIST_HEAD(&smmu_domain->nonsecure_pool);
Patrick Daly77db4f92016-10-14 15:34:10 -07002202 arm_smmu_domain_reinit(smmu_domain);
Joerg Roedel1d672632015-03-26 13:43:10 +01002203
2204 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002205}
2206
Joerg Roedel1d672632015-03-26 13:43:10 +01002207static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002208{
Joerg Roedel1d672632015-03-26 13:43:10 +01002209 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01002210
2211 /*
2212 * Free the domain resources. We assume that all devices have
2213 * already been detached.
2214 */
Robin Murphy9adb9592016-01-26 18:06:36 +00002215 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002216 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002217 kfree(smmu_domain);
2218}
2219
Robin Murphy468f4942016-09-12 17:13:49 +01002220static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
2221{
2222 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphyd5b41782016-09-14 15:21:39 +01002223 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
Robin Murphy468f4942016-09-12 17:13:49 +01002224
2225 if (smr->valid)
2226 reg |= SMR_VALID;
2227 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
2228}
2229
Robin Murphya754fd12016-09-12 17:13:50 +01002230static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
2231{
2232 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
2233 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
2234 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
Patrick Daly7f377fe2017-10-06 17:37:10 -07002235 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT |
2236 S2CR_SHCFG_NSH << S2CR_SHCFG_SHIFT;
Robin Murphya754fd12016-09-12 17:13:50 +01002237
2238 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
2239}
2240
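/*
 * The S2CR is written before the SMR so that, presumably, by the time the
 * SMR becomes valid a matching stream never sees a stale context-bank
 * routing.
 */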
2241static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
2242{
2243 arm_smmu_write_s2cr(smmu, idx);
2244 if (smmu->smrs)
2245 arm_smmu_write_smr(smmu, idx);
2246}
2247
Robin Murphy6668f692016-09-12 17:13:54 +01002248static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy468f4942016-09-12 17:13:49 +01002249{
2250 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy6668f692016-09-12 17:13:54 +01002251 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002252
Robin Murphy6668f692016-09-12 17:13:54 +01002253 /* Stream indexing is blissfully easy */
2254 if (!smrs)
2255 return id;
Robin Murphy468f4942016-09-12 17:13:49 +01002256
Robin Murphy6668f692016-09-12 17:13:54 +01002257 /* Validating SMRs is... less so */
2258 for (i = 0; i < smmu->num_mapping_groups; ++i) {
2259 if (!smrs[i].valid) {
2260 /*
2261 * Note the first free entry we come across, which
2262 * we'll claim in the end if nothing else matches.
2263 */
2264 if (free_idx < 0)
2265 free_idx = i;
Robin Murphy468f4942016-09-12 17:13:49 +01002266 continue;
2267 }
Robin Murphy6668f692016-09-12 17:13:54 +01002268 /*
2269 * If the new entry is _entirely_ matched by an existing entry,
2270 * then reuse that, with the guarantee that there also cannot
2271 * be any subsequent conflicting entries. In normal use we'd
2272 * expect simply identical entries for this case, but there's
2273 * no harm in accommodating the generalisation.
2274 */
2275 if ((mask & smrs[i].mask) == mask &&
2276 !((id ^ smrs[i].id) & ~smrs[i].mask))
2277 return i;
2278 /*
2279 * If the new entry has any other overlap with an existing one,
2280 * though, then there always exists at least one stream ID
2281 * which would cause a conflict, and we can't allow that risk.
2282 */
2283 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
2284 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002285 }
2286
Robin Murphy6668f692016-09-12 17:13:54 +01002287 return free_idx;
2288}
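
/*
 * Worked example of the matching rules above (values are illustrative):
 * with an existing valid SMR {id = 0x400, mask = 0x00f} covering stream IDs
 * 0x400-0x40f, a new request {id = 0x404, mask = 0x003} is entirely
 * contained (its mask is a subset of the existing mask and the IDs agree
 * outside that mask), so the existing index is reused. A new request
 * {id = 0x408, mask = 0x0f0} is not contained but still overlaps at stream
 * ID 0x408, so arm_smmu_find_sme() returns -EINVAL.
 */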
2289
2290static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
2291{
2292 if (--smmu->s2crs[idx].count)
2293 return false;
2294
2295 smmu->s2crs[idx] = s2cr_init_val;
2296 if (smmu->smrs)
2297 smmu->smrs[idx].valid = false;
2298
2299 return true;
2300}
2301
2302static int arm_smmu_master_alloc_smes(struct device *dev)
2303{
Robin Murphy06e393e2016-09-12 17:13:55 +01002304 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2305 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy6668f692016-09-12 17:13:54 +01002306 struct arm_smmu_device *smmu = cfg->smmu;
2307 struct arm_smmu_smr *smrs = smmu->smrs;
2308 struct iommu_group *group;
2309 int i, idx, ret;
2310
2311 mutex_lock(&smmu->stream_map_mutex);
2312 /* Figure out a viable stream map entry allocation */
Robin Murphy06e393e2016-09-12 17:13:55 +01002313 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002314 u16 sid = fwspec->ids[i];
2315 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
2316
Robin Murphy6668f692016-09-12 17:13:54 +01002317 if (idx != INVALID_SMENDX) {
2318 ret = -EEXIST;
2319 goto out_err;
2320 }
2321
Robin Murphy7e96c742016-09-14 15:26:46 +01002322 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy6668f692016-09-12 17:13:54 +01002323 if (ret < 0)
2324 goto out_err;
2325
2326 idx = ret;
2327 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002328 smrs[idx].id = sid;
2329 smrs[idx].mask = mask;
Robin Murphy6668f692016-09-12 17:13:54 +01002330 smrs[idx].valid = true;
2331 }
2332 smmu->s2crs[idx].count++;
2333 cfg->smendx[i] = (s16)idx;
2334 }
2335
2336 group = iommu_group_get_for_dev(dev);
2337 if (!group)
2338 group = ERR_PTR(-ENOMEM);
2339 if (IS_ERR(group)) {
2340 ret = PTR_ERR(group);
2341 goto out_err;
2342 }
2343 iommu_group_put(group);
Robin Murphy468f4942016-09-12 17:13:49 +01002344
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002345 /* It worked! Don't poke the actual hardware until we've attached */
2346 for_each_cfg_sme(fwspec, i, idx)
Robin Murphy6668f692016-09-12 17:13:54 +01002347 smmu->s2crs[idx].group = group;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002348
Robin Murphy6668f692016-09-12 17:13:54 +01002349 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002350 return 0;
2351
Robin Murphy6668f692016-09-12 17:13:54 +01002352out_err:
Robin Murphy468f4942016-09-12 17:13:49 +01002353 while (i--) {
Robin Murphy6668f692016-09-12 17:13:54 +01002354 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy468f4942016-09-12 17:13:49 +01002355 cfg->smendx[i] = INVALID_SMENDX;
2356 }
Robin Murphy6668f692016-09-12 17:13:54 +01002357 mutex_unlock(&smmu->stream_map_mutex);
2358 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002359}
2360
Robin Murphy06e393e2016-09-12 17:13:55 +01002361static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002362{
Robin Murphy06e393e2016-09-12 17:13:55 +01002363 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
2364 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy1fb519a2016-09-12 17:13:53 +01002365 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01002366
Robin Murphy6668f692016-09-12 17:13:54 +01002367 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002368 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002369 if (arm_smmu_free_sme(smmu, idx))
2370 arm_smmu_write_sme(smmu, idx);
Robin Murphy468f4942016-09-12 17:13:49 +01002371 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002372 }
Robin Murphy6668f692016-09-12 17:13:54 +01002373 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002374}
2375
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002376static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
2377 struct iommu_fwspec *fwspec)
2378{
2379 struct arm_smmu_device *smmu = smmu_domain->smmu;
2380 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2381 int i, idx;
2382 const struct iommu_gather_ops *tlb;
2383
2384 tlb = smmu_domain->pgtbl_cfg.tlb;
2385
2386 mutex_lock(&smmu->stream_map_mutex);
2387 for_each_cfg_sme(fwspec, i, idx) {
2388 WARN_ON(s2cr[idx].attach_count == 0);
2389 s2cr[idx].attach_count -= 1;
2390
2391 if (s2cr[idx].attach_count > 0)
2392 continue;
2393
2394 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
2395 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
2396 }
2397 mutex_unlock(&smmu->stream_map_mutex);
2398
2399 /* Ensure there are no stale mappings for this context bank */
2400 tlb->tlb_flush_all(smmu_domain);
2401}
2402
Will Deacon45ae7cf2013-06-24 18:31:25 +01002403static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphy06e393e2016-09-12 17:13:55 +01002404 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002405{
Will Deacon44680ee2014-06-25 11:29:12 +01002406 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002407 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2408 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
2409 u8 cbndx = smmu_domain->cfg.cbndx;
Robin Murphy6668f692016-09-12 17:13:54 +01002410 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002411
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002412 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002413 for_each_cfg_sme(fwspec, i, idx) {
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002414 if (s2cr[idx].attach_count++ > 0)
Robin Murphy6668f692016-09-12 17:13:54 +01002415 continue;
Robin Murphya754fd12016-09-12 17:13:50 +01002416
2417 s2cr[idx].type = type;
2418 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
2419 s2cr[idx].cbndx = cbndx;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002420 arm_smmu_write_sme(smmu, idx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002421 }
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002422 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002423
2424 return 0;
2425}
2426
Patrick Daly09801312016-08-29 17:02:52 -07002427static void arm_smmu_detach_dev(struct iommu_domain *domain,
2428 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002429{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002430 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07002431 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002432 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly09801312016-08-29 17:02:52 -07002433 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07002434 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07002435
2436 if (dynamic)
2437 return;
2438
Patrick Daly09801312016-08-29 17:02:52 -07002439 if (!smmu) {
2440 dev_err(dev, "Domain not attached; cannot detach!\n");
2441 return;
2442 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002443
Vijayanand Jitta25cd32c2017-11-16 15:14:36 +05302444 if (atomic_domain)
2445 arm_smmu_power_on_atomic(smmu->pwr);
2446 else
2447 arm_smmu_power_on(smmu->pwr);
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002448
Vijayanand Jitta25cd32c2017-11-16 15:14:36 +05302449 arm_smmu_domain_remove_master(smmu_domain, fwspec);
2450 arm_smmu_power_off(smmu->pwr);
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002451}
2452
Patrick Dalye271f212016-10-04 13:24:49 -07002453static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07002454{
Patrick Dalye271f212016-10-04 13:24:49 -07002455 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002456 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2457 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
2458 int source_vmid = VMID_HLOS;
2459 struct arm_smmu_pte_info *pte_info, *temp;
2460
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302461 if (!arm_smmu_is_master_side_secure(smmu_domain))
Patrick Dalye271f212016-10-04 13:24:49 -07002462 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002463
Patrick Dalye271f212016-10-04 13:24:49 -07002464 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002465 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2466 PAGE_SIZE, &source_vmid, 1,
2467 dest_vmids, dest_perms, 2);
2468 if (WARN_ON(ret))
2469 break;
2470 }
2471
2472 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2473 entry) {
2474 list_del(&pte_info->entry);
2475 kfree(pte_info);
2476 }
Patrick Dalye271f212016-10-04 13:24:49 -07002477 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002478}
2479
2480static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2481{
2482 int ret;
2483 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002484 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002485 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2486 struct arm_smmu_pte_info *pte_info, *temp;
2487
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302488 if (!arm_smmu_is_master_side_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002489 return;
2490
2491 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2492 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2493 PAGE_SIZE, source_vmlist, 2,
2494 &dest_vmids, &dest_perms, 1);
2495 if (WARN_ON(ret))
2496 break;
2497 free_pages_exact(pte_info->virt_addr, pte_info->size);
2498 }
2499
2500 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2501 entry) {
2502 list_del(&pte_info->entry);
2503 kfree(pte_info);
2504 }
2505}
2506
2507static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2508{
2509 struct arm_smmu_domain *smmu_domain = cookie;
2510 struct arm_smmu_pte_info *pte_info;
2511
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302512 if (smmu_domain->slave_side_secure ||
2513 !arm_smmu_has_secure_vmid(smmu_domain)) {
2514 if (smmu_domain->slave_side_secure)
2515 WARN(1, "slave side secure is enforced\n");
2516 else
2517			WARN(1, "Invalid VMID is set!\n");

2518 return;
2519 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002520
2521 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2522 if (!pte_info)
2523 return;
2524
2525 pte_info->virt_addr = addr;
2526 pte_info->size = size;
2527 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2528}
2529
2530static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2531{
2532 struct arm_smmu_domain *smmu_domain = cookie;
2533 struct arm_smmu_pte_info *pte_info;
2534
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302535 if (smmu_domain->slave_side_secure ||
2536 !arm_smmu_has_secure_vmid(smmu_domain)) {
2537 if (smmu_domain->slave_side_secure)
2538 WARN(1, "slave side secure is enforced\n");
2539 else
2540			WARN(1, "Invalid VMID is set!\n");
2541 return -EINVAL;
2542 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002543
2544 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2545 if (!pte_info)
2546 return -ENOMEM;
2547 pte_info->virt_addr = addr;
2548 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2549 return 0;
2550}
2551
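/*
 * The map path takes pgtbl_lock with interrupts disabled, so intermediate
 * page-table pages cannot be allocated with GFP_KERNEL there. Estimate how
 * many 2nd- and 3rd-level tables a mapping of @size may need and allocate
 * them up front; callers splice the resulting pool into
 * smmu_domain->nonsecure_pool around the actual map call.
 */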
Patrick Daly2d600832018-02-11 15:12:55 -08002552static void arm_smmu_prealloc_memory(struct arm_smmu_domain *smmu_domain,
Patrick Dalya7414b12018-03-21 14:30:31 -07002553 size_t size, struct list_head *pool)
Patrick Daly2d600832018-02-11 15:12:55 -08002554{
Patrick Daly2d600832018-02-11 15:12:55 -08002555 int i;
Patrick Dalya7414b12018-03-21 14:30:31 -07002556 u32 nr = 0;
Patrick Daly2d600832018-02-11 15:12:55 -08002557 struct page *page;
2558
2559 if ((smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC)) ||
2560 arm_smmu_has_secure_vmid(smmu_domain))
2561 return;
2562
Patrick Daly2d600832018-02-11 15:12:55 -08002563 /* number of 2nd level pagetable entries */
2564 nr += round_up(size, SZ_1G) >> 30;
2565	/* number of 3rd level pagetable entries */
2566 nr += round_up(size, SZ_2M) >> 21;
2567
2568 /* Retry later with atomic allocation on error */
2569 for (i = 0; i < nr; i++) {
2570 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
2571 if (!page)
2572 break;
2573 list_add(&page->lru, pool);
2574 }
2575}
2576
Patrick Dalya7414b12018-03-21 14:30:31 -07002577static void arm_smmu_prealloc_memory_sg(struct arm_smmu_domain *smmu_domain,
2578 struct scatterlist *sgl, int nents,
2579 struct list_head *pool)
2580{
2581 int i;
2582 size_t size = 0;
2583 struct scatterlist *sg;
2584
2585 if ((smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC)) ||
2586 arm_smmu_has_secure_vmid(smmu_domain))
2587 return;
2588
2589 for_each_sg(sgl, sg, nents, i)
2590 size += sg->length;
2591
2592 arm_smmu_prealloc_memory(smmu_domain, size, pool);
2593}
2594
Patrick Daly2d600832018-02-11 15:12:55 -08002595static void arm_smmu_release_prealloc_memory(
2596 struct arm_smmu_domain *smmu_domain, struct list_head *list)
2597{
2598 struct page *page, *tmp;
Patrick Daly2d600832018-02-11 15:12:55 -08002599
2600 list_for_each_entry_safe(page, tmp, list, lru) {
2601 list_del(&page->lru);
2602 __free_pages(page, 0);
Patrick Daly2d600832018-02-11 15:12:55 -08002603 }
2604}
2605
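/*
 * Attach flow: power the SMMU, finalise the domain (allocating a context
 * bank on first attach), then program the device's stream IDs to target
 * that context bank. Dynamic domains skip the stream-ID programming since
 * the hardware context they piggy-back on is still live.
 *
 * Illustrative caller sequence (sketch only, using the generic IOMMU API;
 * not code from this driver):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (dom && iommu_attach_device(dom, dev))
 *		iommu_domain_free(dom);
 */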
Will Deacon45ae7cf2013-06-24 18:31:25 +01002606static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2607{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002608 int ret;
Robin Murphy06e393e2016-09-12 17:13:55 +01002609 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Will Deacon518f7132014-11-14 17:17:54 +00002610 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +01002611 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly8befb662016-08-17 20:03:28 -07002612 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002613
Robin Murphy06e393e2016-09-12 17:13:55 +01002614 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002615 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2616 return -ENXIO;
2617 }
Robin Murphy06e393e2016-09-12 17:13:55 +01002618
Robin Murphy4f79b142016-10-17 12:06:21 +01002619 /*
2620 * FIXME: The arch/arm DMA API code tries to attach devices to its own
2621 * domains between of_xlate() and add_device() - we have no way to cope
2622 * with that, so until ARM gets converted to rely on groups and default
2623 * domains, just say no (but more politely than by dereferencing NULL).
2624 * This should be at least a WARN_ON once that's sorted.
2625 */
2626 if (!fwspec->iommu_priv)
2627 return -ENODEV;
2628
Robin Murphy06e393e2016-09-12 17:13:55 +01002629 smmu = fwspec_smmu(fwspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002630
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002631 /* Enable Clocks and Power */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002632 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002633 if (ret)
2634 return ret;
2635
Will Deacon518f7132014-11-14 17:17:54 +00002636 /* Ensure that the domain is finalised */
Patrick Dalyea63baa2017-02-13 17:11:33 -08002637 ret = arm_smmu_init_domain_context(domain, smmu, dev);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002638 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002639 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002640
Patrick Dalyc190d932016-08-30 17:23:28 -07002641 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002642 if (is_dynamic_domain(domain)) {
2643 ret = 0;
2644 goto out_power_off;
2645 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002646
Will Deacon45ae7cf2013-06-24 18:31:25 +01002647 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002648 * Sanity check the domain. We don't support domains across
2649 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002650 */
Robin Murphy06e393e2016-09-12 17:13:55 +01002651 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002652 dev_err(dev,
2653 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002654 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002655 ret = -EINVAL;
2656 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002657 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002658
2659 /* Looks ok, so add the device to the domain */
Robin Murphy06e393e2016-09-12 17:13:55 +01002660 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002661
2662out_power_off:
Patrick Daly466237d2016-10-14 15:59:14 -07002663 /*
2664 * Keep an additional vote for non-atomic power until domain is
2665 * detached
2666 */
2667 if (!ret && atomic_domain) {
2668 WARN_ON(arm_smmu_power_on(smmu->pwr));
2669 arm_smmu_power_off_atomic(smmu->pwr);
2670 }
2671
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002672 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002673
Will Deacon45ae7cf2013-06-24 18:31:25 +01002674 return ret;
2675}
2676
Will Deacon45ae7cf2013-06-24 18:31:25 +01002677static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002678 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002679{
Will Deacon518f7132014-11-14 17:17:54 +00002680 int ret;
2681 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002682 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002683	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Patrick Dalya7414b12018-03-21 14:30:31 -07002684 LIST_HEAD(nonsecure_pool);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002685
Will Deacon518f7132014-11-14 17:17:54 +00002686 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002687 return -ENODEV;
2688
Charan Teja Reddy313991e2018-03-12 12:19:31 +05302689 if (arm_smmu_is_slave_side_secure(smmu_domain))
2690 return msm_secure_smmu_map(domain, iova, paddr, size, prot);
2691
Patrick Dalya7414b12018-03-21 14:30:31 -07002692 arm_smmu_prealloc_memory(smmu_domain, size, &nonsecure_pool);
Patrick Dalye271f212016-10-04 13:24:49 -07002693 arm_smmu_secure_domain_lock(smmu_domain);
2694
Will Deacon518f7132014-11-14 17:17:54 +00002695 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya7414b12018-03-21 14:30:31 -07002696 list_splice_init(&nonsecure_pool, &smmu_domain->nonsecure_pool);
Will Deacon518f7132014-11-14 17:17:54 +00002697 ret = ops->map(ops, iova, paddr, size, prot);
Patrick Dalya7414b12018-03-21 14:30:31 -07002698 list_splice_init(&smmu_domain->nonsecure_pool, &nonsecure_pool);
Will Deacon518f7132014-11-14 17:17:54 +00002699 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002700
2701 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002702 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002703
Patrick Dalya7414b12018-03-21 14:30:31 -07002704 arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);
Will Deacon518f7132014-11-14 17:17:54 +00002705 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002706}
2707
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002708static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
2709 dma_addr_t iova)
2710{
2711 uint64_t ret;
2712 unsigned long flags;
2713 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2714 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2715
2716 if (!ops)
2717 return 0;
2718
2719 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2720 ret = ops->iova_to_pte(ops, iova);
2721 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2722 return ret;
2723}
2724
Will Deacon45ae7cf2013-06-24 18:31:25 +01002725static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2726 size_t size)
2727{
Will Deacon518f7132014-11-14 17:17:54 +00002728 size_t ret;
2729 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002730 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002731	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002732
Will Deacon518f7132014-11-14 17:17:54 +00002733 if (!ops)
2734 return 0;
2735
Charan Teja Reddy313991e2018-03-12 12:19:31 +05302736 if (arm_smmu_is_slave_side_secure(smmu_domain))
2737 return msm_secure_smmu_unmap(domain, iova, size);
2738
Patrick Daly8befb662016-08-17 20:03:28 -07002739 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002740 if (ret)
2741 return ret;
2742
Patrick Dalye271f212016-10-04 13:24:49 -07002743 arm_smmu_secure_domain_lock(smmu_domain);
2744
Will Deacon518f7132014-11-14 17:17:54 +00002745 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2746 ret = ops->unmap(ops, iova, size);
2747 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002748
Patrick Daly8befb662016-08-17 20:03:28 -07002749 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002750 /*
2751 * While splitting up block mappings, we might allocate page table
2752	 * memory during unmap, so the vmids need to be assigned to the
2753 * memory here as well.
2754 */
2755 arm_smmu_assign_table(smmu_domain);
2756 /* Also unassign any pages that were free'd during unmap */
2757 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002758 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002759 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002760}
2761
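/*
 * map_sg is split into batches of at most MAX_MAP_SG_BATCH_SIZE so the
 * page-table spinlock is never held across an arbitrarily long list. If a
 * batch fails, everything mapped so far is torn down again and the
 * function reports 0 bytes mapped.
 */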
Patrick Daly88d321d2017-02-09 18:02:13 -08002762#define MAX_MAP_SG_BATCH_SIZE (SZ_4M)
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002763static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2764 struct scatterlist *sg, unsigned int nents, int prot)
2765{
2766 int ret;
Patrick Daly88d321d2017-02-09 18:02:13 -08002767 size_t size, batch_size, size_to_unmap = 0;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002768 unsigned long flags;
2769 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2770 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Patrick Daly88d321d2017-02-09 18:02:13 -08002771 unsigned int idx_start, idx_end;
2772 struct scatterlist *sg_start, *sg_end;
2773 unsigned long __saved_iova_start;
Patrick Daly2d600832018-02-11 15:12:55 -08002774 LIST_HEAD(nonsecure_pool);
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002775
2776 if (!ops)
2777 return -ENODEV;
2778
Charan Teja Reddy313991e2018-03-12 12:19:31 +05302779 if (arm_smmu_is_slave_side_secure(smmu_domain))
2780 return msm_secure_smmu_map_sg(domain, iova, sg, nents, prot);
2781
Patrick Dalya7414b12018-03-21 14:30:31 -07002782 arm_smmu_prealloc_memory_sg(smmu_domain, sg, nents, &nonsecure_pool);
Patrick Daly4b9a7ad2017-09-22 17:31:13 -07002783 arm_smmu_secure_domain_lock(smmu_domain);
2784
Patrick Daly88d321d2017-02-09 18:02:13 -08002785 __saved_iova_start = iova;
2786 idx_start = idx_end = 0;
2787 sg_start = sg_end = sg;
2788 while (idx_end < nents) {
2789 batch_size = sg_end->length;
2790 sg_end = sg_next(sg_end);
2791 idx_end++;
2792 while ((idx_end < nents) &&
2793 (batch_size + sg_end->length < MAX_MAP_SG_BATCH_SIZE)) {
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002794
Patrick Daly88d321d2017-02-09 18:02:13 -08002795 batch_size += sg_end->length;
2796 sg_end = sg_next(sg_end);
2797 idx_end++;
2798 }
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002799
Patrick Daly88d321d2017-02-09 18:02:13 -08002800 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Daly2d600832018-02-11 15:12:55 -08002801 list_splice_init(&nonsecure_pool, &smmu_domain->nonsecure_pool);
Patrick Daly88d321d2017-02-09 18:02:13 -08002802 ret = ops->map_sg(ops, iova, sg_start, idx_end - idx_start,
2803 prot, &size);
Patrick Daly2d600832018-02-11 15:12:55 -08002804 list_splice_init(&smmu_domain->nonsecure_pool, &nonsecure_pool);
Patrick Daly88d321d2017-02-09 18:02:13 -08002805 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2806 /* Returns 0 on error */
2807 if (!ret) {
2808 size_to_unmap = iova + size - __saved_iova_start;
2809 goto out;
2810 }
2811
2812 iova += batch_size;
2813 idx_start = idx_end;
2814 sg_start = sg_end;
2815 }
2816
2817out:
Patrick Dalyc11d1082016-09-01 15:52:44 -07002818 arm_smmu_assign_table(smmu_domain);
2819
Patrick Daly88d321d2017-02-09 18:02:13 -08002820 if (size_to_unmap) {
2821 arm_smmu_unmap(domain, __saved_iova_start, size_to_unmap);
2822 iova = __saved_iova_start;
2823 }
Patrick Daly4b9a7ad2017-09-22 17:31:13 -07002824 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly2d600832018-02-11 15:12:55 -08002825 arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);
Patrick Daly88d321d2017-02-09 18:02:13 -08002826 return iova - __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002827}
2828
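/*
 * Hardware address translation: write the IOVA to ATS1PR, poll ATSR until
 * the walk finishes, then read the result from PAR. If the poll times out,
 * the software table-walk result is logged for comparison and 0 is
 * returned; a faulting PAR likewise yields 0.
 */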
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002829static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyad441dd2016-09-15 15:50:46 -07002830 dma_addr_t iova)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002831{
Joerg Roedel1d672632015-03-26 13:43:10 +01002832 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002833 struct arm_smmu_device *smmu = smmu_domain->smmu;
2834 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2835	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2836 struct device *dev = smmu->dev;
2837 void __iomem *cb_base;
2838 u32 tmp;
2839 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002840 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002841
2842 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2843
Robin Murphy661d9622015-05-27 17:09:34 +01002844 /* ATS1 registers can only be written atomically */
2845 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002846 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002847 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2848 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002849 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002850
2851 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2852 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002853 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002854 dev_err(dev,
2855 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2856 &iova, &phys);
2857 phys = 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002858 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002859 }
2860
Robin Murphyf9a05f02016-04-13 18:13:01 +01002861 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002862 if (phys & CB_PAR_F) {
2863 dev_err(dev, "translation fault!\n");
2864 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002865 phys = 0;
2866 } else {
2867 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002868 }
Patrick Dalyad441dd2016-09-15 15:50:46 -07002869
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002870 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002871}
2872
Will Deacon45ae7cf2013-06-24 18:31:25 +01002873static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002874 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002875{
Will Deacon518f7132014-11-14 17:17:54 +00002876 phys_addr_t ret;
2877 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002878 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002879	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002880
Will Deacon518f7132014-11-14 17:17:54 +00002881 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002882 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002883
Will Deacon518f7132014-11-14 17:17:54 +00002884 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002885 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002886 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002887
Will Deacon518f7132014-11-14 17:17:54 +00002888 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002889}
2890
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002891/*
2892 * This function can sleep, and cannot be called from atomic context. Will
2893 * power on register block if required. This restriction does not apply to the
2894 * original iova_to_phys() op.
2895 */
2896static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2897 dma_addr_t iova)
2898{
2899 phys_addr_t ret = 0;
2900 unsigned long flags;
2901 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly62ba1922017-08-30 16:47:18 -07002902 struct arm_smmu_device *smmu = smmu_domain->smmu;
2903
2904 if (smmu->options & ARM_SMMU_OPT_DISABLE_ATOS)
2905 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002906
Patrick Dalyad441dd2016-09-15 15:50:46 -07002907 if (smmu_domain->smmu->arch_ops &&
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002908 smmu_domain->smmu->arch_ops->iova_to_phys_hard) {
2909 ret = smmu_domain->smmu->arch_ops->iova_to_phys_hard(
Patrick Dalyad441dd2016-09-15 15:50:46 -07002910 domain, iova);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002911 return ret;
2912 }
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002913
2914 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2915 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2916 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyad441dd2016-09-15 15:50:46 -07002917 ret = __arm_smmu_iova_to_phys_hard(domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002918
2919 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2920
2921 return ret;
2922}
2923
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002924static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002925{
Will Deacond0948942014-06-24 17:30:10 +01002926 switch (cap) {
2927 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002928 /*
2929 * Return true here as the SMMU can always send out coherent
2930 * requests.
2931 */
2932 return true;
Will Deacond0948942014-06-24 17:30:10 +01002933 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002934 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002935 case IOMMU_CAP_NOEXEC:
2936 return true;
Will Deacond0948942014-06-24 17:30:10 +01002937 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002938 return false;
Will Deacond0948942014-06-24 17:30:10 +01002939 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002940}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002941
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +05302942#ifdef CONFIG_MSM_TZ_SMMU
2943static struct arm_smmu_device *arm_smmu_get_by_addr(void __iomem *addr)
2944{
2945 struct arm_smmu_device *smmu;
2946 unsigned long flags;
2947
2948 spin_lock_irqsave(&arm_smmu_devices_lock, flags);
2949 list_for_each_entry(smmu, &arm_smmu_devices, list) {
2950 unsigned long base = (unsigned long)smmu->base;
2951 unsigned long mask = ~(smmu->size - 1);
2952
2953 if ((base & mask) == ((unsigned long)addr & mask)) {
2954 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2955 return smmu;
2956 }
2957 }
2958 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2959 return NULL;
2960}
2961
2962bool arm_smmu_skip_write(void __iomem *addr)
2963{
2964 struct arm_smmu_device *smmu;
Charan Teja Reddy4971ca42018-01-23 18:27:08 +05302965 int cb;
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +05302966
2967 smmu = arm_smmu_get_by_addr(addr);
Shiraz Hashima28a4792018-01-13 00:39:52 +05302968
2969	/* Skip the write if the SMMU is not available yet */
2970 if (!smmu)
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +05302971 return true;
Shiraz Hashima28a4792018-01-13 00:39:52 +05302972
Charan Teja Reddy65ff5e42018-02-19 15:32:28 +05302973 if (!arm_smmu_is_static_cb(smmu))
2974 return false;
2975
Shiraz Hashima28a4792018-01-13 00:39:52 +05302976 /* Do not write to global space */
2977 if (((unsigned long)addr & (smmu->size - 1)) < (smmu->size >> 1))
2978 return true;
2979
2980 /* Finally skip writing to secure CB */
2981 cb = ((unsigned long)addr & ((smmu->size >> 1) - 1)) >> PAGE_SHIFT;
Charan Teja Reddy4971ca42018-01-23 18:27:08 +05302982 if (test_bit(cb, smmu->secure_context_map))
2983 return true;
Shiraz Hashima28a4792018-01-13 00:39:52 +05302984
2985 return false;
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +05302986}
Charan Teja Reddy313991e2018-03-12 12:19:31 +05302987
2988static int msm_secure_smmu_map(struct iommu_domain *domain, unsigned long iova,
2989 phys_addr_t paddr, size_t size, int prot)
2990{
2991 size_t ret;
2992 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2993 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2994
2995 ret = ops->map(ops, iova, paddr, size, prot);
2996
2997 return ret;
2998}
2999
3000static size_t msm_secure_smmu_unmap(struct iommu_domain *domain,
3001 unsigned long iova,
3002 size_t size)
3003{
3004 size_t ret;
3005 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3006 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
3007
3008 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
3009 if (ret)
3010 return ret;
3011
3012 ret = ops->unmap(ops, iova, size);
3013
3014 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
3015
3016 return ret;
3017}
3018
3019static size_t msm_secure_smmu_map_sg(struct iommu_domain *domain,
3020 unsigned long iova,
3021 struct scatterlist *sg,
3022 unsigned int nents, int prot)
3023{
3024 int ret;
3025 size_t size;
3026 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3027 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
3028
3029 ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
3030
3031 if (!ret)
3032 msm_secure_smmu_unmap(domain, iova, size);
3033
3034 return ret;
3035}
3036
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +05303037#endif
3038
Patrick Daly8e3371a2017-02-13 22:14:53 -08003039static struct arm_smmu_device *arm_smmu_get_by_list(struct device_node *np)
3040{
3041 struct arm_smmu_device *smmu;
3042 unsigned long flags;
3043
3044 spin_lock_irqsave(&arm_smmu_devices_lock, flags);
3045 list_for_each_entry(smmu, &arm_smmu_devices, list) {
3046 if (smmu->dev->of_node == np) {
3047 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
3048 return smmu;
3049 }
3050 }
3051 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
3052 return NULL;
3053}
3054
Robin Murphy7e96c742016-09-14 15:26:46 +01003055static int arm_smmu_match_node(struct device *dev, void *data)
3056{
3057 return dev->of_node == data;
3058}
3059
3060static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
3061{
3062 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
3063 np, arm_smmu_match_node);
3064 put_device(dev);
Patrick Daly8e3371a2017-02-13 22:14:53 -08003065 return dev ? dev_get_drvdata(dev) : arm_smmu_get_by_list(np);
Robin Murphy7e96c742016-09-14 15:26:46 +01003066}
3067
Will Deacon03edb222015-01-19 14:27:33 +00003068static int arm_smmu_add_device(struct device *dev)
3069{
Robin Murphy06e393e2016-09-12 17:13:55 +01003070 struct arm_smmu_device *smmu;
Robin Murphyd5b41782016-09-14 15:21:39 +01003071 struct arm_smmu_master_cfg *cfg;
Robin Murphy7e96c742016-09-14 15:26:46 +01003072 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyd5b41782016-09-14 15:21:39 +01003073 int i, ret;
3074
Robin Murphy7e96c742016-09-14 15:26:46 +01003075 if (using_legacy_binding) {
3076 ret = arm_smmu_register_legacy_master(dev, &smmu);
3077 fwspec = dev->iommu_fwspec;
3078 if (ret)
3079 goto out_free;
Robin Murphy22e6f6c2016-11-02 17:31:32 +00003080 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Robin Murphy7e96c742016-09-14 15:26:46 +01003081 smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
3082 if (!smmu)
3083 return -ENODEV;
3084 } else {
3085 return -ENODEV;
3086 }
Robin Murphyd5b41782016-09-14 15:21:39 +01003087
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003088 ret = arm_smmu_power_on(smmu->pwr);
3089 if (ret)
3090 goto out_free;
3091
Robin Murphyd5b41782016-09-14 15:21:39 +01003092 ret = -EINVAL;
Robin Murphy06e393e2016-09-12 17:13:55 +01003093 for (i = 0; i < fwspec->num_ids; i++) {
3094 u16 sid = fwspec->ids[i];
Robin Murphy7e96c742016-09-14 15:26:46 +01003095 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyd5b41782016-09-14 15:21:39 +01003096
Robin Murphy06e393e2016-09-12 17:13:55 +01003097 if (sid & ~smmu->streamid_mask) {
Robin Murphyd5b41782016-09-14 15:21:39 +01003098 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01003099 sid, smmu->streamid_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003100 goto out_pwr_off;
Robin Murphyd5b41782016-09-14 15:21:39 +01003101 }
Robin Murphy7e96c742016-09-14 15:26:46 +01003102 if (mask & ~smmu->smr_mask_mask) {
3103 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
3104 sid, smmu->smr_mask_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003105 goto out_pwr_off;
Robin Murphy7e96c742016-09-14 15:26:46 +01003106 }
Robin Murphyd5b41782016-09-14 15:21:39 +01003107 }
Will Deacon03edb222015-01-19 14:27:33 +00003108
Robin Murphy06e393e2016-09-12 17:13:55 +01003109 ret = -ENOMEM;
3110 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
3111 GFP_KERNEL);
3112 if (!cfg)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003113 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01003114
3115 cfg->smmu = smmu;
3116 fwspec->iommu_priv = cfg;
3117 while (i--)
3118 cfg->smendx[i] = INVALID_SMENDX;
3119
Robin Murphy6668f692016-09-12 17:13:54 +01003120 ret = arm_smmu_master_alloc_smes(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01003121 if (ret)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003122 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01003123
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003124 arm_smmu_power_off(smmu->pwr);
Robin Murphy06e393e2016-09-12 17:13:55 +01003125 return 0;
Robin Murphyd5b41782016-09-14 15:21:39 +01003126
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003127out_pwr_off:
3128 arm_smmu_power_off(smmu->pwr);
Robin Murphyd5b41782016-09-14 15:21:39 +01003129out_free:
Robin Murphy06e393e2016-09-12 17:13:55 +01003130 if (fwspec)
3131 kfree(fwspec->iommu_priv);
3132 iommu_fwspec_free(dev);
Robin Murphyd5b41782016-09-14 15:21:39 +01003133 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00003134}
3135
Will Deacon45ae7cf2013-06-24 18:31:25 +01003136static void arm_smmu_remove_device(struct device *dev)
3137{
Robin Murphy06e393e2016-09-12 17:13:55 +01003138 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003139 struct arm_smmu_device *smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01003140
Robin Murphy06e393e2016-09-12 17:13:55 +01003141 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyd5b41782016-09-14 15:21:39 +01003142 return;
Robin Murphya754fd12016-09-12 17:13:50 +01003143
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003144 smmu = fwspec_smmu(fwspec);
3145 if (arm_smmu_power_on(smmu->pwr)) {
3146 WARN_ON(1);
3147 return;
3148 }
3149
Robin Murphy06e393e2016-09-12 17:13:55 +01003150 arm_smmu_master_free_smes(fwspec);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01003151 iommu_group_remove_device(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01003152 kfree(fwspec->iommu_priv);
3153 iommu_fwspec_free(dev);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08003154 arm_smmu_power_off(smmu->pwr);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003155}
3156
Joerg Roedelaf659932015-10-21 23:51:41 +02003157static struct iommu_group *arm_smmu_device_group(struct device *dev)
3158{
Robin Murphy06e393e2016-09-12 17:13:55 +01003159 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
3160 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy6668f692016-09-12 17:13:54 +01003161 struct iommu_group *group = NULL;
3162 int i, idx;
3163
Robin Murphy06e393e2016-09-12 17:13:55 +01003164 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01003165 if (group && smmu->s2crs[idx].group &&
3166 group != smmu->s2crs[idx].group)
3167 return ERR_PTR(-EINVAL);
3168
3169 group = smmu->s2crs[idx].group;
3170 }
3171
Patrick Daly03330cc2017-08-11 14:56:38 -07003172 if (!group) {
3173 if (dev_is_pci(dev))
3174 group = pci_device_group(dev);
3175 else
3176 group = generic_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02003177
Patrick Daly03330cc2017-08-11 14:56:38 -07003178 if (IS_ERR(group))
3179 return NULL;
3180 }
3181
3182 if (arm_smmu_arch_device_group(dev, group)) {
3183 iommu_group_put(group);
3184 return ERR_PTR(-EINVAL);
3185 }
Joerg Roedelaf659932015-10-21 23:51:41 +02003186
Joerg Roedelaf659932015-10-21 23:51:41 +02003187 return group;
3188}
3189
Will Deaconc752ce42014-06-25 22:46:31 +01003190static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
3191 enum iommu_attr attr, void *data)
3192{
Joerg Roedel1d672632015-03-26 13:43:10 +01003193 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06003194 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01003195
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003196 mutex_lock(&smmu_domain->init_mutex);
Will Deaconc752ce42014-06-25 22:46:31 +01003197 switch (attr) {
3198 case DOMAIN_ATTR_NESTING:
3199 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003200 ret = 0;
3201 break;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08003202 case DOMAIN_ATTR_PT_BASE_ADDR:
3203 *((phys_addr_t *)data) =
3204 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003205 ret = 0;
3206 break;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06003207 case DOMAIN_ATTR_CONTEXT_BANK:
3208 /* context bank index isn't valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003209 if (smmu_domain->smmu == NULL) {
3210 ret = -ENODEV;
3211 break;
3212 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06003213 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
3214 ret = 0;
3215 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06003216 case DOMAIN_ATTR_TTBR0: {
3217 u64 val;
3218 struct arm_smmu_device *smmu = smmu_domain->smmu;
3219 /* not valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003220 if (smmu == NULL) {
3221 ret = -ENODEV;
3222 break;
3223 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06003224 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
3225 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
3226 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
3227 << (TTBRn_ASID_SHIFT);
3228 *((u64 *)data) = val;
3229 ret = 0;
3230 break;
3231 }
3232 case DOMAIN_ATTR_CONTEXTIDR:
3233 /* not valid until attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003234 if (smmu_domain->smmu == NULL) {
3235 ret = -ENODEV;
3236 break;
3237 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06003238 *((u32 *)data) = smmu_domain->cfg.procid;
3239 ret = 0;
3240 break;
3241 case DOMAIN_ATTR_PROCID:
3242 *((u32 *)data) = smmu_domain->cfg.procid;
3243 ret = 0;
3244 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07003245 case DOMAIN_ATTR_DYNAMIC:
3246 *((int *)data) = !!(smmu_domain->attributes
3247 & (1 << DOMAIN_ATTR_DYNAMIC));
3248 ret = 0;
3249 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07003250 case DOMAIN_ATTR_NON_FATAL_FAULTS:
3251 *((int *)data) = !!(smmu_domain->attributes
3252 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
3253 ret = 0;
3254 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07003255 case DOMAIN_ATTR_S1_BYPASS:
3256 *((int *)data) = !!(smmu_domain->attributes
3257 & (1 << DOMAIN_ATTR_S1_BYPASS));
3258 ret = 0;
3259 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07003260 case DOMAIN_ATTR_SECURE_VMID:
3261 *((int *)data) = smmu_domain->secure_vmid;
3262 ret = 0;
3263 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08003264 case DOMAIN_ATTR_PGTBL_INFO: {
3265 struct iommu_pgtbl_info *info = data;
3266
3267 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
3268 ret = -ENODEV;
3269 break;
3270 }
3271 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
3272 ret = 0;
3273 break;
3274 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003275 case DOMAIN_ATTR_FAST:
3276 *((int *)data) = !!(smmu_domain->attributes
3277 & (1 << DOMAIN_ATTR_FAST));
3278 ret = 0;
3279 break;
Patrick Daly1e279922017-09-06 15:57:45 -07003280 case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
3281 *((int *)data) = !!(smmu_domain->attributes
3282 & (1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR));
3283 ret = 0;
3284 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08003285 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
3286 *((int *)data) = !!(smmu_domain->attributes &
3287 (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
3288 ret = 0;
3289 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003290 case DOMAIN_ATTR_EARLY_MAP:
3291 *((int *)data) = !!(smmu_domain->attributes
3292 & (1 << DOMAIN_ATTR_EARLY_MAP));
3293 ret = 0;
3294 break;
Mitchel Humpherys05314f32016-06-07 16:04:40 -07003295 case DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003296 if (!smmu_domain->smmu) {
3297 ret = -ENODEV;
3298 break;
3299 }
Liam Mark53cf2342016-12-20 11:36:07 -08003300 *((int *)data) = is_iommu_pt_coherent(smmu_domain);
3301 ret = 0;
3302 break;
3303 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
3304 *((int *)data) = !!(smmu_domain->attributes
3305 & (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
Mitchel Humpherys05314f32016-06-07 16:04:40 -07003306 ret = 0;
3307 break;
Charan Teja Reddyc682e472017-04-20 19:11:20 +05303308 case DOMAIN_ATTR_CB_STALL_DISABLE:
3309 *((int *)data) = !!(smmu_domain->attributes
3310 & (1 << DOMAIN_ATTR_CB_STALL_DISABLE));
3311 ret = 0;
3312 break;
Patrick Daly83174c12017-10-26 12:31:15 -07003313 case DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN:
Patrick Daly23301482017-10-12 16:18:25 -07003314 *((int *)data) = smmu_domain->qsmmuv500_errata2_min_align;
3315 ret = 0;
3316 break;
Prakash Guptac2e909a2018-03-29 11:23:06 +05303317 case DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE:
3318 *((int *)data) = !!(smmu_domain->attributes
3319 & (1 << DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE));
3320 ret = 0;
3321 break;
3322
Will Deaconc752ce42014-06-25 22:46:31 +01003323 default:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003324 ret = -ENODEV;
3325 break;
Will Deaconc752ce42014-06-25 22:46:31 +01003326 }
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003327 mutex_unlock(&smmu_domain->init_mutex);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06003328 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01003329}
3330
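/*
 * Several of the QCOM-specific attributes (PROCID, DYNAMIC, S1_BYPASS,
 * ATOMIC, USE_UPSTREAM_HINT, PAGE_TABLE_FORCE_COHERENT, ...) can only be
 * changed while the domain is detached; their handlers below return -EBUSY
 * once smmu_domain->smmu is set.
 *
 * Illustrative ordering for a client (sketch only, not code from this
 * driver): configure attributes first, then attach.
 *
 *	int val = 1;
 *
 *	iommu_domain_set_attr(dom, DOMAIN_ATTR_ATOMIC, &val);
 *	iommu_attach_device(dom, dev);
 */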
3331static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
3332 enum iommu_attr attr, void *data)
3333{
Will Deacon518f7132014-11-14 17:17:54 +00003334 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01003335 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01003336
Will Deacon518f7132014-11-14 17:17:54 +00003337 mutex_lock(&smmu_domain->init_mutex);
3338
Will Deaconc752ce42014-06-25 22:46:31 +01003339 switch (attr) {
3340 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00003341 if (smmu_domain->smmu) {
3342 ret = -EPERM;
3343 goto out_unlock;
3344 }
3345
Will Deaconc752ce42014-06-25 22:46:31 +01003346 if (*(int *)data)
3347 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
3348 else
3349 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
3350
Will Deacon518f7132014-11-14 17:17:54 +00003351 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06003352 case DOMAIN_ATTR_PROCID:
3353 if (smmu_domain->smmu != NULL) {
3354 dev_err(smmu_domain->smmu->dev,
3355 "cannot change procid attribute while attached\n");
3356 ret = -EBUSY;
3357 break;
3358 }
3359 smmu_domain->cfg.procid = *((u32 *)data);
3360 ret = 0;
3361 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07003362 case DOMAIN_ATTR_DYNAMIC: {
3363 int dynamic = *((int *)data);
3364
3365 if (smmu_domain->smmu != NULL) {
3366 dev_err(smmu_domain->smmu->dev,
3367 "cannot change dynamic attribute while attached\n");
3368 ret = -EBUSY;
3369 break;
3370 }
3371
3372 if (dynamic)
3373 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
3374 else
3375 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
3376 ret = 0;
3377 break;
3378 }
3379 case DOMAIN_ATTR_CONTEXT_BANK:
3380 /* context bank can't be set while attached */
3381 if (smmu_domain->smmu != NULL) {
3382 ret = -EBUSY;
3383 break;
3384 }
3385 /* ... and it can only be set for dynamic contexts. */
3386 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
3387 ret = -EINVAL;
3388 break;
3389 }
3390
3391 /* this will be validated during attach */
3392 smmu_domain->cfg.cbndx = *((unsigned int *)data);
3393 ret = 0;
3394 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07003395 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
3396 u32 non_fatal_faults = *((int *)data);
3397
3398 if (non_fatal_faults)
3399 smmu_domain->attributes |=
3400 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
3401 else
3402 smmu_domain->attributes &=
3403 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
3404 ret = 0;
3405 break;
3406 }
Patrick Dalye62d3362016-03-15 18:58:28 -07003407 case DOMAIN_ATTR_S1_BYPASS: {
3408 int bypass = *((int *)data);
3409
3410 /* bypass can't be changed while attached */
3411 if (smmu_domain->smmu != NULL) {
3412 ret = -EBUSY;
3413 break;
3414 }
3415 if (bypass)
3416 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
3417 else
3418 smmu_domain->attributes &=
3419 ~(1 << DOMAIN_ATTR_S1_BYPASS);
3420
3421 ret = 0;
3422 break;
3423 }
Patrick Daly8befb662016-08-17 20:03:28 -07003424 case DOMAIN_ATTR_ATOMIC:
3425 {
3426 int atomic_ctx = *((int *)data);
3427
3428 /* can't be changed while attached */
3429 if (smmu_domain->smmu != NULL) {
3430 ret = -EBUSY;
3431 break;
3432 }
3433 if (atomic_ctx)
3434 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
3435 else
3436 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
3437 break;
3438 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07003439 case DOMAIN_ATTR_SECURE_VMID:
3440 if (smmu_domain->secure_vmid != VMID_INVAL) {
3441 ret = -ENODEV;
3442 WARN(1, "secure vmid already set!");
3443 break;
3444 }
3445 smmu_domain->secure_vmid = *((int *)data);
3446 break;
Patrick Daly1e279922017-09-06 15:57:45 -07003447 case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
3448 if (*((int *)data))
3449 smmu_domain->attributes |=
3450 1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR;
3451 ret = 0;
3452 break;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003453 /*
3454 * fast_smmu_unmap_page() and fast_smmu_alloc_iova() both
3455 * expect that the bus/clock/regulator are already on. Thus also
3456	 * force DOMAIN_ATTR_ATOMIC to be set.
3457 */
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003458 case DOMAIN_ATTR_FAST:
Patrick Daly0df84ac2017-10-11 17:32:41 -07003459 {
3460 int fast = *((int *)data);
3461
3462 if (fast) {
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003463 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003464 smmu_domain->attributes |= 1 << DOMAIN_ATTR_ATOMIC;
3465 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003466 ret = 0;
3467 break;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003468 }
Patrick Dalyce6786f2016-11-09 14:19:23 -08003469 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
3470 /* can't be changed while attached */
3471 if (smmu_domain->smmu != NULL) {
3472 ret = -EBUSY;
3473 break;
3474 }
3475 if (*((int *)data))
3476 smmu_domain->attributes |=
3477 1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
3478 ret = 0;
3479 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003480 case DOMAIN_ATTR_EARLY_MAP: {
3481 int early_map = *((int *)data);
3482
3483 ret = 0;
3484 if (early_map) {
3485 smmu_domain->attributes |=
3486 1 << DOMAIN_ATTR_EARLY_MAP;
3487 } else {
3488 if (smmu_domain->smmu)
3489 ret = arm_smmu_enable_s1_translations(
3490 smmu_domain);
3491
3492 if (!ret)
3493 smmu_domain->attributes &=
3494 ~(1 << DOMAIN_ATTR_EARLY_MAP);
3495 }
3496 break;
3497 }
Liam Mark53cf2342016-12-20 11:36:07 -08003498 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT: {
3499 int force_coherent = *((int *)data);
3500
3501 if (smmu_domain->smmu != NULL) {
3502 dev_err(smmu_domain->smmu->dev,
3503 "cannot change force coherent attribute while attached\n");
3504 ret = -EBUSY;
3505 break;
3506 }
3507
3508 if (force_coherent)
3509 smmu_domain->attributes |=
3510 1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
3511 else
3512 smmu_domain->attributes &=
3513 ~(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT);
3514
3515 ret = 0;
3516 break;
3517 }
3518
Charan Teja Reddyc682e472017-04-20 19:11:20 +05303519 case DOMAIN_ATTR_CB_STALL_DISABLE:
3520 if (*((int *)data))
3521 smmu_domain->attributes |=
3522 1 << DOMAIN_ATTR_CB_STALL_DISABLE;
3523 ret = 0;
3524 break;
Prakash Guptac2e909a2018-03-29 11:23:06 +05303525
3526 case DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE: {
3527 int force_iova_guard_page = *((int *)data);
3528
3529 if (smmu_domain->smmu != NULL) {
3530 dev_err(smmu_domain->smmu->dev,
3531 "cannot change force guard page attribute while attached\n");
3532 ret = -EBUSY;
3533 break;
3534 }
3535
3536 if (force_iova_guard_page)
3537 smmu_domain->attributes |=
3538 1 << DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE;
3539 else
3540 smmu_domain->attributes &=
3541 ~(1 << DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE);
3542
3543 ret = 0;
3544 break;
3545 }
3546
Will Deaconc752ce42014-06-25 22:46:31 +01003547 default:
Will Deacon518f7132014-11-14 17:17:54 +00003548 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01003549 }
Will Deacon518f7132014-11-14 17:17:54 +00003550
3551out_unlock:
3552 mutex_unlock(&smmu_domain->init_mutex);
3553 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01003554}
3555
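/*
 * Translate an "iommus" specifier from DT into a firmware ID: cell 0 is the
 * 16-bit stream ID and the optional cell 1 is the SMR mask, shifted into
 * the upper halfword of the fwid.
 */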
Robin Murphy7e96c742016-09-14 15:26:46 +01003556static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
3557{
3558 u32 fwid = 0;
3559
3560 if (args->args_count > 0)
3561 fwid |= (u16)args->args[0];
3562
3563 if (args->args_count > 1)
3564 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
3565
3566 return iommu_fwspec_add_ids(dev, &fwid, 1);
3567}
3568
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003569static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain)
3570{
3571 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3572 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy6549a1f2017-08-08 14:56:14 +01003573 struct arm_smmu_cb *cb = &smmu->cbs[cfg->cbndx];
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003574 int ret;
3575
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003576 ret = arm_smmu_power_on(smmu->pwr);
3577 if (ret)
3578 return ret;
3579
Robin Murphy6549a1f2017-08-08 14:56:14 +01003580 cb->attributes &= ~(1 << DOMAIN_ATTR_EARLY_MAP);
3581 arm_smmu_write_context_bank(smmu, cfg->cbndx);
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003582
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003583 arm_smmu_power_off(smmu->pwr);
3584 return ret;
3585}
3586
Liam Mark3ba41cf2016-12-09 14:39:04 -08003587static bool arm_smmu_is_iova_coherent(struct iommu_domain *domain,
3588 dma_addr_t iova)
3589{
3590 bool ret;
3591 unsigned long flags;
3592 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3593 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
3594
3595 if (!ops)
3596 return false;
3597
3598 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
3599 ret = ops->is_iova_coherent(ops, iova);
3600 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
3601 return ret;
3602}
3603
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003604static void arm_smmu_trigger_fault(struct iommu_domain *domain,
3605 unsigned long flags)
3606{
3607 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3608 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3609 struct arm_smmu_device *smmu;
3610 void __iomem *cb_base;
3611
3612 if (!smmu_domain->smmu) {
3613 pr_err("Can't trigger faults on non-attached domains\n");
3614 return;
3615 }
3616
3617 smmu = smmu_domain->smmu;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003618 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003619 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003620
3621 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3622 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
3623 flags, cfg->cbndx);
3624 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07003625 /* give the interrupt time to fire... */
3626 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003627
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003628 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003629}
3630
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003631static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
3632{
Patrick Dalyda765c62017-09-11 16:31:07 -07003633 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3634 const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;
3635
3636 tlb->tlb_flush_all(smmu_domain);
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003637}
3638
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003639static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
3640{
3641 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3642
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003643 return arm_smmu_power_on(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003644}
3645
3646static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
3647{
3648 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3649
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003650 arm_smmu_power_off(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003651}
3652
Will Deacon518f7132014-11-14 17:17:54 +00003653static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01003654 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01003655 .domain_alloc = arm_smmu_domain_alloc,
3656 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01003657 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07003658 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01003659 .map = arm_smmu_map,
3660 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07003661 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01003662 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07003663 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01003664 .add_device = arm_smmu_add_device,
3665 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02003666 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01003667 .domain_get_attr = arm_smmu_domain_get_attr,
3668 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy7e96c742016-09-14 15:26:46 +01003669 .of_xlate = arm_smmu_of_xlate,
Will Deacon518f7132014-11-14 17:17:54 +00003670 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003671 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003672 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003673 .enable_config_clocks = arm_smmu_enable_config_clocks,
3674 .disable_config_clocks = arm_smmu_disable_config_clocks,
Liam Mark3ba41cf2016-12-09 14:39:04 -08003675 .is_iova_coherent = arm_smmu_is_iova_coherent,
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07003676 .iova_to_pte = arm_smmu_iova_to_pte,
Will Deacon45ae7cf2013-06-24 18:31:25 +01003677};
3678
Patrick Dalyad441dd2016-09-15 15:50:46 -07003679#define IMPL_DEF1_MICRO_MMU_CTRL 0
3680#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
3681#define MICRO_MMU_CTRL_IDLE (1 << 3)
3682
3683/* Definitions for implementation-defined registers */
3684#define ACTLR_QCOM_OSH_SHIFT 28
3685#define ACTLR_QCOM_OSH 1
3686
3687#define ACTLR_QCOM_ISH_SHIFT 29
3688#define ACTLR_QCOM_ISH 1
3689
3690#define ACTLR_QCOM_NSH_SHIFT 30
3691#define ACTLR_QCOM_NSH 1
3692
3693static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003694{
3695 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003696 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003697
3698 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
3699 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
3700 0, 30000)) {
3701 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
3702 return -EBUSY;
3703 }
3704
3705 return 0;
3706}
3707
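/*
 * Ask the SMMU microengine to halt by setting LOCAL_HALT_REQ in the
 * implementation-defined MICRO_MMU_CTRL register. When the context banks
 * are statically owned by the secure world, the register has to be written
 * through scm_io_write() using its physical address rather than the normal
 * MMIO mapping.
 */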
Patrick Dalyad441dd2016-09-15 15:50:46 -07003708static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003709{
3710 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3711 u32 reg;
3712
3713 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3714 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
Charan Teja Reddy35144b02017-09-05 16:20:46 +05303715
3716 if (arm_smmu_is_static_cb(smmu)) {
3717 phys_addr_t impl_def1_base_phys = impl_def1_base - smmu->base +
3718 smmu->phys_addr;
3719
3720 if (scm_io_write(impl_def1_base_phys +
3721 IMPL_DEF1_MICRO_MMU_CTRL, reg)) {
3722 dev_err(smmu->dev,
3723				"scm_io_write failed. SMMU might not be halted");
3724 return -EINVAL;
3725 }
3726 } else {
3727 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3728 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003729
Patrick Dalyad441dd2016-09-15 15:50:46 -07003730 return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003731}
3732
Patrick Dalyad441dd2016-09-15 15:50:46 -07003733static int qsmmuv2_halt(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003734{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003735 return __qsmmuv2_halt(smmu, true);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003736}
3737
Patrick Dalyad441dd2016-09-15 15:50:46 -07003738static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003739{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003740 return __qsmmuv2_halt(smmu, false);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003741}
3742
Patrick Dalyad441dd2016-09-15 15:50:46 -07003743static void qsmmuv2_resume(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003744{
3745 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3746 u32 reg;
3747
3748 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3749 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
Charan Teja Reddy35144b02017-09-05 16:20:46 +05303750
3751 if (arm_smmu_is_static_cb(smmu)) {
3752 phys_addr_t impl_def1_base_phys = impl_def1_base - smmu->base +
3753 smmu->phys_addr;
3754
3755 if (scm_io_write(impl_def1_base_phys +
3756 IMPL_DEF1_MICRO_MMU_CTRL, reg))
3757 dev_err(smmu->dev,
3758				"scm_io_write failed. SMMU might not be resumed");
3759 } else {
3760 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3761 }
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003762}
3763
Patrick Dalyad441dd2016-09-15 15:50:46 -07003764static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003765{
3766 int i;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003767 u32 val;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003768 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003769 /*
3770 * SCTLR.M must be disabled here per ARM SMMUv2 spec
3771 * to prevent table walks with an inconsistent state.
3772 */
3773 for (i = 0; i < smmu->num_context_banks; ++i) {
Patrick Dalyad521082018-04-06 18:07:13 -07003774 struct arm_smmu_cb *cb = &smmu->cbs[i];
3775
Patrick Dalyad441dd2016-09-15 15:50:46 -07003776 val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
3777 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
3778 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
Patrick Dalyad521082018-04-06 18:07:13 -07003779 cb->actlr = val;
Patrick Daly25317e82018-05-07 12:35:29 -07003780 cb->has_actlr = true;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003781 }
3782
3783 /* Program implementation defined registers */
3784 qsmmuv2_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003785 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
3786 writel_relaxed(regs[i].value,
3787 ARM_SMMU_GR0(smmu) + regs[i].offset);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003788 qsmmuv2_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003789}
3790
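/*
 * Debug address translation (ATOS): halt the SMMU, terminate any
 * stalled transaction, clear stale fault status and temporarily turn
 * off stall-on-fault (SCTLR.CFCFG) so the lookup itself cannot wedge
 * the context bank, then translate and restore the original state.
 */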
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003791static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
3792 dma_addr_t iova)
Patrick Dalyad441dd2016-09-15 15:50:46 -07003793{
3794 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3795 struct arm_smmu_device *smmu = smmu_domain->smmu;
3796 int ret;
3797 phys_addr_t phys = 0;
3798 unsigned long flags;
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003799 u32 sctlr, sctlr_orig, fsr;
3800 void __iomem *cb_base;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003801
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003802 ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003803 if (ret)
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003804 return ret;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003805
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003806 spin_lock_irqsave(&smmu->atos_lock, flags);
3807 cb_base = ARM_SMMU_CB_BASE(smmu) +
3808 ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003809
3810 qsmmuv2_halt_nowait(smmu);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003811 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003812 qsmmuv2_wait_for_halt(smmu);
3813
3814 /* clear FSR to allow ATOS to log any faults */
3815 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
3816 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
3817
3818 /* disable stall mode momentarily */
3819 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3820 sctlr = sctlr_orig & ~SCTLR_CFCFG;
3821 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
3822
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003823 phys = __arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003824
3825 /* restore SCTLR */
3826 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
3827
3828 qsmmuv2_resume(smmu);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003829 spin_unlock_irqrestore(&smmu->atos_lock, flags);
3830
3831 arm_smmu_power_off(smmu_domain->smmu->pwr);
3832 return phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003833}
3834
3835struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
3836 .device_reset = qsmmuv2_device_reset,
3837 .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
Patrick Dalyad441dd2016-09-15 15:50:46 -07003838};
3839
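/*
 * Quiesce every context bank: rewrite each bank from its cached
 * software state, clear any latched faults, and on MMU-500 disable
 * the next-page prefetcher (errata #841119 and #826419).
 */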
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003840static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003841{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003842 int i;
3843 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003844 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003845
Peng Fan3ca37122016-05-03 21:50:30 +08003846 /*
3847 * Before clearing ARM_MMU500_ACTLR_CPRE, the CACHE_LOCK bit of
3848 * ACR must be cleared first. Note that the CACHE_LOCK bit is
3849 * only present in MMU-500 r2 onwards.
3850 */
3851 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
3852 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
3853 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
3854 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
3855 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
3856 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
3857 }
3858
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003859 /* Make sure all context banks are disabled and clear CB_FSR */
3860 for (i = 0; i < smmu->num_context_banks; ++i) {
Robin Murphy6549a1f2017-08-08 14:56:14 +01003861 void __iomem *cb_base = ARM_SMMU_CB_BASE(smmu) +
3862 ARM_SMMU_CB(smmu, i);
3863
3864 arm_smmu_write_context_bank(smmu, i);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003865 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003866 /*
3867 * Disable MMU-500's not-particularly-beneficial next-page
3868 * prefetcher for the sake of errata #841119 and #826419.
3869 */
3870 if (smmu->model == ARM_MMU500) {
3871 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
3872 reg &= ~ARM_MMU500_ACTLR_CPRE;
3873 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
3874 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003875 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003876}
3877
3878static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
3879{
3880 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy468f4942016-09-12 17:13:49 +01003881 int i;
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003882 u32 reg;
3883
3884 /* clear global FSR */
3885 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3886 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3887
Robin Murphy468f4942016-09-12 17:13:49 +01003888 /*
3889 * Reset stream mapping groups: Initial values mark all SMRn as
3890 * invalid and all S2CRn as bypass unless overridden.
3891 */
Patrick Daly59b6d202017-06-12 13:12:15 -07003892 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
3893 for (i = 0; i < smmu->num_mapping_groups; ++i)
3894 arm_smmu_write_sme(smmu, i);
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003895
Patrick Daly59b6d202017-06-12 13:12:15 -07003896 arm_smmu_context_bank_reset(smmu);
3897 }
Will Deacon1463fe42013-07-31 19:21:27 +01003898
Will Deacon45ae7cf2013-06-24 18:31:25 +01003899 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003900 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
3901 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
3902
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003903 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003904
Will Deacon45ae7cf2013-06-24 18:31:25 +01003905 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003906 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003907
3908 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003909 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003910
Robin Murphy25a1c962016-02-10 14:25:33 +00003911 /* Enable client access, handling unmatched streams as appropriate */
3912 reg &= ~sCR0_CLIENTPD;
3913 if (disable_bypass)
3914 reg |= sCR0_USFCFG;
3915 else
3916 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003917
3918 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003919 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003920
3921 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003922 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003923
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003924 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3925 reg |= sCR0_VMID16EN;
3926
Patrick Daly7f377fe2017-10-06 17:37:10 -07003927 /* Force bypass transactions to be Non-Shareable and not I/O-coherent */
3928 reg &= ~(sCR0_SHCFG_MASK << sCR0_SHCFG_SHIFT);
Prakash Gupta673a79f2017-11-16 18:07:00 +05303929 reg |= sCR0_SHCFG_NSH << sCR0_SHCFG_SHIFT;
Patrick Daly7f377fe2017-10-06 17:37:10 -07003930
Will Deacon45ae7cf2013-06-24 18:31:25 +01003931 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003932 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003933 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Dalyd7476202016-09-08 18:23:28 -07003934
3935 /* Manage any implementation defined features */
3936 arm_smmu_arch_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003937}
3938
3939static int arm_smmu_id_size_to_bits(int size)
3940{
3941 switch (size) {
3942 case 0:
3943 return 32;
3944 case 1:
3945 return 36;
3946 case 2:
3947 return 40;
3948 case 3:
3949 return 42;
3950 case 4:
3951 return 44;
3952 case 5:
3953 default:
3954 return 48;
3955 }
3956}
3957
Patrick Dalyda688822017-05-17 20:12:48 -07003958
3959/*
3960 * Some context banks need to be transferred from bootloader to HLOS in a way
3961 * that allows ongoing traffic. The current expectation is that these context
3962 * banks operate in bypass mode.
3963 * Additionally, there must be exactly one device in devicetree with stream-ids
3964 * overlapping those used by the bootloader.
3965 */
3966static int arm_smmu_alloc_cb(struct iommu_domain *domain,
3967 struct arm_smmu_device *smmu,
3968 struct device *dev)
3969{
3970 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Dalye72526b2017-07-18 16:21:44 -07003971 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Dalyda688822017-05-17 20:12:48 -07003972 u32 i, idx;
3973 int cb = -EINVAL;
3974 bool dynamic;
3975
Patrick Dalye72526b2017-07-18 16:21:44 -07003976 /*
3977 * Dynamic domains have already set cbndx through domain attribute.
3978 * Verify that they picked a valid value.
3979 */
Patrick Dalyda688822017-05-17 20:12:48 -07003980 dynamic = is_dynamic_domain(domain);
Patrick Dalye72526b2017-07-18 16:21:44 -07003981 if (dynamic) {
3982 cb = smmu_domain->cfg.cbndx;
3983 if (cb < smmu->num_context_banks)
3984 return cb;
3985 else
3986 return -EINVAL;
3987 }
Patrick Dalyda688822017-05-17 20:12:48 -07003988
3989 mutex_lock(&smmu->stream_map_mutex);
3990 for_each_cfg_sme(fwspec, i, idx) {
3991 if (smmu->s2crs[idx].cb_handoff)
3992 cb = smmu->s2crs[idx].cbndx;
3993 }
3994
Shiraz Hashima28a4792018-01-13 00:39:52 +05303995 if (cb >= 0 && arm_smmu_is_static_cb(smmu)) {
Charan Teja Reddy35144b02017-09-05 16:20:46 +05303996 smmu_domain->slave_side_secure = true;
3997
Shiraz Hashima28a4792018-01-13 00:39:52 +05303998 if (arm_smmu_is_slave_side_secure(smmu_domain))
Charan Teja Reddy4971ca42018-01-23 18:27:08 +05303999 bitmap_set(smmu->secure_context_map, cb, 1);
Shiraz Hashima28a4792018-01-13 00:39:52 +05304000 }
4001
Charan Teja Reddyf0758df2017-09-04 18:52:07 +05304002 if (cb < 0 && !arm_smmu_is_static_cb(smmu)) {
Patrick Dalyda688822017-05-17 20:12:48 -07004003 mutex_unlock(&smmu->stream_map_mutex);
4004 return __arm_smmu_alloc_bitmap(smmu->context_map,
4005 smmu->num_s2_context_banks,
4006 smmu->num_context_banks);
4007 }
4008
4009 for (i = 0; i < smmu->num_mapping_groups; i++) {
Patrick Daly2eb31362017-06-14 18:29:36 -07004010 if (smmu->s2crs[i].cb_handoff && smmu->s2crs[i].cbndx == cb) {
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304011 if (!arm_smmu_is_static_cb(smmu))
4012 smmu->s2crs[i].cb_handoff = false;
Patrick Dalyda688822017-05-17 20:12:48 -07004013 smmu->s2crs[i].count -= 1;
4014 }
4015 }
4016 mutex_unlock(&smmu->stream_map_mutex);
4017
4018 return cb;
4019}
4020
Patrick Dalyaddf1f82018-04-23 14:39:19 -07004021static void parse_static_cb_cfg(struct arm_smmu_device *smmu)
4022{
4023 u32 idx = 0;
4024 u32 val;
4025 int ret;
4026
4027 if (!(arm_smmu_is_static_cb(smmu) &&
4028 arm_smmu_opt_hibernation(smmu)))
4029 return;
4030
4031 /*
4032 * Context banks may be xpu-protected. Require a devicetree property to
4033 * indicate which context banks HLOS has access to.
4034 */
4035 bitmap_set(smmu->secure_context_map, 0, ARM_SMMU_MAX_CBS);
4036 while (idx < ARM_SMMU_MAX_CBS) {
4037 ret = of_property_read_u32_index(
4038 smmu->dev->of_node, "qcom,static-ns-cbs",
4039 idx++, &val);
4040 if (ret)
4041 break;
4042
4043 bitmap_clear(smmu->secure_context_map, val, 1);
4044 dev_dbg(smmu->dev, "Detected NS context bank: %d\n", val);
4045 }
4046}
4047
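/*
 * Record the stream mapping state left behind by the bootloader so
 * that ongoing traffic keeps working: every valid SMR whose S2CR is
 * of translation type is copied into the software tables, marked as
 * a handoff entry, and its context bank reserved in the context map.
 */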
Patrick Dalyda688822017-05-17 20:12:48 -07004048static int arm_smmu_handoff_cbs(struct arm_smmu_device *smmu)
4049{
4050 u32 i, raw_smr, raw_s2cr;
4051 struct arm_smmu_smr smr;
4052 struct arm_smmu_s2cr s2cr;
4053
4054 for (i = 0; i < smmu->num_mapping_groups; i++) {
4055 raw_smr = readl_relaxed(ARM_SMMU_GR0(smmu) +
4056 ARM_SMMU_GR0_SMR(i));
4057 if (!(raw_smr & SMR_VALID))
4058 continue;
4059
4060 smr.mask = (raw_smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
4061 smr.id = (u16)raw_smr;
4062 smr.valid = true;
4063
4064 raw_s2cr = readl_relaxed(ARM_SMMU_GR0(smmu) +
4065 ARM_SMMU_GR0_S2CR(i));
Patrick Daly4ea4bdf2017-08-29 19:24:49 -07004066 memset(&s2cr, 0, sizeof(s2cr));
Patrick Dalyda688822017-05-17 20:12:48 -07004067 s2cr.group = NULL;
4068 s2cr.count = 1;
4069 s2cr.type = (raw_s2cr >> S2CR_TYPE_SHIFT) & S2CR_TYPE_MASK;
4070 s2cr.privcfg = (raw_s2cr >> S2CR_PRIVCFG_SHIFT) &
4071 S2CR_PRIVCFG_MASK;
4072 s2cr.cbndx = (u8)raw_s2cr;
4073 s2cr.cb_handoff = true;
4074
4075 if (s2cr.type != S2CR_TYPE_TRANS)
4076 continue;
4077
4078 smmu->smrs[i] = smr;
4079 smmu->s2crs[i] = s2cr;
4080 bitmap_set(smmu->context_map, s2cr.cbndx, 1);
4081 dev_dbg(smmu->dev, "Handoff smr: %x s2cr: %x cb: %d\n",
4082 raw_smr, raw_s2cr, s2cr.cbndx);
4083 }
4084
4085 return 0;
4086}
4087
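/*
 * Parse the optional "attach-impl-defs" devicetree property into
 * (offset, value) pairs; they are replayed into the global register
 * space each time the SMMU is reset.
 */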
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004088static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
4089{
4090 struct device *dev = smmu->dev;
4091 int i, ntuples, ret;
4092 u32 *tuples;
4093 struct arm_smmu_impl_def_reg *regs, *regit;
4094
4095 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
4096 return 0;
4097
4098 ntuples /= sizeof(u32);
4099 if (ntuples % 2) {
4100 dev_err(dev,
4101 "Invalid number of attach-impl-defs registers: %d\n",
4102 ntuples);
4103 return -EINVAL;
4104 }
4105
4106 regs = devm_kmalloc(
4107 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
4108 GFP_KERNEL);
4109 if (!regs)
4110 return -ENOMEM;
4111
4112 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
4113 if (!tuples)
4114 return -ENOMEM;
4115
4116 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
4117 tuples, ntuples);
4118 if (ret)
4119 return ret;
4120
4121 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
4122 regit->offset = tuples[i];
4123 regit->value = tuples[i + 1];
4124 }
4125
4126 devm_kfree(dev, tuples);
4127
4128 smmu->impl_def_attach_registers = regs;
4129 smmu->num_impl_def_attach_registers = ntuples / 2;
4130
4131 return 0;
4132}
4133
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004134
4135static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004136{
4137 const char *cname;
4138 struct property *prop;
4139 int i;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004140 struct device *dev = pwr->dev;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004141
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004142 pwr->num_clocks =
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004143 of_property_count_strings(dev->of_node, "clock-names");
4144
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004145 if (pwr->num_clocks < 1) {
4146 pwr->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004147 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07004148 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004149
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004150 pwr->clocks = devm_kzalloc(
4151 dev, sizeof(*pwr->clocks) * pwr->num_clocks,
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004152 GFP_KERNEL);
4153
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004154 if (!pwr->clocks)
4155 return -ENOMEM;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004156
4157 i = 0;
4158 of_property_for_each_string(dev->of_node, "clock-names",
4159 prop, cname) {
4160 struct clk *c = devm_clk_get(dev, cname);
4161
4162 if (IS_ERR(c)) {
4163 dev_err(dev, "Couldn't get clock: %s",
4164 cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07004165 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004166 }
4167
4168 if (clk_get_rate(c) == 0) {
4169 long rate = clk_round_rate(c, 1000);
4170
4171 clk_set_rate(c, rate);
4172 }
4173
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004174 pwr->clocks[i] = c;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004175
4176 ++i;
4177 }
4178 return 0;
4179}
4180
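/*
 * Regulator notifier: halt the SMMU just before its power domain is
 * disabled and, once the regulator is re-enabled, restore the secure
 * configuration and resume, so clients always see a quiesced SMMU
 * across the power transition.
 */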
Charan Teja Reddyf8464882017-12-05 20:29:05 +05304181static int regulator_notifier(struct notifier_block *nb,
4182 unsigned long event, void *data)
4183{
4184 int ret = 0;
4185 struct arm_smmu_device *smmu = container_of(nb, struct arm_smmu_device,
4186 regulator_nb);
4187
4188 if (event != REGULATOR_EVENT_PRE_DISABLE &&
4189 event != REGULATOR_EVENT_ENABLE)
4190 return NOTIFY_OK;
4191
4192 ret = arm_smmu_prepare_clocks(smmu->pwr);
4193 if (ret)
4194 goto out;
4195
4196 ret = arm_smmu_power_on_atomic(smmu->pwr);
4197 if (ret)
4198 goto unprepare_clock;
4199
4200 if (event == REGULATOR_EVENT_PRE_DISABLE)
4201 qsmmuv2_halt(smmu);
4202 else if (event == REGULATOR_EVENT_ENABLE) {
Charan Teja Reddyec6f7822018-01-10 17:32:52 +05304203 if (arm_smmu_restore_sec_cfg(smmu, 0))
Charan Teja Reddyf8464882017-12-05 20:29:05 +05304204 goto power_off;
4205 qsmmuv2_resume(smmu);
4206 }
4207power_off:
4208 arm_smmu_power_off_atomic(smmu->pwr);
4209unprepare_clock:
4210 arm_smmu_unprepare_clocks(smmu->pwr);
4211out:
4212 return NOTIFY_OK;
4213}
4214
4215static int register_regulator_notifier(struct arm_smmu_device *smmu)
4216{
4217 struct device *dev = smmu->dev;
4218 struct regulator_bulk_data *consumers;
4219 int ret = 0, num_consumers;
4220 struct arm_smmu_power_resources *pwr = smmu->pwr;
4221
4222 if (!(smmu->options & ARM_SMMU_OPT_HALT))
4223 goto out;
4224
4225 num_consumers = pwr->num_gdscs;
4226 consumers = pwr->gdscs;
4227
4228 if (!num_consumers) {
4229 dev_info(dev, "no regulator info exist for %s\n",
4230 dev_name(dev));
4231 goto out;
4232 }
4233
4234 smmu->regulator_nb.notifier_call = regulator_notifier;
4235 /* Registering the notifier against one gdsc is sufficient, as
4236 * the regulators are enabled/disabled as a group.
4237 */
4238 ret = regulator_register_notifier(consumers[0].consumer,
4239 &smmu->regulator_nb);
4240 if (ret)
4241 dev_err(dev, "Regulator notifier request failed\n");
4242out:
4243 return ret;
4244}
4245
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004246static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004247{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004248 const char *cname;
4249 struct property *prop;
4250 int i, ret = 0;
4251 struct device *dev = pwr->dev;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004252
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004253 pwr->num_gdscs =
4254 of_property_count_strings(dev->of_node, "qcom,regulator-names");
4255
4256 if (pwr->num_gdscs < 1) {
4257 pwr->num_gdscs = 0;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004258 return 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004259 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004260
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004261 pwr->gdscs = devm_kzalloc(
4262 dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
4263
4264 if (!pwr->gdscs)
4265 return -ENOMEM;
4266
Prakash Guptafad87ca2017-05-16 12:13:02 +05304267 if (!of_property_read_u32(dev->of_node,
4268 "qcom,deferred-regulator-disable-delay",
4269 &(pwr->regulator_defer)))
4270 dev_info(dev, "regulator defer delay %d\n",
4271 pwr->regulator_defer);
4272
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004273 i = 0;
4274 of_property_for_each_string(dev->of_node, "qcom,regulator-names",
4275 prop, cname)
Patrick Daly86396be2017-04-17 18:08:45 -07004276 pwr->gdscs[i++].supply = cname;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004277
4278 ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
4279 return ret;
4280}
4281
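/*
 * Optional bus scaling support: register a bus client based on the
 * "qcom,msm-bus" devicetree data so the power on/off paths can vote
 * for bus bandwidth on behalf of the SMMU.
 */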
4282static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
4283{
4284 struct device *dev = pwr->dev;
4285
4286 /* We don't want the bus APIs to print an error message */
4287 if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
4288 dev_dbg(dev, "No bus scaling info\n");
4289 return 0;
4290 }
4291
4292 pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
4293 if (!pwr->bus_dt_data) {
4294 dev_err(dev, "Unable to read bus-scaling from devicetree\n");
4295 return -EINVAL;
4296 }
4297
4298 pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
4299 if (!pwr->bus_client) {
4300 dev_err(dev, "Bus client registration failed\n");
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004301 return -EINVAL;
4302 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004303
4304 return 0;
4305}
4306
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004307/*
4308 * Cleanup done by devm. Any non-devm resources must clean up themselves.
4309 */
4310static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
4311 struct platform_device *pdev)
Patrick Daly2764f952016-09-06 19:22:44 -07004312{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004313 struct arm_smmu_power_resources *pwr;
4314 int ret;
Patrick Daly2764f952016-09-06 19:22:44 -07004315
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004316 pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
4317 if (!pwr)
4318 return ERR_PTR(-ENOMEM);
Patrick Daly2764f952016-09-06 19:22:44 -07004319
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004320 pwr->dev = &pdev->dev;
4321 pwr->pdev = pdev;
4322 mutex_init(&pwr->power_lock);
4323 spin_lock_init(&pwr->clock_refs_lock);
Patrick Daly2764f952016-09-06 19:22:44 -07004324
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004325 ret = arm_smmu_init_clocks(pwr);
4326 if (ret)
4327 return ERR_PTR(ret);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004328
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004329 ret = arm_smmu_init_regulators(pwr);
4330 if (ret)
4331 return ERR_PTR(ret);
4332
4333 ret = arm_smmu_init_bus_scaling(pwr);
4334 if (ret)
4335 return ERR_PTR(ret);
4336
4337 return pwr;
Patrick Daly2764f952016-09-06 19:22:44 -07004338}
4339
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004340/*
Patrick Dalyabeee952017-04-13 18:14:59 -07004341 * Bus APIs are not devm-safe, so the bus client must be unregistered here.
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004342 */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004343static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004344{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004345 msm_bus_scale_unregister_client(pwr->bus_client);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004346}
4347
Will Deacon45ae7cf2013-06-24 18:31:25 +01004348static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
4349{
4350 unsigned long size;
4351 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
4352 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01004353 bool cttw_dt, cttw_reg;
Robin Murphya754fd12016-09-12 17:13:50 +01004354 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004355
Charan Teja Reddyec6f7822018-01-10 17:32:52 +05304356 if (arm_smmu_restore_sec_cfg(smmu, 0))
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304357 return -ENODEV;
4358
Mitchel Humpherysba822582015-10-20 11:37:41 -07004359 dev_dbg(smmu->dev, "probing hardware configuration...\n");
4360 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01004361 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004362
4363 /* ID0 */
4364 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01004365
4366 /* Restrict available stages based on module parameter */
4367 if (force_stage == 1)
4368 id &= ~(ID0_S2TS | ID0_NTS);
4369 else if (force_stage == 2)
4370 id &= ~(ID0_S1TS | ID0_NTS);
4371
Will Deacon45ae7cf2013-06-24 18:31:25 +01004372 if (id & ID0_S1TS) {
4373 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004374 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004375 }
4376
4377 if (id & ID0_S2TS) {
4378 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004379 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004380 }
4381
4382 if (id & ID0_NTS) {
4383 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004384 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004385 }
4386
4387 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01004388 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004389 dev_err(smmu->dev, "\tno translation support!\n");
4390 return -ENODEV;
4391 }
4392
Robin Murphyb7862e32016-04-13 18:13:03 +01004393 if ((id & ID0_S1TS) &&
4394 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00004395 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004396 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00004397 }
4398
Robin Murphybae2c2d2015-07-29 19:46:05 +01004399 /*
4400 * In order for DMA API calls to work properly, we must defer to what
4401 * the DT says about coherency, regardless of what the hardware claims.
4402 * Fortunately, this also opens up a workaround for systems where the
4403 * ID register value has ended up configured incorrectly.
4404 */
4405 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
4406 cttw_reg = !!(id & ID0_CTTW);
4407 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01004408 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01004409 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004410 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01004411 cttw_dt ? "" : "non-");
4412 if (cttw_dt != cttw_reg)
4413 dev_notice(smmu->dev,
4414 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004415
Robin Murphy53867802016-09-12 17:13:48 +01004416 /* Max. number of entries we have for stream matching/indexing */
4417 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
4418 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004419 if (id & ID0_SMS) {
Robin Murphy53867802016-09-12 17:13:48 +01004420 u32 smr;
Patrick Daly937de532016-12-12 18:44:09 -08004421 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004422
4423 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy53867802016-09-12 17:13:48 +01004424 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
4425 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004426 dev_err(smmu->dev,
4427 "stream-matching supported, but no SMRs present!\n");
4428 return -ENODEV;
4429 }
4430
Robin Murphy53867802016-09-12 17:13:48 +01004431 /*
4432 * SMR.ID bits may not be preserved if the corresponding MASK
4433 * bits are set, so check each one separately. We can reject
4434 * masters later if they try to claim IDs outside these masks.
4435 */
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304436 if (!arm_smmu_is_static_cb(smmu)) {
4437 for (i = 0; i < size; i++) {
4438 smr = readl_relaxed(
4439 gr0_base + ARM_SMMU_GR0_SMR(i));
4440 if (!(smr & SMR_VALID))
4441 break;
4442 }
4443 if (i == size) {
4444 dev_err(smmu->dev,
4445 "Unable to compute streamid_masks\n");
4446 return -ENODEV;
4447 }
4448
4449 smr = smmu->streamid_mask << SMR_ID_SHIFT;
4450 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
Patrick Daly937de532016-12-12 18:44:09 -08004451 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304452 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
Patrick Daly937de532016-12-12 18:44:09 -08004453
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304454 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
4455 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
4456 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
4457 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
4458 } else {
4459 smmu->smr_mask_mask = SMR_MASK_MASK;
4460 smmu->streamid_mask = SID_MASK;
4461 }
Dhaval Patel031d7462015-05-09 14:47:29 -07004462
Robin Murphy468f4942016-09-12 17:13:49 +01004463 /* Zero-initialised to mark as invalid */
4464 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
4465 GFP_KERNEL);
4466 if (!smmu->smrs)
4467 return -ENOMEM;
4468
Robin Murphy53867802016-09-12 17:13:48 +01004469 dev_notice(smmu->dev,
4470 "\tstream matching with %lu register groups, mask 0x%x",
4471 size, smmu->smr_mask_mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004472 }
Robin Murphya754fd12016-09-12 17:13:50 +01004473 /* s2cr->type == 0 means translation, so initialise explicitly */
4474 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
4475 GFP_KERNEL);
4476 if (!smmu->s2crs)
4477 return -ENOMEM;
4478 for (i = 0; i < size; i++)
4479 smmu->s2crs[i] = s2cr_init_val;
4480
Robin Murphy53867802016-09-12 17:13:48 +01004481 smmu->num_mapping_groups = size;
Robin Murphy6668f692016-09-12 17:13:54 +01004482 mutex_init(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004483
Robin Murphy7602b872016-04-28 17:12:09 +01004484 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
4485 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
4486 if (!(id & ID0_PTFS_NO_AARCH32S))
4487 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
4488 }
4489
Will Deacon45ae7cf2013-06-24 18:31:25 +01004490 /* ID1 */
4491 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01004492 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004493
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01004494 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00004495 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01004496 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01004497 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07004498 dev_warn(smmu->dev,
4499 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
4500 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004501
Will Deacon518f7132014-11-14 17:17:54 +00004502 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004503 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
4504 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
4505 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
4506 return -ENODEV;
4507 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07004508 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01004509 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01004510 /*
4511 * Cavium CN88xx erratum #27704.
4512 * Ensure ASID and VMID allocation is unique across all SMMUs in
4513 * the system.
4514 */
4515 if (smmu->model == CAVIUM_SMMUV2) {
4516 smmu->cavium_id_base =
4517 atomic_add_return(smmu->num_context_banks,
4518 &cavium_smmu_context_count);
4519 smmu->cavium_id_base -= smmu->num_context_banks;
4520 }
Robin Murphy6549a1f2017-08-08 14:56:14 +01004521 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
4522 sizeof(*smmu->cbs), GFP_KERNEL);
4523 if (!smmu->cbs)
4524 return -ENOMEM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004525
4526 /* ID2 */
4527 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
4528 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00004529 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004530
Will Deacon518f7132014-11-14 17:17:54 +00004531 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01004532 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00004533 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004534
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08004535 if (id & ID2_VMID16)
4536 smmu->features |= ARM_SMMU_FEAT_VMID16;
4537
Robin Murphyf1d84542015-03-04 16:41:05 +00004538 /*
4539 * What the page table walker can address actually depends on which
4540 * descriptor format is in use, but since a) we don't know that yet,
4541 * and b) it can vary per context bank, this will have to do...
4542 */
4543 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
4544 dev_warn(smmu->dev,
4545 "failed to set DMA mask for table walker\n");
4546
Robin Murphyb7862e32016-04-13 18:13:03 +01004547 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00004548 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01004549 if (smmu->version == ARM_SMMU_V1_64K)
4550 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004551 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004552 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00004553 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00004554 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01004555 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00004556 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01004557 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00004558 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01004559 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004560 }
4561
Robin Murphy7602b872016-04-28 17:12:09 +01004562 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01004563 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01004564 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01004565 if (smmu->features &
4566 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01004567 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01004568 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01004569 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01004570 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01004571 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01004572
Robin Murphyd5466352016-05-09 17:20:09 +01004573 if (arm_smmu_ops.pgsize_bitmap == -1UL)
4574 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
4575 else
4576 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004577 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01004578 smmu->pgsize_bitmap);
4579
Will Deacon518f7132014-11-14 17:17:54 +00004580
Will Deacon28d60072014-09-01 16:24:48 +01004581 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004582 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
4583 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01004584
4585 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004586 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
4587 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01004588
Will Deacon45ae7cf2013-06-24 18:31:25 +01004589 return 0;
4590}
4591
Robin Murphy67b65a32016-04-13 18:12:57 +01004592struct arm_smmu_match_data {
4593 enum arm_smmu_arch_version version;
4594 enum arm_smmu_implementation model;
Patrick Dalyd7476202016-09-08 18:23:28 -07004595 struct arm_smmu_arch_ops *arch_ops;
Robin Murphy67b65a32016-04-13 18:12:57 +01004596};
4597
Patrick Dalyd7476202016-09-08 18:23:28 -07004598#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops) \
4599static struct arm_smmu_match_data name = { \
4600.version = ver, \
4601.model = imp, \
4602.arch_ops = ops, \
4603} \
Robin Murphy67b65a32016-04-13 18:12:57 +01004604
Patrick Daly1f8a2882016-09-12 17:32:05 -07004605struct arm_smmu_arch_ops qsmmuv500_arch_ops;
4606
Patrick Dalyd7476202016-09-08 18:23:28 -07004607ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
4608ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
4609ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
4610ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
4611ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
Patrick Dalyad441dd2016-09-15 15:50:46 -07004612ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004613ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
4614 &qsmmuv500_arch_ops);
Robin Murphy67b65a32016-04-13 18:12:57 +01004615
Joerg Roedel09b52692014-10-02 12:24:45 +02004616static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01004617 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
4618 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
4619 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01004620 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01004621 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01004622 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07004623 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Patrick Daly1f8a2882016-09-12 17:32:05 -07004624 { .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
Robin Murphy09360402014-08-28 17:51:59 +01004625 { },
4626};
4627MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
4628
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304629#ifdef CONFIG_MSM_TZ_SMMU
4630int register_iommu_sec_ptbl(void)
4631{
4632 struct device_node *np;
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004633
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304634 for_each_matching_node(np, arm_smmu_of_match)
4635 if (of_find_property(np, "qcom,tz-device-id", NULL) &&
4636 of_device_is_available(np))
4637 break;
4638 if (!np)
4639 return -ENODEV;
4640
4641 of_node_put(np);
4642
4643 return msm_iommu_sec_pgtbl_init();
4644}
4645#endif
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004646static int arm_smmu_of_iommu_configure_fixup(struct device *dev, void *data)
4647{
4648 if (!dev->iommu_fwspec)
4649 of_iommu_configure(dev, dev->of_node);
4650 return 0;
4651}
4652
Patrick Daly000a2f22017-02-13 22:18:12 -08004653static int arm_smmu_add_device_fixup(struct device *dev, void *data)
4654{
4655 struct iommu_ops *ops = data;
4656
4657 ops->add_device(dev);
4658 return 0;
4659}
4660
Patrick Daly1f8a2882016-09-12 17:32:05 -07004661static int qsmmuv500_tbu_register(struct device *dev, void *data);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004662static int arm_smmu_device_dt_probe(struct platform_device *pdev)
4663{
Robin Murphy67b65a32016-04-13 18:12:57 +01004664 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004665 struct resource *res;
4666 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004667 struct device *dev = &pdev->dev;
Robin Murphyd5b41782016-09-14 15:21:39 +01004668 int num_irqs, i, err;
Robin Murphy7e96c742016-09-14 15:26:46 +01004669 bool legacy_binding;
4670
4671 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
4672 if (legacy_binding && !using_generic_binding) {
4673 if (!using_legacy_binding)
4674 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
4675 using_legacy_binding = true;
4676 } else if (!legacy_binding && !using_legacy_binding) {
4677 using_generic_binding = true;
4678 } else {
4679 dev_err(dev, "not probing due to mismatched DT properties\n");
4680 return -ENODEV;
4681 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004682
4683 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
4684 if (!smmu) {
4685 dev_err(dev, "failed to allocate arm_smmu_device\n");
4686 return -ENOMEM;
4687 }
4688 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08004689 spin_lock_init(&smmu->atos_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07004690 idr_init(&smmu->asid_idr);
4691 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004692
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004693 data = of_device_get_match_data(dev);
Robin Murphy67b65a32016-04-13 18:12:57 +01004694 smmu->version = data->version;
4695 smmu->model = data->model;
Patrick Dalyd7476202016-09-08 18:23:28 -07004696 smmu->arch_ops = data->arch_ops;
Robin Murphy09360402014-08-28 17:51:59 +01004697
Will Deacon45ae7cf2013-06-24 18:31:25 +01004698 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Prakash Guptaa87818d2018-02-09 19:24:02 +05304699 if (res == NULL) {
4700 dev_err(dev, "no MEM resource info\n");
4701 return -EINVAL;
4702 }
4703
4704 smmu->phys_addr = res->start;
Julia Lawall8a7f4312013-08-19 12:20:37 +01004705 smmu->base = devm_ioremap_resource(dev, res);
4706 if (IS_ERR(smmu->base))
4707 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004708 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004709
4710 if (of_property_read_u32(dev->of_node, "#global-interrupts",
4711 &smmu->num_global_irqs)) {
4712 dev_err(dev, "missing #global-interrupts property\n");
4713 return -ENODEV;
4714 }
4715
4716 num_irqs = 0;
4717 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
4718 num_irqs++;
4719 if (num_irqs > smmu->num_global_irqs)
4720 smmu->num_context_irqs++;
4721 }
4722
Andreas Herrmann44a08de2013-10-01 13:39:07 +01004723 if (!smmu->num_context_irqs) {
4724 dev_err(dev, "found %d interrupts but expected at least %d\n",
4725 num_irqs, smmu->num_global_irqs + 1);
4726 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004727 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004728
4729 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
4730 GFP_KERNEL);
4731 if (!smmu->irqs) {
4732 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
4733 return -ENOMEM;
4734 }
4735
4736 for (i = 0; i < num_irqs; ++i) {
4737 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07004738
Will Deacon45ae7cf2013-06-24 18:31:25 +01004739 if (irq < 0) {
4740 dev_err(dev, "failed to get irq index %d\n", i);
4741 return -ENODEV;
4742 }
4743 smmu->irqs[i] = irq;
4744 }
4745
Dhaval Patel031d7462015-05-09 14:47:29 -07004746 parse_driver_options(smmu);
Patrick Dalyaddf1f82018-04-23 14:39:19 -07004747 parse_static_cb_cfg(smmu);
Dhaval Patel031d7462015-05-09 14:47:29 -07004748
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004749 smmu->pwr = arm_smmu_init_power_resources(pdev);
4750 if (IS_ERR(smmu->pwr))
4751 return PTR_ERR(smmu->pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004752
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004753 err = arm_smmu_power_on(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004754 if (err)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004755 goto out_exit_power_resources;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004756
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304757 smmu->sec_id = msm_dev_to_device_id(dev);
Charan Teja Reddy65ff5e42018-02-19 15:32:28 +05304758 INIT_LIST_HEAD(&smmu->list);
4759 spin_lock(&arm_smmu_devices_lock);
4760 list_add(&smmu->list, &arm_smmu_devices);
4761 spin_unlock(&arm_smmu_devices_lock);
4762
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004763 err = arm_smmu_device_cfg_probe(smmu);
4764 if (err)
4765 goto out_power_off;
4766
Patrick Dalyda688822017-05-17 20:12:48 -07004767 err = arm_smmu_handoff_cbs(smmu);
4768 if (err)
4769 goto out_power_off;
4770
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004771 err = arm_smmu_parse_impl_def_registers(smmu);
4772 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004773 goto out_power_off;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004774
Robin Murphyb7862e32016-04-13 18:13:03 +01004775 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01004776 smmu->num_context_banks != smmu->num_context_irqs) {
4777 dev_err(dev,
Mitchel Humpherys73230ce2014-12-11 17:18:02 -08004778 "found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
4779 smmu->num_context_irqs, smmu->num_context_banks,
4780 smmu->num_context_banks);
4781 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004782 }
4783
Will Deacon45ae7cf2013-06-24 18:31:25 +01004784 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08004785 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
4786 NULL, arm_smmu_global_fault,
4787 IRQF_ONESHOT | IRQF_SHARED,
4788 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004789 if (err) {
4790 dev_err(dev, "failed to request global IRQ %d (%u)\n",
4791 i, smmu->irqs[i]);
Robin Murphyd5b41782016-09-14 15:21:39 +01004792 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004793 }
4794 }
4795
Patrick Dalyd7476202016-09-08 18:23:28 -07004796 err = arm_smmu_arch_init(smmu);
4797 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004798 goto out_power_off;
Patrick Dalyd7476202016-09-08 18:23:28 -07004799
Robin Murphy06e393e2016-09-12 17:13:55 +01004800 of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004801 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01004802 arm_smmu_device_reset(smmu);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004803 arm_smmu_power_off(smmu->pwr);
Patrick Dalyd7476202016-09-08 18:23:28 -07004804
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004805 /* bus_set_iommu depends on this. */
4806 bus_for_each_dev(&platform_bus_type, NULL, NULL,
4807 arm_smmu_of_iommu_configure_fixup);
4808
Robin Murphy7e96c742016-09-14 15:26:46 +01004809 /* Oh, for a proper bus abstraction */
4810 if (!iommu_present(&platform_bus_type))
4811 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
Patrick Daly000a2f22017-02-13 22:18:12 -08004812 else
4813 bus_for_each_dev(&platform_bus_type, NULL, &arm_smmu_ops,
4814 arm_smmu_add_device_fixup);
Charan Teja Reddyf8464882017-12-05 20:29:05 +05304815
4816 err = register_regulator_notifier(smmu);
4817 if (err)
4818 goto out_power_off;
4819
Robin Murphy7e96c742016-09-14 15:26:46 +01004820#ifdef CONFIG_ARM_AMBA
4821 if (!iommu_present(&amba_bustype))
4822 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
4823#endif
4824#ifdef CONFIG_PCI
4825 if (!iommu_present(&pci_bus_type)) {
4826 pci_request_acs();
4827 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
4828 }
4829#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01004830 return 0;
4831
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004832out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004833 arm_smmu_power_off(smmu->pwr);
Charan Teja Reddy65ff5e42018-02-19 15:32:28 +05304834 spin_lock(&arm_smmu_devices_lock);
4835 list_del(&smmu->list);
4836 spin_unlock(&arm_smmu_devices_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004837
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004838out_exit_power_resources:
4839 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004840
Will Deacon45ae7cf2013-06-24 18:31:25 +01004841 return err;
4842}
4843
4844static int arm_smmu_device_remove(struct platform_device *pdev)
4845{
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004846 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004847
4848 if (!smmu)
4849 return -ENODEV;
4850
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004851 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07004852 return -EINVAL;
4853
Patrick Dalyaddf1f82018-04-23 14:39:19 -07004854 if (!(bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS) &&
4855 (bitmap_empty(smmu->secure_context_map, ARM_SMMU_MAX_CBS) ||
4856 arm_smmu_opt_hibernation(smmu))))
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004857 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004858
Patrick Dalyc190d932016-08-30 17:23:28 -07004859 idr_destroy(&smmu->asid_idr);
4860
Will Deacon45ae7cf2013-06-24 18:31:25 +01004861 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07004862 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004863 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004864
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004865 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004866
Prakash Gupta56c447b2018-07-27 14:09:46 +05304867 spin_lock(&arm_smmu_devices_lock);
4868 list_del(&smmu->list);
4869 spin_unlock(&arm_smmu_devices_lock);
4870
Will Deacon45ae7cf2013-06-24 18:31:25 +01004871 return 0;
4872}
4873
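/*
 * Hibernation support: freezing is refused unless the target opts in
 * (arm_smmu_opt_hibernation()); on restore the SMMU is reprogrammed
 * from scratch since register state cannot be assumed to survive the
 * power cycle.
 */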
Patrick Dalyaddf1f82018-04-23 14:39:19 -07004874static int arm_smmu_pm_freeze(struct device *dev)
4875{
4876 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
4877
4878 if (!arm_smmu_opt_hibernation(smmu)) {
4879 dev_err(smmu->dev, "Aborting: Hibernation not supported\n");
4880 return -EINVAL;
4881 }
4882 return 0;
4883}
4884
4885static int arm_smmu_pm_restore(struct device *dev)
4886{
4887 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
4888 int ret;
4889
4890 ret = arm_smmu_power_on(smmu->pwr);
4891 if (ret)
4892 return ret;
4893
4894 arm_smmu_device_reset(smmu);
4895 arm_smmu_power_off(smmu->pwr);
4896 return 0;
4897}
4898
4899static const struct dev_pm_ops arm_smmu_pm_ops = {
4900#ifdef CONFIG_PM_SLEEP
4901 .freeze = arm_smmu_pm_freeze,
4902 .restore = arm_smmu_pm_restore,
4903#endif
4904};
4905
Will Deacon45ae7cf2013-06-24 18:31:25 +01004906static struct platform_driver arm_smmu_driver = {
4907 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004908 .name = "arm-smmu",
4909 .of_match_table = of_match_ptr(arm_smmu_of_match),
Patrick Dalyaddf1f82018-04-23 14:39:19 -07004910 .pm = &arm_smmu_pm_ops,
Will Deacon45ae7cf2013-06-24 18:31:25 +01004911 },
4912 .probe = arm_smmu_device_dt_probe,
4913 .remove = arm_smmu_device_remove,
4914};
4915
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004916static struct platform_driver qsmmuv500_tbu_driver;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004917static int __init arm_smmu_init(void)
4918{
Robin Murphy7e96c742016-09-14 15:26:46 +01004919 static bool registered;
4920 int ret = 0;
Patrick Dalyaddf1f82018-04-23 14:39:19 -07004921 struct device_node *node;
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004922 ktime_t cur;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004923
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004924 if (registered)
4925 return 0;
4926
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004927 cur = ktime_get();
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004928 ret = platform_driver_register(&qsmmuv500_tbu_driver);
4929 if (ret)
4930 return ret;
4931
4932 ret = platform_driver_register(&arm_smmu_driver);
Patrick Dalyaddf1f82018-04-23 14:39:19 -07004933 /* Disable secure usecases if hibernation support is enabled */
4934 node = of_find_compatible_node(NULL, NULL, "qcom,qsmmu-v500");
4935 if (IS_ENABLED(CONFIG_MSM_TZ_SMMU) && node &&
4936 !of_find_property(node, "qcom,hibernation-support", NULL))
4937 ret = register_iommu_sec_ptbl();
4938
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004939 registered = !ret;
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004940 trace_smmu_init(ktime_us_delta(ktime_get(), cur));
4941
Robin Murphy7e96c742016-09-14 15:26:46 +01004942 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004943}
4944
4945static void __exit arm_smmu_exit(void)
4946{
4947 return platform_driver_unregister(&arm_smmu_driver);
4948}
4949
Andreas Herrmannb1950b22013-10-01 13:39:05 +01004950subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004951module_exit(arm_smmu_exit);
4952
Robin Murphy7e96c742016-09-14 15:26:46 +01004953static int __init arm_smmu_of_init(struct device_node *np)
4954{
4955 int ret = arm_smmu_init();
4956
4957 if (ret)
4958 return ret;
4959
4960 if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
4961 return -ENODEV;
4962
4963 return 0;
4964}
4965IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
4966IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
4967IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
4968IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
4969IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
4970IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
Robin Murphy7e96c742016-09-14 15:26:46 +01004971
Patrick Dalya0fddb62017-03-27 19:26:59 -07004972#define TCU_HW_VERSION_HLOS1 (0x18)
4973
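/*
 * Per-TBU debug/ECATS interface: the offsets and fields below are used
 * to halt a TBU, trigger a software address translation and read back
 * the resulting physical address or fault indication.
 */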
Patrick Daly1f8a2882016-09-12 17:32:05 -07004974#define DEBUG_SID_HALT_REG 0x0
4975#define DEBUG_SID_HALT_VAL (0x1 << 16)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004976#define DEBUG_SID_HALT_SID_MASK 0x3ff
4977
4978#define DEBUG_VA_ADDR_REG 0x8
4979
4980#define DEBUG_TXN_TRIGG_REG 0x18
4981#define DEBUG_TXN_AXPROT_SHIFT 6
4982#define DEBUG_TXN_AXCACHE_SHIFT 2
4983#define DEBUG_TRX_WRITE (0x1 << 1)
4984#define DEBUG_TXN_READ (0x0 << 1)
4985#define DEBUG_TXN_TRIGGER 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004986
4987#define DEBUG_SR_HALT_ACK_REG 0x20
4988#define DEBUG_SR_HALT_ACK_VAL (0x1 << 1)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004989#define DEBUG_SR_ECATS_RUNNING_VAL (0x1 << 0)
4990
4991#define DEBUG_PAR_REG 0x28
4992#define DEBUG_PAR_PA_MASK ((0x1ULL << 36) - 1)
4993#define DEBUG_PAR_PA_SHIFT 12
4994#define DEBUG_PAR_FAULT_VAL 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004995
Patrick Daly8c1202b2017-05-10 15:42:30 -07004996#define TBU_DBG_TIMEOUT_US 100
Patrick Daly1f8a2882016-09-12 17:32:05 -07004997
Patrick Daly23301482017-10-12 16:18:25 -07004998#define QSMMUV500_ACTLR_DEEP_PREFETCH_MASK 0x3
4999#define QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT 0x8
5000
Patrick Daly03330cc2017-08-11 14:56:38 -07005001
5002struct actlr_setting {
5003 struct arm_smmu_smr smr;
5004 u32 actlr;
5005};
5006
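/*
 * QSMMUV500 per-SMMU private data: the list of attached TBUs, a
 * mapping of the TCU register space, the ACTLR settings table and the
 * errata-1 client list with its remote spinlock.
 */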
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005007struct qsmmuv500_archdata {
5008 struct list_head tbus;
Patrick Dalya0fddb62017-03-27 19:26:59 -07005009 void __iomem *tcu_base;
5010 u32 version;
Patrick Dalyda765c62017-09-11 16:31:07 -07005011
5012 struct actlr_setting *actlrs;
5013 u32 actlr_tbl_size;
5014
5015 struct arm_smmu_smr *errata1_clients;
5016 u32 num_errata1_clients;
5017 remote_spinlock_t errata1_lock;
5018 ktime_t last_tlbi_ktime;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005019};
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005020#define get_qsmmuv500_archdata(smmu) \
5021 ((struct qsmmuv500_archdata *)(smmu->archdata))
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005022
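/*
 * State for one TBU child device: the stream ID range it serves, its
 * register mappings, power resources, and a reference-counted halt
 * state.
 */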
Patrick Daly1f8a2882016-09-12 17:32:05 -07005023struct qsmmuv500_tbu_device {
5024 struct list_head list;
5025 struct device *dev;
5026 struct arm_smmu_device *smmu;
5027 void __iomem *base;
5028 void __iomem *status_reg;
5029
5030 struct arm_smmu_power_resources *pwr;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005031 u32 sid_start;
5032 u32 num_sids;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005033
5034 /* Protects halt count */
5035 spinlock_t halt_lock;
5036 u32 halt_count;
5037};
5038
Patrick Daly03330cc2017-08-11 14:56:38 -07005039struct qsmmuv500_group_iommudata {
5040 bool has_actlr;
5041 u32 actlr;
5042};
5043#define to_qsmmuv500_group_iommudata(group) \
5044 ((struct qsmmuv500_group_iommudata *) \
5045 (iommu_group_get_iommudata(group)))
5046
5047
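/*
 * Return true if any stream mapping entry of this master could alias
 * the given SMR, i.e. the IDs match in every bit that is not covered
 * by either entry's mask.
 */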
5048static bool arm_smmu_fwspec_match_smr(struct iommu_fwspec *fwspec,
Patrick Dalyda765c62017-09-11 16:31:07 -07005049 struct arm_smmu_smr *smr)
5050{
5051 struct arm_smmu_smr *smr2;
Patrick Daly03330cc2017-08-11 14:56:38 -07005052 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Patrick Dalyda765c62017-09-11 16:31:07 -07005053 int i, idx;
5054
Patrick Daly03330cc2017-08-11 14:56:38 -07005055 for_each_cfg_sme(fwspec, i, idx) {
5056 smr2 = &smmu->smrs[idx];
Patrick Dalyda765c62017-09-11 16:31:07 -07005057 /* Continue if table entry does not match */
5058 if ((smr->id ^ smr2->id) & ~(smr->mask | smr2->mask))
5059 continue;
5060 return true;
5061 }
5062 return false;
5063}
5064
5065#define ERRATA1_REMOTE_SPINLOCK "S:6"
5066#define ERRATA1_TLBI_INTERVAL_US 10
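/*
 * Determine (and cache on the domain) whether any of the domain's
 * stream IDs appears in the target's errata-1 client list; such
 * domains need the special TLB invalidation sequence below.
 */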
5067static bool
5068qsmmuv500_errata1_required(struct arm_smmu_domain *smmu_domain,
5069 struct qsmmuv500_archdata *data)
5070{
5071 bool ret = false;
5072 int j;
5073 struct arm_smmu_smr *smr;
Patrick Daly03330cc2017-08-11 14:56:38 -07005074 struct iommu_fwspec *fwspec;
Patrick Dalyda765c62017-09-11 16:31:07 -07005075
5076 if (smmu_domain->qsmmuv500_errata1_init)
5077 return smmu_domain->qsmmuv500_errata1_client;
5078
Patrick Daly03330cc2017-08-11 14:56:38 -07005079 fwspec = smmu_domain->dev->iommu_fwspec;
Patrick Dalyda765c62017-09-11 16:31:07 -07005080 for (j = 0; j < data->num_errata1_clients; j++) {
5081 smr = &data->errata1_clients[j];
Patrick Daly03330cc2017-08-11 14:56:38 -07005082 if (arm_smmu_fwspec_match_smr(fwspec, smr)) {
Patrick Dalyda765c62017-09-11 16:31:07 -07005083 ret = true;
5084 break;
5085 }
5086 }
5087
5088 smmu_domain->qsmmuv500_errata1_init = true;
5089 smmu_domain->qsmmuv500_errata1_client = ret;
5090 return ret;
5091}
5092
Patrick Daly86960052017-12-04 18:53:13 -08005093#define SCM_CONFIG_ERRATA1_CLIENT_ALL 0x2
5094#define SCM_CONFIG_ERRATA1 0x3
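/*
 * Errata-1 TLB invalidation sequence: issue TLBIALL + TLBSYNC and give the
 * sync a short window to complete on its own. If it does not, call into TZ
 * (SCM_SVC_SMMU_PROGRAM / SCM_CONFIG_ERRATA1) to relax the errata
 * configuration, throttle the NoC while waiting for the sync to drain, then
 * restore both the throttle and the TZ configuration. Failure at any of
 * these steps leaves the IOMMU hardware in an unrecoverable state, hence
 * the BUG()s below.
 */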
Patrick Dalyda765c62017-09-11 16:31:07 -07005095static void __qsmmuv500_errata1_tlbiall(struct arm_smmu_domain *smmu_domain)
5096{
5097 struct arm_smmu_device *smmu = smmu_domain->smmu;
5098 struct device *dev = smmu_domain->dev;
5099 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
5100 void __iomem *base;
Patrick Daly86960052017-12-04 18:53:13 -08005101 int ret;
Patrick Dalyda765c62017-09-11 16:31:07 -07005102 ktime_t cur;
5103 u32 val;
Patrick Daly86960052017-12-04 18:53:13 -08005104 struct scm_desc desc = {
5105 .args[0] = SCM_CONFIG_ERRATA1_CLIENT_ALL,
5106 .args[1] = false,
5107 .arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
5108 };
Patrick Dalyda765c62017-09-11 16:31:07 -07005109
5110 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
5111 writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
5112 writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
Patrick Daly86960052017-12-04 18:53:13 -08005113 if (!readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
5114 !(val & TLBSTATUS_SACTIVE), 0, 100))
5115 return;
5116
5117 ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_SMMU_PROGRAM,
5118 SCM_CONFIG_ERRATA1),
5119 &desc);
5120 if (ret) {
5121 dev_err(smmu->dev, "Calling into TZ to disable ERRATA1 failed - IOMMU hardware in bad state\n");
5122 BUG();
5123 return;
5124 }
5125
5126 cur = ktime_get();
5127 trace_tlbi_throttle_start(dev, 0);
5128 msm_bus_noc_throttle_wa(true);
5129
Patrick Dalyda765c62017-09-11 16:31:07 -07005130 if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
Patrick Daly86960052017-12-04 18:53:13 -08005131 !(val & TLBSTATUS_SACTIVE), 0, 10000)) {
5132		dev_err(smmu->dev, "ERRATA1 TLBSYNC timeout - IOMMU hardware in bad state\n");
5133 trace_tlbsync_timeout(dev, 0);
5134 BUG();
5135 }
Patrick Dalyda765c62017-09-11 16:31:07 -07005136
Patrick Daly86960052017-12-04 18:53:13 -08005137 msm_bus_noc_throttle_wa(false);
5138 trace_tlbi_throttle_end(dev, ktime_us_delta(ktime_get(), cur));
Patrick Dalyda765c62017-09-11 16:31:07 -07005139
Patrick Daly86960052017-12-04 18:53:13 -08005140 desc.args[1] = true;
5141 ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_SMMU_PROGRAM,
5142 SCM_CONFIG_ERRATA1),
5143 &desc);
5144 if (ret) {
5145 dev_err(smmu->dev, "Calling into TZ to reenable ERRATA1 failed - IOMMU hardware in bad state\n");
5146 BUG();
Patrick Dalyda765c62017-09-11 16:31:07 -07005147 }
5148}
5149
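/*
 * tlb_flush_all hook used for errata-1 capable SMMUs. For affected domains,
 * consecutive invalidations are spaced at least ERRATA1_TLBI_INTERVAL_US
 * apart; both paths take the errata1_lock remote spinlock to serialise the
 * invalidation against other users of the lock.
 */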
5150/* Must be called with clocks/regulators enabled */
5151static void qsmmuv500_errata1_tlb_inv_context(void *cookie)
5152{
5153 struct arm_smmu_domain *smmu_domain = cookie;
5154 struct device *dev = smmu_domain->dev;
5155 struct qsmmuv500_archdata *data =
5156 get_qsmmuv500_archdata(smmu_domain->smmu);
5157 ktime_t cur;
Patrick Daly1faa3112017-10-31 16:40:40 -07005158 unsigned long flags;
Patrick Dalyda765c62017-09-11 16:31:07 -07005159 bool errata;
5160
5161 cur = ktime_get();
Prakash Gupta25f90512017-11-20 14:56:54 +05305162 trace_tlbi_start(dev, 0);
Patrick Dalyda765c62017-09-11 16:31:07 -07005163
5164 errata = qsmmuv500_errata1_required(smmu_domain, data);
Patrick Daly1faa3112017-10-31 16:40:40 -07005165 remote_spin_lock_irqsave(&data->errata1_lock, flags);
Patrick Dalyda765c62017-09-11 16:31:07 -07005166 if (errata) {
5167 s64 delta;
5168
5169 delta = ktime_us_delta(ktime_get(), data->last_tlbi_ktime);
5170 if (delta < ERRATA1_TLBI_INTERVAL_US)
5171 udelay(ERRATA1_TLBI_INTERVAL_US - delta);
5172
5173 __qsmmuv500_errata1_tlbiall(smmu_domain);
5174
5175 data->last_tlbi_ktime = ktime_get();
5176 } else {
5177 __qsmmuv500_errata1_tlbiall(smmu_domain);
5178 }
Patrick Daly1faa3112017-10-31 16:40:40 -07005179 remote_spin_unlock_irqrestore(&data->errata1_lock, flags);
Patrick Dalyda765c62017-09-11 16:31:07 -07005180
Prakash Gupta25f90512017-11-20 14:56:54 +05305181 trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
Patrick Dalyda765c62017-09-11 16:31:07 -07005182}
5183
5184static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops = {
5185 .tlb_flush_all = qsmmuv500_errata1_tlb_inv_context,
5186 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
5187 .free_pages_exact = arm_smmu_free_pages_exact,
5188};
5189
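/*
 * Halt a TBU so debug (ECATS) transactions can be issued safely. Halts are
 * reference counted through halt_count. The halt request is acknowledged in
 * DEBUG_SR_HALT_ACK_REG; if the context bank is stalled on a fault, fault
 * reporting (SCTLR.CFCFG/CFIE) is temporarily disabled and the stalled
 * transaction terminated so the halt can complete.
 */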
Patrick Daly8c1202b2017-05-10 15:42:30 -07005190static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu,
5191 struct arm_smmu_domain *smmu_domain)
Patrick Daly1f8a2882016-09-12 17:32:05 -07005192{
5193 unsigned long flags;
Patrick Daly8c1202b2017-05-10 15:42:30 -07005194 u32 halt, fsr, sctlr_orig, sctlr, status;
5195 void __iomem *base, *cb_base;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005196
5197 spin_lock_irqsave(&tbu->halt_lock, flags);
5198 if (tbu->halt_count) {
5199 tbu->halt_count++;
5200 spin_unlock_irqrestore(&tbu->halt_lock, flags);
5201 return 0;
5202 }
5203
Patrick Daly8c1202b2017-05-10 15:42:30 -07005204 cb_base = ARM_SMMU_CB_BASE(smmu_domain->smmu) +
5205 ARM_SMMU_CB(smmu_domain->smmu, smmu_domain->cfg.cbndx);
Patrick Daly1f8a2882016-09-12 17:32:05 -07005206 base = tbu->base;
Patrick Daly8c1202b2017-05-10 15:42:30 -07005207 halt = readl_relaxed(base + DEBUG_SID_HALT_REG);
5208 halt |= DEBUG_SID_HALT_VAL;
5209 writel_relaxed(halt, base + DEBUG_SID_HALT_REG);
Patrick Daly1f8a2882016-09-12 17:32:05 -07005210
Patrick Daly8c1202b2017-05-10 15:42:30 -07005211 if (!readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
5212 (status & DEBUG_SR_HALT_ACK_VAL),
5213 0, TBU_DBG_TIMEOUT_US))
5214 goto out;
5215
5216 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
5217 if (!(fsr & FSR_FAULT)) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07005218 dev_err(tbu->dev, "Couldn't halt TBU!\n");
5219 spin_unlock_irqrestore(&tbu->halt_lock, flags);
5220 return -ETIMEDOUT;
5221 }
5222
Patrick Daly8c1202b2017-05-10 15:42:30 -07005223 /*
5224	 * We are in a fault; our request to halt the bus will not complete
5225 * until transactions in front of us (such as the fault itself) have
5226 * completed. Disable iommu faults and terminate any existing
5227 * transactions.
5228 */
5229 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
5230 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
5231 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
5232
5233 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
5234 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
5235
5236 if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
5237 (status & DEBUG_SR_HALT_ACK_VAL),
5238 0, TBU_DBG_TIMEOUT_US)) {
5239 dev_err(tbu->dev, "Couldn't halt TBU from fault context!\n");
5240 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
5241 spin_unlock_irqrestore(&tbu->halt_lock, flags);
5242 return -ETIMEDOUT;
5243 }
5244
5245 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
5246out:
Patrick Daly1f8a2882016-09-12 17:32:05 -07005247 tbu->halt_count = 1;
5248 spin_unlock_irqrestore(&tbu->halt_lock, flags);
5249 return 0;
5250}
5251
5252static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
5253{
5254 unsigned long flags;
5255 u32 val;
5256 void __iomem *base;
5257
5258 spin_lock_irqsave(&tbu->halt_lock, flags);
5259 if (!tbu->halt_count) {
5260 WARN(1, "%s: bad tbu->halt_count", dev_name(tbu->dev));
5261 spin_unlock_irqrestore(&tbu->halt_lock, flags);
5262 return;
5263
5264 } else if (tbu->halt_count > 1) {
5265 tbu->halt_count--;
5266 spin_unlock_irqrestore(&tbu->halt_lock, flags);
5267 return;
5268 }
5269
5270 base = tbu->base;
5271 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
5272 val &= ~DEBUG_SID_HALT_VAL;
5273 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
5274
5275 tbu->halt_count = 0;
5276 spin_unlock_irqrestore(&tbu->halt_lock, flags);
5277}
5278
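/*
 * Look up the TBU that owns a stream ID, i.e. the one whose
 * [sid_start, sid_start + num_sids) range contains it.
 */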
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005279static struct qsmmuv500_tbu_device *qsmmuv500_find_tbu(
5280 struct arm_smmu_device *smmu, u32 sid)
5281{
5282 struct qsmmuv500_tbu_device *tbu = NULL;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005283 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005284
5285 list_for_each_entry(tbu, &data->tbus, list) {
5286 if (tbu->sid_start <= sid &&
5287 sid < tbu->sid_start + tbu->num_sids)
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07005288 return tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005289 }
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07005290 return NULL;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005291}
5292
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005293static int qsmmuv500_ecats_lock(struct arm_smmu_domain *smmu_domain,
5294 struct qsmmuv500_tbu_device *tbu,
5295 unsigned long *flags)
5296{
5297 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005298 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005299 u32 val;
5300
5301 spin_lock_irqsave(&smmu->atos_lock, *flags);
5302 /* The status register is not accessible on version 1.0 */
5303 if (data->version == 0x01000000)
5304 return 0;
5305
5306 if (readl_poll_timeout_atomic(tbu->status_reg,
5307 val, (val == 0x1), 0,
5308 TBU_DBG_TIMEOUT_US)) {
5309 dev_err(tbu->dev, "ECATS hw busy!\n");
5310 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
5311 return -ETIMEDOUT;
5312 }
5313
5314 return 0;
5315}
5316
5317static void qsmmuv500_ecats_unlock(struct arm_smmu_domain *smmu_domain,
5318 struct qsmmuv500_tbu_device *tbu,
5319 unsigned long *flags)
5320{
5321 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005322 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005323
5324 /* The status register is not accessible on version 1.0 */
5325 if (data->version != 0x01000000)
5326 writel_relaxed(0, tbu->status_reg);
5327 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
5328}
5329
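/*
 * Perform a hardware (ECATS) translation through the TBU debug interface:
 * halt the TBU, mask context-bank fault reporting, program the stream ID and
 * VA into the DEBUG_* registers, trigger a debug read transaction, and poll
 * until ECATS completes or a fault/timeout is observed. The physical address
 * is then read back from DEBUG_PAR_REG. A failed attempt is retried a couple
 * of times because the first translation after a failure can be misreported.
 */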
5330/*
5331 * Zero means failure.
5332 */
5333static phys_addr_t qsmmuv500_iova_to_phys(
5334 struct iommu_domain *domain, dma_addr_t iova, u32 sid)
5335{
5336 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
5337 struct arm_smmu_device *smmu = smmu_domain->smmu;
5338 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
5339 struct qsmmuv500_tbu_device *tbu;
5340 int ret;
5341 phys_addr_t phys = 0;
5342 u64 val, fsr;
5343 unsigned long flags;
5344 void __iomem *cb_base;
5345 u32 sctlr_orig, sctlr;
5346 int needs_redo = 0;
Patrick Daly8c1202b2017-05-10 15:42:30 -07005347 ktime_t timeout;
5348
5349 /* only 36 bit iova is supported */
5350 if (iova >= (1ULL << 36)) {
5351 dev_err_ratelimited(smmu->dev, "ECATS: address too large: %pad\n",
5352 &iova);
5353 return 0;
5354 }
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005355
5356 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
5357 tbu = qsmmuv500_find_tbu(smmu, sid);
5358 if (!tbu)
5359 return 0;
5360
5361 ret = arm_smmu_power_on(tbu->pwr);
5362 if (ret)
5363 return 0;
5364
Patrick Daly8c1202b2017-05-10 15:42:30 -07005365 ret = qsmmuv500_tbu_halt(tbu, smmu_domain);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005366 if (ret)
5367 goto out_power_off;
5368
Patrick Daly8c1202b2017-05-10 15:42:30 -07005369 /*
5370 * ECATS can trigger the fault interrupt, so disable it temporarily
5371 * and check for an interrupt manually.
5372 */
5373 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
5374 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
5375 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
5376
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005377 /* Only one concurrent atos operation */
5378 ret = qsmmuv500_ecats_lock(smmu_domain, tbu, &flags);
5379 if (ret)
5380 goto out_resume;
5381
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005382redo:
5383 /* Set address and stream-id */
5384 val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
5385 val |= sid & DEBUG_SID_HALT_SID_MASK;
5386 writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
5387 writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
5388
5389 /*
5390 * Write-back Read and Write-Allocate
5391	 * Privileged, nonsecure, data transaction
5392 * Read operation.
5393 */
5394 val = 0xF << DEBUG_TXN_AXCACHE_SHIFT;
5395 val |= 0x3 << DEBUG_TXN_AXPROT_SHIFT;
5396 val |= DEBUG_TXN_TRIGGER;
5397 writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);
5398
5399 ret = 0;
Patrick Daly8c1202b2017-05-10 15:42:30 -07005400	/* Based on readx_poll_timeout_atomic() */
5401 timeout = ktime_add_us(ktime_get(), TBU_DBG_TIMEOUT_US);
5402 for (;;) {
5403 val = readl_relaxed(tbu->base + DEBUG_SR_HALT_ACK_REG);
5404 if (!(val & DEBUG_SR_ECATS_RUNNING_VAL))
5405 break;
5406 val = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
5407 if (val & FSR_FAULT)
5408 break;
5409 if (ktime_compare(ktime_get(), timeout) > 0) {
5410 dev_err(tbu->dev, "ECATS translation timed out!\n");
5411 ret = -ETIMEDOUT;
5412 break;
5413 }
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005414 }
5415
5416 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
5417 if (fsr & FSR_FAULT) {
5418 dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx\n",
Patrick Daly8c1202b2017-05-10 15:42:30 -07005419 fsr);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005420 ret = -EINVAL;
5421
5422		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
5423 /*
5424 * Clear pending interrupts
5425 * Barrier required to ensure that the FSR is cleared
5426 * before resuming SMMU operation
5427 */
5428 wmb();
5429 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
5430 }
5431
5432 val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
5433 if (val & DEBUG_PAR_FAULT_VAL) {
5434 dev_err(tbu->dev, "ECATS translation failed! PAR = %llx\n",
5435 val);
5436 ret = -EINVAL;
5437 }
5438
5439 phys = (val >> DEBUG_PAR_PA_SHIFT) & DEBUG_PAR_PA_MASK;
5440 if (ret < 0)
5441 phys = 0;
5442
5443 /* Reset hardware */
5444 writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
5445 writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
5446
5447 /*
5448 * After a failed translation, the next successful translation will
5449 * incorrectly be reported as a failure.
5450 */
5451 if (!phys && needs_redo++ < 2)
5452 goto redo;
5453
5454 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
5455 qsmmuv500_ecats_unlock(smmu_domain, tbu, &flags);
5456
5457out_resume:
5458 qsmmuv500_tbu_resume(tbu);
5459
5460out_power_off:
5461 arm_smmu_power_off(tbu->pwr);
5462
5463 return phys;
5464}
5465
5466static phys_addr_t qsmmuv500_iova_to_phys_hard(
5467 struct iommu_domain *domain, dma_addr_t iova)
5468{
5469 u16 sid;
5470 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
5471 struct iommu_fwspec *fwspec;
5472
5473 /* Select a sid */
5474 fwspec = smmu_domain->dev->iommu_fwspec;
5475 sid = (u16)fwspec->ids[0];
5476
5477 return qsmmuv500_iova_to_phys(domain, iova, sid);
5478}
5479
Patrick Daly03330cc2017-08-11 14:56:38 -07005480static void qsmmuv500_release_group_iommudata(void *data)
5481{
5482 kfree(data);
5483}
5484
5485/* If devices in a group have ACTLR table entries, they must all match */
5486static int qsmmuv500_device_group(struct device *dev,
5487 struct iommu_group *group)
5488{
5489 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
5490 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
5491 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
5492 struct qsmmuv500_group_iommudata *iommudata;
5493 u32 actlr, i;
5494 struct arm_smmu_smr *smr;
5495
5496 iommudata = to_qsmmuv500_group_iommudata(group);
5497 if (!iommudata) {
5498 iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
5499 if (!iommudata)
5500 return -ENOMEM;
5501
5502 iommu_group_set_iommudata(group, iommudata,
5503 qsmmuv500_release_group_iommudata);
5504 }
5505
5506 for (i = 0; i < data->actlr_tbl_size; i++) {
5507 smr = &data->actlrs[i].smr;
5508 actlr = data->actlrs[i].actlr;
5509
5510 if (!arm_smmu_fwspec_match_smr(fwspec, smr))
5511 continue;
5512
5513 if (!iommudata->has_actlr) {
5514 iommudata->actlr = actlr;
5515 iommudata->has_actlr = true;
5516 } else if (iommudata->actlr != actlr) {
5517 return -EINVAL;
5518 }
5519 }
5520
5521 return 0;
5522}
5523
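/*
 * Apply the group's ACTLR setting to the context-bank state so it is written
 * out with the rest of the CB registers. If the value enables deep prefetch,
 * mappings in this domain must honour the 16 KB minimum alignment that
 * prefetch requires (the errata-2 handling).
 */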
5524static void qsmmuv500_init_cb(struct arm_smmu_domain *smmu_domain,
5525 struct device *dev)
5526{
5527 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyad521082018-04-06 18:07:13 -07005528 struct arm_smmu_cb *cb = &smmu->cbs[smmu_domain->cfg.cbndx];
Patrick Daly03330cc2017-08-11 14:56:38 -07005529 struct qsmmuv500_group_iommudata *iommudata =
5530 to_qsmmuv500_group_iommudata(dev->iommu_group);
Patrick Daly03330cc2017-08-11 14:56:38 -07005531
5532 if (!iommudata->has_actlr)
5533 return;
5534
Patrick Dalyad521082018-04-06 18:07:13 -07005535 cb->actlr = iommudata->actlr;
Patrick Daly25317e82018-05-07 12:35:29 -07005536 cb->has_actlr = true;
Patrick Daly03330cc2017-08-11 14:56:38 -07005537 /*
Patrick Daly23301482017-10-12 16:18:25 -07005538 * Prefetch only works properly if the start and end of all
5539	 * buffers in the page table are aligned to 16 KB.
5540 */
Patrick Daly27bd9292017-11-22 13:59:59 -08005541 if ((iommudata->actlr >> QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT) &
Patrick Daly23301482017-10-12 16:18:25 -07005542 QSMMUV500_ACTLR_DEEP_PREFETCH_MASK)
5543 smmu_domain->qsmmuv500_errata2_min_align = true;
Patrick Daly03330cc2017-08-11 14:56:38 -07005544}
5545
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005546static int qsmmuv500_tbu_register(struct device *dev, void *cookie)
Patrick Daly1f8a2882016-09-12 17:32:05 -07005547{
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005548 struct arm_smmu_device *smmu = cookie;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005549 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005550 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07005551
5552 if (!dev->driver) {
5553 dev_err(dev, "TBU failed probe, QSMMUV500 cannot continue!\n");
5554 return -EINVAL;
5555 }
5556
5557 tbu = dev_get_drvdata(dev);
5558
5559 INIT_LIST_HEAD(&tbu->list);
5560 tbu->smmu = smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005561 list_add(&tbu->list, &data->tbus);
Patrick Daly1f8a2882016-09-12 17:32:05 -07005562 return 0;
5563}
5564
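/*
 * Parse the optional "qcom,mmu500-errata-1" property, a list of
 * (stream ID, mask) pairs naming the clients that need the errata-1 TLBI
 * sequence. A hypothetical example (values made up for illustration):
 *
 *	qcom,mmu500-errata-1 = <0x800 0x3c0>, <0xc01 0x0>;
 *
 * Each pair is stored as an arm_smmu_smr in data->errata1_clients.
 */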
Patrick Dalyda765c62017-09-11 16:31:07 -07005565static int qsmmuv500_parse_errata1(struct arm_smmu_device *smmu)
5566{
5567 int len, i;
5568 struct device *dev = smmu->dev;
5569 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
5570 struct arm_smmu_smr *smrs;
5571 const __be32 *cell;
5572
5573 cell = of_get_property(dev->of_node, "qcom,mmu500-errata-1", NULL);
5574 if (!cell)
5575 return 0;
5576
5577 remote_spin_lock_init(&data->errata1_lock, ERRATA1_REMOTE_SPINLOCK);
5578 len = of_property_count_elems_of_size(
5579 dev->of_node, "qcom,mmu500-errata-1", sizeof(u32) * 2);
5580 if (len < 0)
5581 return 0;
5582
5583 smrs = devm_kzalloc(dev, sizeof(*smrs) * len, GFP_KERNEL);
5584 if (!smrs)
5585 return -ENOMEM;
5586
5587 for (i = 0; i < len; i++) {
5588 smrs[i].id = of_read_number(cell++, 1);
5589 smrs[i].mask = of_read_number(cell++, 1);
5590 }
5591
5592 data->errata1_clients = smrs;
5593 data->num_errata1_clients = len;
5594 return 0;
5595}
5596
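/*
 * Parse the optional "qcom,actlr" property, a list of
 * (stream ID, mask, ACTLR value) triplets. A hypothetical example
 * (values made up for illustration):
 *
 *	qcom,actlr = <0x880 0x8 0x303>;
 *
 * Matching devices later pick up the ACTLR value through
 * qsmmuv500_device_group() and qsmmuv500_init_cb().
 */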
Patrick Daly03330cc2017-08-11 14:56:38 -07005597static int qsmmuv500_read_actlr_tbl(struct arm_smmu_device *smmu)
5598{
5599 int len, i;
5600 struct device *dev = smmu->dev;
5601 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
5602 struct actlr_setting *actlrs;
5603 const __be32 *cell;
5604
5605 cell = of_get_property(dev->of_node, "qcom,actlr", NULL);
5606 if (!cell)
5607 return 0;
5608
5609 len = of_property_count_elems_of_size(dev->of_node, "qcom,actlr",
5610 sizeof(u32) * 3);
5611 if (len < 0)
5612 return 0;
5613
5614 actlrs = devm_kzalloc(dev, sizeof(*actlrs) * len, GFP_KERNEL);
5615 if (!actlrs)
5616 return -ENOMEM;
5617
5618 for (i = 0; i < len; i++) {
5619 actlrs[i].smr.id = of_read_number(cell++, 1);
5620 actlrs[i].smr.mask = of_read_number(cell++, 1);
5621 actlrs[i].actlr = of_read_number(cell++, 1);
5622 }
5623
5624 data->actlrs = actlrs;
5625 data->actlr_tbl_size = len;
5626 return 0;
5627}
5628
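/*
 * Model-specific initialisation for QSMMUV500: map the TCU register space
 * and read its HW version, parse the errata-1 and ACTLR tables, clear the
 * CACHE_LOCK bit in the non-secure sACR (warning if the secure side did not
 * grant permission), then populate and register the child TBU devices.
 */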
Patrick Daly1f8a2882016-09-12 17:32:05 -07005629static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
5630{
Patrick Dalya0fddb62017-03-27 19:26:59 -07005631 struct resource *res;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005632 struct device *dev = smmu->dev;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005633 struct qsmmuv500_archdata *data;
Patrick Dalya0fddb62017-03-27 19:26:59 -07005634 struct platform_device *pdev;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005635 int ret;
Patrick Daly03330cc2017-08-11 14:56:38 -07005636 u32 val;
5637 void __iomem *reg;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005638
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005639 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
5640 if (!data)
Patrick Daly1f8a2882016-09-12 17:32:05 -07005641 return -ENOMEM;
5642
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005643 INIT_LIST_HEAD(&data->tbus);
Patrick Dalya0fddb62017-03-27 19:26:59 -07005644
5645 pdev = container_of(dev, struct platform_device, dev);
5646 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcu-base");
Charan Teja Reddy97fb6c52018-03-20 15:55:37 +05305647 if (!res) {
5648 dev_err(dev, "Unable to get the tcu-base\n");
5649 return -EINVAL;
5650 }
5651 data->tcu_base = devm_ioremap(dev, res->start, resource_size(res));
Patrick Dalya0fddb62017-03-27 19:26:59 -07005652 if (IS_ERR(data->tcu_base))
5653 return PTR_ERR(data->tcu_base);
5654
5655 data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005656 smmu->archdata = data;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005657
Charan Teja Reddy424ed342018-01-18 12:25:06 +05305658 if (arm_smmu_is_static_cb(smmu))
5659 return 0;
5660
Patrick Dalyda765c62017-09-11 16:31:07 -07005661 ret = qsmmuv500_parse_errata1(smmu);
5662 if (ret)
5663 return ret;
5664
Patrick Daly03330cc2017-08-11 14:56:38 -07005665 ret = qsmmuv500_read_actlr_tbl(smmu);
5666 if (ret)
5667 return ret;
5668
5669 reg = ARM_SMMU_GR0(smmu);
5670 val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
5671 val &= ~ARM_MMU500_ACR_CACHE_LOCK;
5672 writel_relaxed(val, reg + ARM_SMMU_GR0_sACR);
5673 val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
5674 /*
5675	 * Modifying the nonsecure copy of the sACR register is only
5676 * allowed if permission is given in the secure sACR register.
5677 * Attempt to detect if we were able to update the value.
5678 */
5679 WARN_ON(val & ARM_MMU500_ACR_CACHE_LOCK);
5680
Patrick Daly1f8a2882016-09-12 17:32:05 -07005681 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
5682 if (ret)
5683 return ret;
5684
5685 /* Attempt to register child devices */
5686 ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
5687 if (ret)
Patrick Daly6ce54262017-04-12 21:24:06 -07005688 return -EPROBE_DEFER;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005689
5690 return 0;
5691}
5692
5693struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
5694 .init = qsmmuv500_arch_init,
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005695 .iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
Patrick Daly03330cc2017-08-11 14:56:38 -07005696 .init_context_bank = qsmmuv500_init_cb,
5697 .device_group = qsmmuv500_device_group,
Patrick Daly1f8a2882016-09-12 17:32:05 -07005698};
5699
5700static const struct of_device_id qsmmuv500_tbu_of_match[] = {
5701 {.compatible = "qcom,qsmmuv500-tbu"},
5702 {}
5703};
5704
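/*
 * TBU child-device probe. "qcom,stream-id-range" is expected to hold two
 * cells, <first-SID number-of-SIDs>, used by qsmmuv500_find_tbu() to route
 * ECATS requests to the right TBU. A hypothetical example:
 *
 *	qcom,stream-id-range = <0x800 0x400>;
 */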
5705static int qsmmuv500_tbu_probe(struct platform_device *pdev)
5706{
5707 struct resource *res;
5708 struct device *dev = &pdev->dev;
5709 struct qsmmuv500_tbu_device *tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005710 const __be32 *cell;
5711 int len;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005712
5713 tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
5714 if (!tbu)
5715 return -ENOMEM;
5716
5717 INIT_LIST_HEAD(&tbu->list);
5718 tbu->dev = dev;
5719 spin_lock_init(&tbu->halt_lock);
5720
5721 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
5722 tbu->base = devm_ioremap_resource(dev, res);
5723 if (IS_ERR(tbu->base))
5724 return PTR_ERR(tbu->base);
5725
5726 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "status-reg");
5727 tbu->status_reg = devm_ioremap_resource(dev, res);
5728 if (IS_ERR(tbu->status_reg))
5729 return PTR_ERR(tbu->status_reg);
5730
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005731 cell = of_get_property(dev->of_node, "qcom,stream-id-range", &len);
5732 if (!cell || len < 8)
5733 return -EINVAL;
5734
5735 tbu->sid_start = of_read_number(cell, 1);
5736 tbu->num_sids = of_read_number(cell + 1, 1);
5737
Patrick Daly1f8a2882016-09-12 17:32:05 -07005738 tbu->pwr = arm_smmu_init_power_resources(pdev);
5739 if (IS_ERR(tbu->pwr))
5740 return PTR_ERR(tbu->pwr);
5741
5742 dev_set_drvdata(dev, tbu);
5743 return 0;
5744}
5745
5746static struct platform_driver qsmmuv500_tbu_driver = {
5747 .driver = {
5748 .name = "qsmmuv500-tbu",
5749 .of_match_table = of_match_ptr(qsmmuv500_tbu_of_match),
5750 },
5751 .probe = qsmmuv500_tbu_probe,
5752};
5753
Will Deacon45ae7cf2013-06-24 18:31:25 +01005754MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
5755MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
5756MODULE_LICENSE("GPL v2");