/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/of_platform.h>
#include <linux/msm-bus.h>
#include <dt-bindings/msm/msm-bus-ids.h>
#include <linux/remote_spinlock.h>
#include <linux/ktime.h>
#include <trace/events/iommu.h>
#include <soc/qcom/msm_tz_smmu.h>
#include <soc/qcom/scm.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS	128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)	((smmu)->base)
#define ARM_SMMU_GR1(smmu)	((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq	writeq_relaxed
#else
#define smmu_write_atomic_lq	writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0	0x0
#define sCR0_CLIENTPD		(1 << 0)
#define sCR0_GFRE		(1 << 1)
#define sCR0_GFIE		(1 << 2)
#define sCR0_GCFGFRE		(1 << 4)
#define sCR0_GCFGFIE		(1 << 5)
#define sCR0_USFCFG		(1 << 10)
#define sCR0_VMIDPNE		(1 << 11)
#define sCR0_PTM		(1 << 12)
#define sCR0_FB			(1 << 13)
#define sCR0_VMID16EN		(1 << 31)
#define sCR0_BSU_SHIFT		14
#define sCR0_BSU_MASK		0x3
#define sCR0_SHCFG_SHIFT	22
#define sCR0_SHCFG_MASK		0x3
#define sCR0_SHCFG_NSH		3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR	0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0	0x20
#define ARM_SMMU_GR0_ID1	0x24
#define ARM_SMMU_GR0_ID2	0x28
#define ARM_SMMU_GR0_ID3	0x2c
#define ARM_SMMU_GR0_ID4	0x30
#define ARM_SMMU_GR0_ID5	0x34
#define ARM_SMMU_GR0_ID6	0x38
#define ARM_SMMU_GR0_ID7	0x3c
#define ARM_SMMU_GR0_sGFSR	0x48
#define ARM_SMMU_GR0_sGFSYNR0	0x50
#define ARM_SMMU_GR0_sGFSYNR1	0x54
#define ARM_SMMU_GR0_sGFSYNR2	0x58

#define ID0_S1TS		(1 << 30)
#define ID0_S2TS		(1 << 29)
#define ID0_NTS			(1 << 28)
#define ID0_SMS			(1 << 27)
#define ID0_ATOSNS		(1 << 26)
#define ID0_PTFS_NO_AARCH32	(1 << 25)
#define ID0_PTFS_NO_AARCH32S	(1 << 24)
#define ID0_CTTW		(1 << 14)
#define ID0_NUMIRPT_SHIFT	16
#define ID0_NUMIRPT_MASK	0xff
#define ID0_NUMSIDB_SHIFT	9
#define ID0_NUMSIDB_MASK	0xf
#define ID0_NUMSMRG_SHIFT	0
#define ID0_NUMSMRG_MASK	0xff

#define ID1_PAGESIZE		(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT	28
#define ID1_NUMPAGENDXB_MASK	7
#define ID1_NUMS2CB_SHIFT	16
#define ID1_NUMS2CB_MASK	0xff
#define ID1_NUMCB_SHIFT		0
#define ID1_NUMCB_MASK		0xff

#define ID2_OAS_SHIFT		4
#define ID2_OAS_MASK		0xf
#define ID2_IAS_SHIFT		0
#define ID2_IAS_MASK		0xf
#define ID2_UBS_SHIFT		8
#define ID2_UBS_MASK		0xf
#define ID2_PTFS_4K		(1 << 12)
#define ID2_PTFS_16K		(1 << 13)
#define ID2_PTFS_64K		(1 << 14)
#define ID2_VMID16		(1 << 15)

#define ID7_MAJOR_SHIFT		4
#define ID7_MAJOR_MASK		0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		500000	/* 500ms */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)	(0x800 + ((n) << 2))
#define SMR_VALID		(1 << 31)
#define SMR_MASK_SHIFT		16
#define SMR_MASK_MASK		0x7FFF
#define SID_MASK		0x7FFF
#define SMR_ID_SHIFT		0

#define ARM_SMMU_GR0_S2CR(n)	(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT	0
#define S2CR_CBNDX_MASK		0xff
#define S2CR_TYPE_SHIFT		16
#define S2CR_TYPE_MASK		0x3
#define S2CR_SHCFG_SHIFT	8
#define S2CR_SHCFG_MASK		0x3
#define S2CR_SHCFG_NSH		0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT	24
#define S2CR_PRIVCFG_MASK	0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)	((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)	((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR	0x0
#define ARM_SMMU_CB_ACTLR	0x4
#define ARM_SMMU_CB_RESUME	0x8
#define ARM_SMMU_CB_TTBCR2	0x10
#define ARM_SMMU_CB_TTBR0	0x20
#define ARM_SMMU_CB_TTBR1	0x28
#define ARM_SMMU_CB_TTBCR	0x30
#define ARM_SMMU_CB_CONTEXTIDR	0x34
#define ARM_SMMU_CB_S1_MAIR0	0x38
#define ARM_SMMU_CB_S1_MAIR1	0x3c
#define ARM_SMMU_CB_PAR		0x50
#define ARM_SMMU_CB_FSR		0x58
#define ARM_SMMU_CB_FSRRESTORE	0x5c
#define ARM_SMMU_CB_FAR		0x60
#define ARM_SMMU_CB_FSYNR0	0x68
#define ARM_SMMU_CB_S1_TLBIVA	0x600
#define ARM_SMMU_CB_S1_TLBIASID	0x610
#define ARM_SMMU_CB_S1_TLBIALL	0x618
#define ARM_SMMU_CB_S1_TLBIVAL	0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC	0x7f0
#define ARM_SMMU_CB_TLBSTATUS	0x7f4
#define TLBSTATUS_SACTIVE	(1 << 0)
#define ARM_SMMU_CB_ATS1PR	0x800
#define ARM_SMMU_CB_ATSR	0x8f0

#define SCTLR_SHCFG_SHIFT	22
#define SCTLR_SHCFG_MASK	0x3
#define SCTLR_SHCFG_NSH		0x3
#define SCTLR_S1_ASIDPNE	(1 << 12)
#define SCTLR_CFCFG		(1 << 7)
#define SCTLR_HUPCF		(1 << 8)
#define SCTLR_CFIE		(1 << 6)
#define SCTLR_CFRE		(1 << 5)
#define SCTLR_E			(1 << 4)
#define SCTLR_AFE		(1 << 2)
#define SCTLR_TRE		(1 << 1)
#define SCTLR_M			(1 << 0)

#define ARM_MMU500_ACTLR_CPRE	(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define CB_PAR_F		(1 << 0)

#define ATSR_ACTIVE		(1 << 0)

#define RESUME_RETRY		(0 << 0)
#define RESUME_TERMINATE	(1 << 0)

#define TTBCR2_SEP_SHIFT	15
#define TTBCR2_SEP_UPSTREAM	(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT	48

#define FSR_MULTI		(1 << 31)
#define FSR_SS			(1 << 30)
#define FSR_UUT			(1 << 8)
#define FSR_ASF			(1 << 7)
#define FSR_TLBLKF		(1 << 6)
#define FSR_TLBMCF		(1 << 5)
#define FSR_EF			(1 << 4)
#define FSR_PF			(1 << 3)
#define FSR_AFF			(1 << 2)
#define FSR_TF			(1 << 1)

#define FSR_IGN			(FSR_AFF | FSR_ASF | \
				 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT		(FSR_MULTI | FSR_SS | FSR_UUT | \
				 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR		(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
	QCOM_SMMUV500,
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

/*
 * attach_count
 *	The SMR and S2CR registers are only programmed when the number of
 *	devices attached to the iommu using these registers is > 0. This
 *	is required for the "SID switch" use case for secure display.
 *	Protected by stream_map_mutex.
 */
struct arm_smmu_s2cr {
	struct iommu_group *group;
	int count;
	int attach_count;
	enum arm_smmu_s2cr_type type;
	enum arm_smmu_s2cr_privcfg privcfg;
	u8 cbndx;
	bool cb_handoff;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
	.cb_handoff = false,						\
}

struct arm_smmu_smr {
	u16 mask;
	u16 id;
	bool valid;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device *smmu;
	s16 smendx[];
};
#define INVALID_SMENDX		-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

/*
 * Describes resources required for on/off power operation.
 * Separate reference count is provided for atomic/nonatomic
 * operations.
 */
struct arm_smmu_power_resources {
	struct platform_device *pdev;
	struct device *dev;

	struct clk **clocks;
	int num_clocks;

	struct regulator_bulk_data *gdscs;
	int num_gdscs;

	uint32_t bus_client;
	struct msm_bus_scale_pdata *bus_dt_data;

	/* Protects power_count */
	struct mutex power_lock;
	int power_count;

	/* Protects clock_refs_count */
	spinlock_t clock_refs_lock;
	int clock_refs_count;
	int regulator_defer;
};

struct arm_smmu_arch_ops;
struct arm_smmu_device {
	struct device *dev;

	void __iomem *base;
	unsigned long size;
	phys_addr_t phys_addr;
	unsigned long pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32 features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
#define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
#define ARM_SMMU_OPT_3LVL_TABLES	(1 << 4)
#define ARM_SMMU_OPT_NO_ASID_RETENTION	(1 << 5)
#define ARM_SMMU_OPT_DISABLE_ATOS	(1 << 6)
#define ARM_SMMU_OPT_MMU500_ERRATA1	(1 << 7)
#define ARM_SMMU_OPT_STATIC_CB		(1 << 8)
	u32 options;
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;

	u32 num_context_banks;
	u32 num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t irptndx;

	u32 num_mapping_groups;
	u16 streamid_mask;
	u16 smr_mask_mask;
	struct arm_smmu_smr *smrs;
	struct arm_smmu_s2cr *s2crs;
	struct mutex stream_map_mutex;

	unsigned long va_size;
	unsigned long ipa_size;
	unsigned long pa_size;
	unsigned long pgsize_bitmap;

	u32 num_global_irqs;
	u32 num_context_irqs;
	unsigned int *irqs;

	struct list_head list;

	u32 cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg *impl_def_attach_registers;
	unsigned int num_impl_def_attach_registers;

	struct arm_smmu_power_resources *pwr;

	spinlock_t atos_lock;

	/* protects idr */
	struct mutex idr_mutex;
	struct idr asid_idr;

	struct arm_smmu_arch_ops *arch_ops;
	void *archdata;

	enum tz_smmu_device_id sec_id;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8 cbndx;
	u8 irptndx;
	u32 cbar;
	u32 procid;
	u16 asid;
	enum arm_smmu_context_fmt fmt;
};
#define INVALID_IRPTNDX		0xff
#define INVALID_CBNDX		0xff
#define INVALID_ASID		0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID		0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_pte_info {
	void *virt_addr;
	size_t size;
	struct list_head entry;
};

struct arm_smmu_domain {
	struct arm_smmu_device *smmu;
	struct device *dev;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	spinlock_t pgtbl_lock;
	struct arm_smmu_cfg cfg;
	enum arm_smmu_domain_stage stage;
	struct mutex init_mutex; /* Protects smmu pointer */
	u32 attributes;
	bool slave_side_secure;
	u32 secure_vmid;
	struct list_head pte_info_list;
	struct list_head unassign_list;
	struct mutex assign_lock;
	struct list_head secure_pool_list;
	struct iommu_domain domain;

	bool qsmmuv500_errata1_init;
	bool qsmmuv500_errata1_client;
	bool qsmmuv500_errata2_min_align;
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
	{ ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
	{ ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" },
	{ ARM_SMMU_OPT_MMU500_ERRATA1, "qcom,mmu500-errata-1" },
	{ ARM_SMMU_OPT_STATIC_CB, "qcom,enable-static-cb"},
	{ 0, NULL},
};

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);

static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
				     dma_addr_t iova);

static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);

static int arm_smmu_alloc_cb(struct iommu_domain *domain,
			     struct arm_smmu_device *smmu,
			     struct device *dev);
static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops;

static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu);
static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain);
static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_dbg(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

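/*
 * For SMMUs whose context banks are statically assigned by the secure world
 * ("qcom,enable-static-cb"), ask TZ via SCM to restore the secure SMMU
 * configuration; a failure from either the SCM call or the secure side is
 * treated as fatal.
 */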
static int arm_smmu_restore_sec_cfg(struct arm_smmu_device *smmu)
{
	int ret;
	int scm_ret = 0;

	if (!arm_smmu_is_static_cb(smmu))
		return 0;

	ret = scm_restore_sec_cfg(smmu->sec_id, 0x0, &scm_ret);
	if (ret || scm_ret) {
		pr_err("scm call IOMMU_SECURE_CFG failed\n");
		return -EINVAL;
	}

	return 0;
}

static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
{
	if (smmu_domain->attributes &
			(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT))
		return true;
	else if (smmu_domain->smmu && smmu_domain->smmu->dev)
		return smmu_domain->smmu->dev->archdata.dma_coherent;
	else
		return false;
}

static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu)
{
	return smmu->options & ARM_SMMU_OPT_STATIC_CB;
}

static bool arm_smmu_has_secure_vmid(struct arm_smmu_domain *smmu_domain)
{
	return (smmu_domain->secure_vmid != VMID_INVAL);
}

static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain)
{
	return arm_smmu_has_secure_vmid(smmu_domain) &&
			smmu_domain->slave_side_secure;
}

static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain)
{
	return arm_smmu_has_secure_vmid(smmu_domain)
			&& !smmu_domain->slave_side_secure;
}

static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_master_side_secure(smmu_domain))
		mutex_lock(&smmu_domain->assign_lock);
}

static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_master_side_secure(smmu_domain))
		mutex_unlock(&smmu_domain->assign_lock);
}

/*
 * init()
 * Hook for additional device tree parsing at probe time.
 *
 * device_reset()
 * Hook for one-time architecture-specific register settings.
 *
 * iova_to_phys_hard()
 * Provides debug information. May be called from the context fault irq handler.
 *
 * init_context_bank()
 * Hook for architecture-specific settings which require knowledge of the
 * dynamically allocated context bank number.
 *
 * device_group()
 * Hook for checking whether a device is compatible with a given group.
 */
struct arm_smmu_arch_ops {
	int (*init)(struct arm_smmu_device *smmu);
	void (*device_reset)(struct arm_smmu_device *smmu);
	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
					 dma_addr_t iova);
	void (*init_context_bank)(struct arm_smmu_domain *smmu_domain,
				  struct device *dev);
	int (*device_group)(struct device *dev, struct iommu_group *group);
};

static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
{
	if (!smmu->arch_ops)
		return 0;
	if (!smmu->arch_ops->init)
		return 0;
	return smmu->arch_ops->init(smmu);
}

static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
{
	if (!smmu->arch_ops)
		return;
	if (!smmu->arch_ops->device_reset)
		return;
	return smmu->arch_ops->device_reset(smmu);
}

static void arm_smmu_arch_init_context_bank(
		struct arm_smmu_domain *smmu_domain, struct device *dev)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (!smmu->arch_ops)
		return;
	if (!smmu->arch_ops->init_context_bank)
		return;
	return smmu->arch_ops->init_context_bank(smmu_domain, dev);
}

static int arm_smmu_arch_device_group(struct device *dev,
				      struct iommu_group *group)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);

	if (!smmu->arch_ops)
		return 0;
	if (!smmu->arch_ops->device_group)
		return 0;
	return smmu->arch_ops->device_group(dev, group);
}

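/*
 * Find the device-tree node used for legacy "mmu-masters" matching. For PCI
 * devices this walks up to the root bus and uses the host bridge's parent
 * node, since the legacy binding describes the SMMU against the root complex
 * rather than individual endpoints.
 */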
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err = 0;

	memset(&it, 0, sizeof(it));
	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

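/* Claim the first free bit in [start, end) of a bitmap, e.g. the context bank map. */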
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_prepare(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
			while (i--)
				clk_unprepare(pwr->clocks[i]);
			break;
		}
	}
	return ret;
}

static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_unprepare(pwr->clocks[i - 1]);
}

static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_enable(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
			while (i--)
				clk_disable(pwr->clocks[i]);
			break;
		}
	}

	return ret;
}

static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_disable(pwr->clocks[i - 1]);
}

static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return 0;
	return msm_bus_scale_client_update_request(pwr->bus_client, 1);
}

static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return;
	WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
}

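/*
 * Enable all GDSC regulators for the SMMU, in order; on failure, roll back
 * any regulators that were already enabled.
 */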
static int arm_smmu_enable_regulators(struct arm_smmu_power_resources *pwr)
{
	struct regulator_bulk_data *consumers;
	int num_consumers, ret;
	int i;

	num_consumers = pwr->num_gdscs;
	consumers = pwr->gdscs;
	for (i = 0; i < num_consumers; i++) {
		ret = regulator_enable(consumers[i].consumer);
		if (ret)
			goto out;
	}
	return 0;

out:
	i -= 1;
	for (; i >= 0; i--)
		regulator_disable(consumers[i].consumer);
	return ret;
}

static int arm_smmu_disable_regulators(struct arm_smmu_power_resources *pwr)
{
	struct regulator_bulk_data *consumers;
	int i;
	int num_consumers, ret, r;

	num_consumers = pwr->num_gdscs;
	consumers = pwr->gdscs;
	for (i = num_consumers - 1; i >= 0; --i) {
		ret = regulator_disable_deferred(consumers[i].consumer,
						 pwr->regulator_defer);
		if (ret != 0)
			goto err;
	}

	return 0;

err:
	pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret);
	for (++i; i < num_consumers; ++i) {
		r = regulator_enable(consumers[i].consumer);
		if (r != 0)
			pr_err("Failed to re-enable %s: %d\n",
			       consumers[i].supply, r);
	}

	return ret;
}

/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count > 0) {
		pwr->clock_refs_count++;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return 0;
	}

	ret = arm_smmu_enable_clocks(pwr);
	if (!ret)
		pwr->clock_refs_count = 1;

	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
	return ret;
}

/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
{
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count == 0) {
		WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;

	} else if (pwr->clock_refs_count > 1) {
		pwr->clock_refs_count--;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;
	}

	arm_smmu_disable_clocks(pwr);

	pwr->clock_refs_count = 0;
	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
}

static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
{
	int ret;

	mutex_lock(&pwr->power_lock);
	if (pwr->power_count > 0) {
		pwr->power_count += 1;
		mutex_unlock(&pwr->power_lock);
		return 0;
	}

	ret = arm_smmu_request_bus(pwr);
	if (ret)
		goto out_unlock;

	ret = arm_smmu_enable_regulators(pwr);
	if (ret)
		goto out_disable_bus;

	ret = arm_smmu_prepare_clocks(pwr);
	if (ret)
		goto out_disable_regulators;

	pwr->power_count = 1;
	mutex_unlock(&pwr->power_lock);
	return 0;

out_disable_regulators:
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
out_disable_bus:
	arm_smmu_unrequest_bus(pwr);
out_unlock:
	mutex_unlock(&pwr->power_lock);
	return ret;
}

static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
{
	mutex_lock(&pwr->power_lock);
	if (pwr->power_count == 0) {
		WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
		mutex_unlock(&pwr->power_lock);
		return;

	} else if (pwr->power_count > 1) {
		pwr->power_count--;
		mutex_unlock(&pwr->power_lock);
		return;
	}

	arm_smmu_unprepare_clocks(pwr);
	arm_smmu_disable_regulators(pwr);
	arm_smmu_unrequest_bus(pwr);
	pwr->power_count = 0;
	mutex_unlock(&pwr->power_lock);
}

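/*
 * Full (sleepable) power-on: take the "slow" resources first (bus vote,
 * regulators, clock prepare), then the atomic clock enables. Callers that
 * may run in atomic context must instead use arm_smmu_power_on_atomic with
 * the slow resources already held.
 */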
static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
{
	int ret;

	ret = arm_smmu_power_on_slow(pwr);
	if (ret)
		return ret;

	ret = arm_smmu_power_on_atomic(pwr);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	arm_smmu_power_off_slow(pwr);
	return ret;
}

static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
{
	arm_smmu_power_off_atomic(pwr);
	arm_smmu_power_off_slow(pwr);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static int arm_smmu_domain_power_on(struct iommu_domain *domain,
				    struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain)
		return arm_smmu_power_on_atomic(smmu->pwr);

	return arm_smmu_power_on(smmu->pwr);
}

/*
 * Must be used instead of arm_smmu_power_off if it may be called from
 * atomic context
 */
static void arm_smmu_domain_power_off(struct iommu_domain *domain,
				      struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain) {
		arm_smmu_power_off_atomic(smmu->pwr);
		return;
	}

	arm_smmu_power_off(smmu->pwr);
}

/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
				 int cbndx)
{
	void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
	u32 val;

	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
				      !(val & TLBSTATUS_SACTIVE),
				      0, TLB_LOOP_TIMEOUT)) {
		trace_tlbsync_timeout(smmu->dev, 0);
		dev_err(smmu->dev, "TLBSYNC timeout!\n");
	}
}

static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
}

Patrick Daly8befb662016-08-17 20:03:28 -07001180/* Must be called with clocks/regulators enabled */
Will Deacon518f7132014-11-14 17:17:54 +00001181static void arm_smmu_tlb_inv_context(void *cookie)
1182{
1183 struct arm_smmu_domain *smmu_domain = cookie;
Prakash Gupta515d9bc2017-11-20 15:00:39 +05301184 struct device *dev = smmu_domain->dev;
Will Deacon44680ee2014-06-25 11:29:12 +01001185 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1186 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon1463fe42013-07-31 19:21:27 +01001187 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
Will Deacon518f7132014-11-14 17:17:54 +00001188 void __iomem *base;
Patrick Dalye7069342017-07-11 12:35:55 -07001189 bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
Prakash Gupta515d9bc2017-11-20 15:00:39 +05301190 ktime_t cur = ktime_get();
1191
1192 trace_tlbi_start(dev, 0);
Will Deacon1463fe42013-07-31 19:21:27 +01001193
Patrick Dalye7069342017-07-11 12:35:55 -07001194 if (stage1 && !use_tlbiall) {
Will Deacon1463fe42013-07-31 19:21:27 +01001195 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001196 writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +01001197 base + ARM_SMMU_CB_S1_TLBIASID);
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001198 arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
Patrick Dalye7069342017-07-11 12:35:55 -07001199 } else if (stage1 && use_tlbiall) {
1200 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1201 writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
1202 arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001203 } else {
1204 base = ARM_SMMU_GR0(smmu);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001205 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +01001206 base + ARM_SMMU_GR0_TLBIVMID);
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001207 __arm_smmu_tlb_sync(smmu);
Will Deacon1463fe42013-07-31 19:21:27 +01001208 }
Prakash Gupta515d9bc2017-11-20 15:00:39 +05301209
1210 trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
Will Deacon1463fe42013-07-31 19:21:27 +01001211}
1212
Will Deacon518f7132014-11-14 17:17:54 +00001213static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
Robin Murphy06c610e2015-12-07 18:18:53 +00001214 size_t granule, bool leaf, void *cookie)
Will Deacon518f7132014-11-14 17:17:54 +00001215{
1216 struct arm_smmu_domain *smmu_domain = cookie;
1217 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1218 struct arm_smmu_device *smmu = smmu_domain->smmu;
1219 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1220 void __iomem *reg;
Patrick Dalye7069342017-07-11 12:35:55 -07001221 bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
Will Deacon518f7132014-11-14 17:17:54 +00001222
Patrick Dalye7069342017-07-11 12:35:55 -07001223 if (stage1 && !use_tlbiall) {
Will Deacon518f7132014-11-14 17:17:54 +00001224 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1225 reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
1226
Robin Murphy7602b872016-04-28 17:12:09 +01001227 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001228 iova &= ~12UL;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001229 iova |= ARM_SMMU_CB_ASID(smmu, cfg);
Robin Murphy75df1382015-12-07 18:18:52 +00001230 do {
1231 writel_relaxed(iova, reg);
1232 iova += granule;
1233 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001234 } else {
1235 iova >>= 12;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001236 iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
Robin Murphy75df1382015-12-07 18:18:52 +00001237 do {
1238 writeq_relaxed(iova, reg);
1239 iova += granule >> 12;
1240 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001241 }
Patrick Dalye7069342017-07-11 12:35:55 -07001242 } else if (stage1 && use_tlbiall) {
1243 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1244 reg += ARM_SMMU_CB_S1_TLBIALL;
1245 writel_relaxed(0, reg);
Will Deacon518f7132014-11-14 17:17:54 +00001246 } else if (smmu->version == ARM_SMMU_V2) {
1247 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1248 reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
1249 ARM_SMMU_CB_S2_TLBIIPAS2;
Robin Murphy75df1382015-12-07 18:18:52 +00001250 iova >>= 12;
1251 do {
Robin Murphyf9a05f02016-04-13 18:13:01 +01001252 smmu_write_atomic_lq(iova, reg);
Robin Murphy75df1382015-12-07 18:18:52 +00001253 iova += granule >> 12;
1254 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001255 } else {
1256 reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001257 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
Will Deacon518f7132014-11-14 17:17:54 +00001258 }
1259}
1260
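/*
 * Secure page-table pool: for master-side secure domains, page-table pages
 * are hyp-assigned to the secure VMID and are expensive to hand back.
 * Rather than unassigning on every free, pages are zeroed and parked on
 * secure_pool_list (matched by allocation size) so a later allocation of
 * the same size can reuse them; anything left over is unassigned when the
 * domain is destroyed.
 */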
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001261struct arm_smmu_secure_pool_chunk {
1262 void *addr;
1263 size_t size;
1264 struct list_head list;
1265};
1266
1267static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
1268 size_t size)
1269{
1270 struct arm_smmu_secure_pool_chunk *it;
1271
1272 list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
1273 if (it->size == size) {
1274 void *addr = it->addr;
1275
1276 list_del(&it->list);
1277 kfree(it);
1278 return addr;
1279 }
1280 }
1281
1282 return NULL;
1283}
1284
1285static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
1286 void *addr, size_t size)
1287{
1288 struct arm_smmu_secure_pool_chunk *chunk;
1289
1290 chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
1291 if (!chunk)
1292 return -ENOMEM;
1293
1294 chunk->addr = addr;
1295 chunk->size = size;
1296 memset(addr, 0, size);
1297 list_add(&chunk->list, &smmu_domain->secure_pool_list);
1298
1299 return 0;
1300}
1301
1302static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
1303{
1304 struct arm_smmu_secure_pool_chunk *it, *i;
1305
1306 list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
1307 arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
1308 /* pages will be freed later (after being unassigned) */
Prakash Gupta8e827be2017-10-04 12:37:11 +05301309 list_del(&it->list);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001310 kfree(it);
1311 }
1312}
1313
Patrick Dalyc11d1082016-09-01 15:52:44 -07001314static void *arm_smmu_alloc_pages_exact(void *cookie,
1315 size_t size, gfp_t gfp_mask)
1316{
1317 int ret;
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001318 void *page;
1319 struct arm_smmu_domain *smmu_domain = cookie;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001320
Charan Teja Reddy35144b02017-09-05 16:20:46 +05301321 if (!arm_smmu_is_master_side_secure(smmu_domain))
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001322 return alloc_pages_exact(size, gfp_mask);
1323
1324 page = arm_smmu_secure_pool_remove(smmu_domain, size);
1325 if (page)
1326 return page;
1327
1328 page = alloc_pages_exact(size, gfp_mask);
1329 if (page) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07001330 ret = arm_smmu_prepare_pgtable(page, cookie);
1331 if (ret) {
1332 free_pages_exact(page, size);
1333 return NULL;
1334 }
1335 }
1336
1337 return page;
1338}
1339
1340static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
1341{
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001342 struct arm_smmu_domain *smmu_domain = cookie;
1343
Charan Teja Reddy35144b02017-09-05 16:20:46 +05301344 if (!arm_smmu_is_master_side_secure(smmu_domain)) {
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001345 free_pages_exact(virt, size);
1346 return;
1347 }
1348
1349 if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
1350 arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001351}
1352
Will Deacon518f7132014-11-14 17:17:54 +00001353static struct iommu_gather_ops arm_smmu_gather_ops = {
1354 .tlb_flush_all = arm_smmu_tlb_inv_context,
1355 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
1356 .tlb_sync = arm_smmu_tlb_sync,
Patrick Dalyc11d1082016-09-01 15:52:44 -07001357 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
1358 .free_pages_exact = arm_smmu_free_pages_exact,
Will Deacon518f7132014-11-14 17:17:54 +00001359};
1360
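/*
 * On an unhandled context fault, retry the hardware ATOS translation after
 * a full TLB invalidation. If the two results differ, the fault was most
 * likely caused by a stale TLB entry rather than a missing mapping, and
 * that discrepancy is reported separately.
 */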
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001361static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
1362 dma_addr_t iova, u32 fsr)
1363{
1364 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001365 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyda765c62017-09-11 16:31:07 -07001366 const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001367 phys_addr_t phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07001368 phys_addr_t phys_post_tlbiall;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001369
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001370 phys = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyda765c62017-09-11 16:31:07 -07001371 tlb->tlb_flush_all(smmu_domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001372 phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001373
Patrick Dalyad441dd2016-09-15 15:50:46 -07001374 if (phys != phys_post_tlbiall) {
1375 dev_err(smmu->dev,
1376 "ATOS results differed across TLBIALL...\n"
1377 "Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
1378 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001379
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001380 return (phys == 0 ? phys_post_tlbiall : phys);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001381}
1382
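/*
 * Context (translation) fault handler. The SMMU is powered on for the
 * duration of the handler, the fault syndrome (FSR/FSYNR0/FAR/CBFRSYNRA)
 * is collected, and the fault is offered to the client through
 * report_iommu_fault(). A return of 0 or -EBUSY means the client claims
 * the fault; -EBUSY additionally means the FSR is left set and no RESUME
 * is written, so a stalled transaction stays outstanding until the client
 * terminates it (see the comment above the FSR write below). Unhandled
 * faults are logged (rate-limited) and, unless the domain has
 * DOMAIN_ATTR_NON_FATAL_FAULTS set, end in BUG().
 */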
Will Deacon45ae7cf2013-06-24 18:31:25 +01001383static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
1384{
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001385 int flags, ret, tmp;
Patrick Daly5ba28112016-08-30 19:18:52 -07001386 u32 fsr, fsynr, resume;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001387 unsigned long iova;
1388 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +01001389 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001390 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1391 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001392 void __iomem *cb_base;
Shalaj Jain04059c52015-03-03 13:34:59 -08001393 void __iomem *gr1_base;
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001394 bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001395 phys_addr_t phys_soft;
Shalaj Jain04059c52015-03-03 13:34:59 -08001396 u32 frsynra;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001397 bool non_fatal_fault = !!(smmu_domain->attributes &
Sudarshan Rajagopalanf4464e02017-08-10 14:30:39 -07001398 (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001399
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001400 static DEFINE_RATELIMIT_STATE(_rs,
1401 DEFAULT_RATELIMIT_INTERVAL,
1402 DEFAULT_RATELIMIT_BURST);
1403
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001404 ret = arm_smmu_power_on(smmu->pwr);
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001405 if (ret)
1406 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001407
Shalaj Jain04059c52015-03-03 13:34:59 -08001408 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001409 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001410 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
1411
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001412 if (!(fsr & FSR_FAULT)) {
1413 ret = IRQ_NONE;
1414 goto out_power_off;
1415 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001416
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001417 if (fatal_asf && (fsr & FSR_ASF)) {
1418 dev_err(smmu->dev,
1419 "Took an address size fault. Refusing to recover.\n");
1420 BUG();
1421 }
1422
Will Deacon45ae7cf2013-06-24 18:31:25 +01001423 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
Patrick Daly5ba28112016-08-30 19:18:52 -07001424 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001425 if (fsr & FSR_TF)
1426 flags |= IOMMU_FAULT_TRANSLATION;
1427 if (fsr & FSR_PF)
1428 flags |= IOMMU_FAULT_PERMISSION;
Mitchel Humpherysdd75e332015-08-19 11:02:33 -07001429 if (fsr & FSR_EF)
1430 flags |= IOMMU_FAULT_EXTERNAL;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001431 if (fsr & FSR_SS)
1432 flags |= IOMMU_FAULT_TRANSACTION_STALLED;
Patrick Daly5ba28112016-08-30 19:18:52 -07001433
Robin Murphyf9a05f02016-04-13 18:13:01 +01001434 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001435 phys_soft = arm_smmu_iova_to_phys(domain, iova);
Shalaj Jain04059c52015-03-03 13:34:59 -08001436 frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
1437 frsynra &= CBFRSYNRA_SID_MASK;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001438 tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
1439 if (!tmp || (tmp == -EBUSY)) {
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001440 dev_dbg(smmu->dev,
1441 "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1442 iova, fsr, fsynr, cfg->cbndx);
1443 dev_dbg(smmu->dev,
1444 "soft iova-to-phys=%pa\n", &phys_soft);
Patrick Daly5ba28112016-08-30 19:18:52 -07001445 ret = IRQ_HANDLED;
Shrenuj Bansald5083c02015-09-18 14:59:09 -07001446 resume = RESUME_TERMINATE;
Patrick Daly5ba28112016-08-30 19:18:52 -07001447 } else {
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001448 phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
1449 fsr);
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001450 if (__ratelimit(&_rs)) {
1451 dev_err(smmu->dev,
1452 "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1453 iova, fsr, fsynr, cfg->cbndx);
1454 dev_err(smmu->dev, "FAR = %016lx\n",
1455 (unsigned long)iova);
1456 dev_err(smmu->dev,
1457 "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
1458 fsr,
1459 (fsr & 0x02) ? "TF " : "",
1460 (fsr & 0x04) ? "AFF " : "",
1461 (fsr & 0x08) ? "PF " : "",
1462 (fsr & 0x10) ? "EF " : "",
1463 (fsr & 0x20) ? "TLBMCF " : "",
1464 (fsr & 0x40) ? "TLBLKF " : "",
1465 (fsr & 0x80) ? "MHF " : "",
1466 (fsr & 0x40000000) ? "SS " : "",
1467 (fsr & 0x80000000) ? "MULTI " : "");
1468 dev_err(smmu->dev,
1469 "soft iova-to-phys=%pa\n", &phys_soft);
Mitchel Humpherysd03b65d2015-11-05 11:50:29 -08001470 if (!phys_soft)
1471 dev_err(smmu->dev,
1472 "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
1473 dev_name(smmu->dev));
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001474 if (phys_atos)
1475 dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
1476 &phys_atos);
1477 else
1478 dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001479 dev_err(smmu->dev, "SID=0x%x\n", frsynra);
1480 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001481 ret = IRQ_NONE;
1482 resume = RESUME_TERMINATE;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001483 if (!non_fatal_fault) {
1484 dev_err(smmu->dev,
1485 "Unhandled arm-smmu context fault!\n");
1486 BUG();
1487 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001488 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001489
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001490 /*
1491 * If the client returns -EBUSY, do not clear FSR and do not RESUME
1492 * if stalled. This is required to keep the IOMMU client stalled on
1493 * the outstanding fault. This gives the client a chance to take any
1494 * debug action and then terminate the stalled transaction.
1495 * So, the sequence in case of stall on fault should be:
1496 * 1) Do not clear FSR or write to RESUME here
1497 * 2) Client takes any debug action
1498 * 3) Client terminates the stalled transaction and resumes the IOMMU
1499 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1500 * not before so that the fault remains outstanding. This ensures
1501 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1502 * need to be terminated.
1503 */
1504 if (tmp != -EBUSY) {
1505 /* Clear the faulting FSR */
1506 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001507
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001508 /*
1509 * Barrier required to ensure that the FSR is cleared
1510 * before resuming SMMU operation
1511 */
1512 wmb();
1513
1514 /* Retry or terminate any stalled transactions */
1515 if (fsr & FSR_SS)
1516 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1517 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001518
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001519out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001520 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001521
Patrick Daly5ba28112016-08-30 19:18:52 -07001522 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001523}
1524
1525static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1526{
1527 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1528 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001529 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001530
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001531 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001532 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001533
Will Deacon45ae7cf2013-06-24 18:31:25 +01001534 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1535 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1536 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1537 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1538
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001539 if (!gfsr) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001540 arm_smmu_power_off(smmu->pwr);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001541 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001542 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001543
Will Deacon45ae7cf2013-06-24 18:31:25 +01001544 dev_err_ratelimited(smmu->dev,
1545 "Unexpected global fault, this could be serious\n");
1546 dev_err_ratelimited(smmu->dev,
1547 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1548 gfsr, gfsynr0, gfsynr1, gfsynr2);
1549
1550 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001551 arm_smmu_power_off(smmu->pwr);
Will Deaconadaba322013-07-31 19:21:26 +01001552 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001553}
1554
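/*
 * With statically configured (TZ-owned) context banks, selecting the
 * AArch64 page-table format for a non-secure domain on a v2 SMMU also
 * involves the secure world: the format is requested through
 * msm_tz_set_cb_format() for the SMMU's sec_id and this context bank.
 */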
Charan Teja Reddy35144b02017-09-05 16:20:46 +05301555static int arm_smmu_set_pt_format(struct arm_smmu_domain *smmu_domain,
1556 struct io_pgtable_cfg *pgtbl_cfg)
1557{
1558 struct arm_smmu_device *smmu = smmu_domain->smmu;
1559 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1560 int ret = 0;
1561
1562 if ((smmu->version > ARM_SMMU_V1) &&
1563 (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) &&
1564 !arm_smmu_has_secure_vmid(smmu_domain) &&
1565 arm_smmu_is_static_cb(smmu)) {
1566 ret = msm_tz_set_cb_format(smmu->sec_id, cfg->cbndx);
1567 }
1568 return ret;
1569}
1570
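/*
 * Program a context bank from the io-pgtable configuration: CBA2R selects
 * the AArch32/AArch64 register layout (and carries 16-bit VMIDs), CBAR
 * selects the translation type and, for stage 1, the weakest memory
 * attributes so the page tables have the final say, and the TTBRs, TTBCR
 * and MAIR/PRRR+NMRR values are copied out of pgtbl_cfg. For the AArch64
 * stage-1 format the ASID lives in TTBRn[63:48]. Finally SCTLR enables
 * fault reporting and, unless the stage-1 domain is marked S1-bypass or
 * early-map, translation itself.
 */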
Will Deacon518f7132014-11-14 17:17:54 +00001571static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1572 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001573{
Robin Murphyb94df6f2016-08-11 17:44:06 +01001574 u32 reg, reg2;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001575 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001576 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +01001577 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1578 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001579 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001580
Will Deacon45ae7cf2013-06-24 18:31:25 +01001581 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001582 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1583 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001584
Will Deacon4a1c93c2015-03-04 12:21:03 +00001585 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001586 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1587 reg = CBA2R_RW64_64BIT;
1588 else
1589 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001590 /* 16-bit VMIDs live in CBA2R */
1591 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001592 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001593
Will Deacon4a1c93c2015-03-04 12:21:03 +00001594 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
1595 }
1596
Will Deacon45ae7cf2013-06-24 18:31:25 +01001597 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001598 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001599 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001600 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001601
Will Deacon57ca90f2014-02-06 14:59:05 +00001602 /*
1603 * Use the weakest shareability/memory types, so they are
1604 * overridden by the ttbcr/pte.
1605 */
1606 if (stage1) {
1607 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1608 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001609 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1610 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001611 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001612 }
Will Deacon44680ee2014-06-25 11:29:12 +01001613 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001614
Will Deacon518f7132014-11-14 17:17:54 +00001615 /* TTBRs */
1616 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001617 u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001618
Robin Murphyb94df6f2016-08-11 17:44:06 +01001619 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1620 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
1621 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
1622 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
1623 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
1624 writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
1625 } else {
1626 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1627 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1628 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
1629 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
1630 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1631 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
1632 }
Will Deacon518f7132014-11-14 17:17:54 +00001633 } else {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001634 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001635 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +00001636 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001637
Will Deacon518f7132014-11-14 17:17:54 +00001638 /* TTBCR */
1639 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001640 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1641 reg = pgtbl_cfg->arm_v7s_cfg.tcr;
1642 reg2 = 0;
1643 } else {
1644 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1645 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
1646 reg2 |= TTBCR2_SEP_UPSTREAM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001647 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001648 if (smmu->version > ARM_SMMU_V1)
1649 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001650 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001651 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001652 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001653 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001654
Will Deacon518f7132014-11-14 17:17:54 +00001655 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001656 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001657 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1658 reg = pgtbl_cfg->arm_v7s_cfg.prrr;
1659 reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
1660 } else {
1661 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1662 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1663 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001664 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001665 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001666 }
1667
Will Deacon45ae7cf2013-06-24 18:31:25 +01001668 /* SCTLR */
Robin Murphyb94df6f2016-08-11 17:44:06 +01001669 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001670
Patrick Daly7f377fe2017-10-06 17:37:10 -07001671 /* Ensure bypass transactions are Non-shareable */
1672 reg |= SCTLR_SHCFG_NSH << SCTLR_SHCFG_SHIFT;
1673
Charan Teja Reddyc682e472017-04-20 19:11:20 +05301674 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_CB_STALL_DISABLE)) {
1675 reg &= ~SCTLR_CFCFG;
1676 reg |= SCTLR_HUPCF;
1677 }
1678
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001679 if ((!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
1680 !(smmu_domain->attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) ||
1681 !stage1)
Patrick Dalye62d3362016-03-15 18:58:28 -07001682 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001683 if (stage1)
1684 reg |= SCTLR_S1_ASIDPNE;
1685#ifdef __BIG_ENDIAN
1686 reg |= SCTLR_E;
1687#endif
Will Deacon25724842013-08-21 13:49:53 +01001688 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001689}
1690
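/*
 * ASID assignment: a normal domain simply uses (cbndx + 1), which is
 * unique per context bank. Dynamic domains share a context bank between
 * several page tables, so each one gets its own ASID from an IDR,
 * allocated above the context-bank range (num_context_banks + 2) up to
 * MAX_ASID.
 */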
Patrick Dalyc190d932016-08-30 17:23:28 -07001691static int arm_smmu_init_asid(struct iommu_domain *domain,
1692 struct arm_smmu_device *smmu)
1693{
1694 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1695 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1696 bool dynamic = is_dynamic_domain(domain);
1697 int ret;
1698
1699 if (!dynamic) {
1700 cfg->asid = cfg->cbndx + 1;
1701 } else {
1702 mutex_lock(&smmu->idr_mutex);
1703 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1704 smmu->num_context_banks + 2,
1705 MAX_ASID + 1, GFP_KERNEL);
1706
1707 mutex_unlock(&smmu->idr_mutex);
1708 if (ret < 0) {
1709 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1710 ret);
1711 return ret;
1712 }
1713 cfg->asid = ret;
1714 }
1715 return 0;
1716}
1717
1718static void arm_smmu_free_asid(struct iommu_domain *domain)
1719{
1720 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1721 struct arm_smmu_device *smmu = smmu_domain->smmu;
1722 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1723 bool dynamic = is_dynamic_domain(domain);
1724
1725 if (cfg->asid == INVALID_ASID || !dynamic)
1726 return;
1727
1728 mutex_lock(&smmu->idr_mutex);
1729 idr_remove(&smmu->asid_idr, cfg->asid);
1730 mutex_unlock(&smmu->idr_mutex);
1731}
1732
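/*
 * Finalise a domain on its first attach: pick a translation stage and
 * context format the hardware supports, allocate a context bank and ASID,
 * and build the io_pgtable_cfg. Slave-side secure domains use the
 * ARM_MSM_SECURE format, which delegates page-table management to the
 * secure world through the SMMU's sec_id and context bank number. Unless
 * the domain is dynamic, the context bank is then programmed and its
 * context fault interrupt requested last, so the handler never sees a
 * half-initialised domain.
 */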
Will Deacon45ae7cf2013-06-24 18:31:25 +01001733static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Patrick Dalyea63baa2017-02-13 17:11:33 -08001734 struct arm_smmu_device *smmu,
1735 struct device *dev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001736{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001737 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001738 unsigned long ias, oas;
1739 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001740 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001741 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001742 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001743 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyce6786f2016-11-09 14:19:23 -08001744 unsigned long quirks = 0;
Patrick Dalyc190d932016-08-30 17:23:28 -07001745 bool dynamic;
Patrick Dalyda765c62017-09-11 16:31:07 -07001746 const struct iommu_gather_ops *tlb;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001747
Will Deacon518f7132014-11-14 17:17:54 +00001748 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001749 if (smmu_domain->smmu)
1750 goto out_unlock;
1751
Patrick Dalyc190d932016-08-30 17:23:28 -07001752 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1753 smmu_domain->cfg.asid = INVALID_ASID;
1754
Patrick Dalyc190d932016-08-30 17:23:28 -07001755 dynamic = is_dynamic_domain(domain);
1756 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1757 dev_err(smmu->dev, "dynamic domains not supported\n");
1758 ret = -EPERM;
1759 goto out_unlock;
1760 }
1761
Will Deaconc752ce42014-06-25 22:46:31 +01001762 /*
1763 * Mapping the requested stage onto what we support is surprisingly
1764 * complicated, mainly because the spec allows S1+S2 SMMUs without
1765 * support for nested translation. That means we end up with the
1766 * following table:
1767 *
1768 * Requested Supported Actual
1769 * S1 N S1
1770 * S1 S1+S2 S1
1771 * S1 S2 S2
1772 * S1 S1 S1
1773 * N N N
1774 * N S1+S2 S2
1775 * N S2 S2
1776 * N S1 S1
1777 *
1778 * Note that you can't actually request stage-2 mappings.
1779 */
1780 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1781 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1782 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1783 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1784
Robin Murphy7602b872016-04-28 17:12:09 +01001785 /*
1786 * Choosing a suitable context format is even more fiddly. Until we
1787 * grow some way for the caller to express a preference, and/or move
1788 * the decision into the io-pgtable code where it arguably belongs,
1789 * just aim for the closest thing to the rest of the system, and hope
1790 * that the hardware isn't esoteric enough that we can't assume AArch64
1791 * support to be a superset of AArch32 support...
1792 */
1793 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1794 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001795 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
1796 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
1797 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
1798 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
1799 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +01001800 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1801 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1802 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1803 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1804 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1805
1806 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1807 ret = -EINVAL;
1808 goto out_unlock;
1809 }
1810
Will Deaconc752ce42014-06-25 22:46:31 +01001811 switch (smmu_domain->stage) {
1812 case ARM_SMMU_DOMAIN_S1:
1813 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1814 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001815 ias = smmu->va_size;
1816 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001817 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001818 fmt = ARM_64_LPAE_S1;
Patrick Daly4423d3e2017-05-04 18:17:51 -07001819 if (smmu->options & ARM_SMMU_OPT_3LVL_TABLES)
1820 ias = min(ias, 39UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001821 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +00001822 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001823 ias = min(ias, 32UL);
1824 oas = min(oas, 40UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001825 } else {
1826 fmt = ARM_V7S;
1827 ias = min(ias, 32UL);
1828 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +01001829 }
Will Deaconc752ce42014-06-25 22:46:31 +01001830 break;
1831 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001832 /*
1833 * We will likely want to change this if/when KVM gets
1834 * involved.
1835 */
Will Deaconc752ce42014-06-25 22:46:31 +01001836 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001837 cfg->cbar = CBAR_TYPE_S2_TRANS;
1838 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001839 ias = smmu->ipa_size;
1840 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001841 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001842 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001843 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001844 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001845 ias = min(ias, 40UL);
1846 oas = min(oas, 40UL);
1847 }
Will Deaconc752ce42014-06-25 22:46:31 +01001848 break;
1849 default:
1850 ret = -EINVAL;
1851 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001852 }
1853
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001854 if (is_fast)
1855 fmt = ARM_V8L_FAST;
1856
Patrick Dalyce6786f2016-11-09 14:19:23 -08001857 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
1858 quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
Liam Mark53cf2342016-12-20 11:36:07 -08001859 if (is_iommu_pt_coherent(smmu_domain))
1860 quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
Patrick Daly49ccf332017-09-27 15:10:29 -07001861 if ((quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT) &&
1862 (smmu->model == QCOM_SMMUV500))
1863 quirks |= IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001864
Patrick Dalyda765c62017-09-11 16:31:07 -07001865 tlb = &arm_smmu_gather_ops;
Patrick Daly83174c12017-10-26 12:31:15 -07001866 if (smmu->options & ARM_SMMU_OPT_MMU500_ERRATA1)
Patrick Dalyda765c62017-09-11 16:31:07 -07001867 tlb = &qsmmuv500_errata1_smmu_gather_ops;
1868
Patrick Dalyda688822017-05-17 20:12:48 -07001869 ret = arm_smmu_alloc_cb(domain, smmu, dev);
1870 if (ret < 0)
1871 goto out_unlock;
1872 cfg->cbndx = ret;
1873
Robin Murphyb7862e32016-04-13 18:13:03 +01001874 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001875 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1876 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001877 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001878 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001879 }
1880
Charan Teja Reddy35144b02017-09-05 16:20:46 +05301881 if (arm_smmu_is_slave_side_secure(smmu_domain)) {
1882 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
1883 .quirks = quirks,
1884 .pgsize_bitmap = smmu->pgsize_bitmap,
1885 .arm_msm_secure_cfg = {
1886 .sec_id = smmu->sec_id,
1887 .cbndx = cfg->cbndx,
1888 },
1889 .iommu_dev = smmu->dev,
1890 };
1891 fmt = ARM_MSM_SECURE;
1892 } else {
1893 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
1894 .quirks = quirks,
1895 .pgsize_bitmap = smmu->pgsize_bitmap,
1896 .ias = ias,
1897 .oas = oas,
1898 .tlb = tlb,
1899 .iommu_dev = smmu->dev,
1900 };
1901 }
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001902
Will Deacon518f7132014-11-14 17:17:54 +00001903 smmu_domain->smmu = smmu;
Patrick Dalyea63baa2017-02-13 17:11:33 -08001904 smmu_domain->dev = dev;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001905 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1906 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001907 if (!pgtbl_ops) {
1908 ret = -ENOMEM;
1909 goto out_clear_smmu;
1910 }
1911
Patrick Dalyc11d1082016-09-01 15:52:44 -07001912 /*
1913 * assign any page table memory that might have been allocated
1914 * during alloc_io_pgtable_ops
1915 */
Patrick Dalye271f212016-10-04 13:24:49 -07001916 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001917 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001918 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001919
Robin Murphyd5466352016-05-09 17:20:09 +01001920 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001921 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Robin Murphyd7a8d042016-09-12 17:13:58 +01001922 domain->geometry.aperture_end = (1UL << ias) - 1;
1923 domain->geometry.force_aperture = true;
Will Deacon518f7132014-11-14 17:17:54 +00001924
Patrick Dalyc190d932016-08-30 17:23:28 -07001925 /* Assign an asid */
1926 ret = arm_smmu_init_asid(domain, smmu);
1927 if (ret)
1928 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001929
Patrick Dalyc190d932016-08-30 17:23:28 -07001930 if (!dynamic) {
1931 /* Initialise the context bank with our page table cfg */
1932 arm_smmu_init_context_bank(smmu_domain,
1933 &smmu_domain->pgtbl_cfg);
Charan Teja Reddy35144b02017-09-05 16:20:46 +05301934 /* for slave side secure, we may have to force the pagetable
1935 * format to V8L.
1936 */
1937 ret = arm_smmu_set_pt_format(smmu_domain,
1938 &smmu_domain->pgtbl_cfg);
1939 if (ret)
1940 goto out_clear_smmu;
Patrick Dalyc190d932016-08-30 17:23:28 -07001941
Patrick Daly03330cc2017-08-11 14:56:38 -07001942 arm_smmu_arch_init_context_bank(smmu_domain, dev);
1943
Patrick Dalyc190d932016-08-30 17:23:28 -07001944 /*
1945 * Request context fault interrupt. Do this last to avoid the
1946 * handler seeing a half-initialised domain state.
1947 */
1948 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1949 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001950 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1951 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001952 if (ret < 0) {
1953 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1954 cfg->irptndx, irq);
1955 cfg->irptndx = INVALID_IRPTNDX;
1956 goto out_clear_smmu;
1957 }
1958 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001959 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001960 }
Will Deacon518f7132014-11-14 17:17:54 +00001961 mutex_unlock(&smmu_domain->init_mutex);
1962
1963 /* Publish page table ops for map/unmap */
1964 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001965 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001966
Will Deacon518f7132014-11-14 17:17:54 +00001967out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001968 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001969 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001970out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001971 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001972 return ret;
1973}
1974
Patrick Daly77db4f92016-10-14 15:34:10 -07001975static void arm_smmu_domain_reinit(struct arm_smmu_domain *smmu_domain)
1976{
1977 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1978 smmu_domain->cfg.cbndx = INVALID_CBNDX;
1979 smmu_domain->secure_vmid = VMID_INVAL;
1980}
1981
Will Deacon45ae7cf2013-06-24 18:31:25 +01001982static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1983{
Joerg Roedel1d672632015-03-26 13:43:10 +01001984 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001985 struct arm_smmu_device *smmu = smmu_domain->smmu;
1986 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01001987 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001988 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07001989 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001990 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001991
Robin Murphy7e96c742016-09-14 15:26:46 +01001992 if (!smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001993 return;
1994
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001995 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001996 if (ret) {
 1997		WARN_ONCE(ret, "Whoops, powering on smmu %p failed. Leaking context bank\n",
1998 smmu);
1999 return;
2000 }
2001
Patrick Dalyc190d932016-08-30 17:23:28 -07002002 dynamic = is_dynamic_domain(domain);
2003 if (dynamic) {
2004 arm_smmu_free_asid(domain);
2005 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002006 arm_smmu_power_off(smmu->pwr);
Patrick Dalye271f212016-10-04 13:24:49 -07002007 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002008 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002009 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002010 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly77db4f92016-10-14 15:34:10 -07002011 arm_smmu_domain_reinit(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07002012 return;
2013 }
2014
Will Deacon518f7132014-11-14 17:17:54 +00002015 /*
2016 * Disable the context bank and free the page tables before freeing
2017 * it.
2018 */
Will Deacon44680ee2014-06-25 11:29:12 +01002019 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01002020 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01002021
Will Deacon44680ee2014-06-25 11:29:12 +01002022 if (cfg->irptndx != INVALID_IRPTNDX) {
2023 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08002024 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002025 }
2026
Markus Elfring44830b02015-11-06 18:32:41 +01002027 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07002028 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002029 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002030 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002031 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01002032 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002033
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002034 arm_smmu_power_off(smmu->pwr);
Patrick Daly77db4f92016-10-14 15:34:10 -07002035 arm_smmu_domain_reinit(smmu_domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002036}
2037
Joerg Roedel1d672632015-03-26 13:43:10 +01002038static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002039{
2040 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002041
Patrick Daly09801312016-08-29 17:02:52 -07002042 /* Do not support DOMAIN_DMA for now */
2043 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01002044 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002045 /*
2046 * Allocate the domain and initialise some of its data structures.
2047 * We can't really do anything meaningful until we've added a
2048 * master.
2049 */
2050 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
2051 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01002052 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002053
Robin Murphy7e96c742016-09-14 15:26:46 +01002054 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
2055 iommu_get_dma_cookie(&smmu_domain->domain))) {
Robin Murphy9adb9592016-01-26 18:06:36 +00002056 kfree(smmu_domain);
2057 return NULL;
2058 }
2059
Will Deacon518f7132014-11-14 17:17:54 +00002060 mutex_init(&smmu_domain->init_mutex);
2061 spin_lock_init(&smmu_domain->pgtbl_lock);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002062 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
2063 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07002064 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002065 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Patrick Daly77db4f92016-10-14 15:34:10 -07002066 arm_smmu_domain_reinit(smmu_domain);
Joerg Roedel1d672632015-03-26 13:43:10 +01002067
2068 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002069}
2070
Joerg Roedel1d672632015-03-26 13:43:10 +01002071static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002072{
Joerg Roedel1d672632015-03-26 13:43:10 +01002073 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01002074
2075 /*
2076 * Free the domain resources. We assume that all devices have
2077 * already been detached.
2078 */
Robin Murphy9adb9592016-01-26 18:06:36 +00002079 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002080 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002081 kfree(smmu_domain);
2082}
2083
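/*
 * Stream mapping: each Stream Mapping Entry (SME) pairs an SMR, which
 * matches incoming stream IDs against an id/mask/valid triple, with an
 * S2CR, which routes matching traffic to a context bank (or to bypass or
 * fault) with the chosen privilege and shareability settings.
 * arm_smmu_write_sme() pushes both registers for one index.
 */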
Robin Murphy468f4942016-09-12 17:13:49 +01002084static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
2085{
2086 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphyd5b41782016-09-14 15:21:39 +01002087 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
Robin Murphy468f4942016-09-12 17:13:49 +01002088
2089 if (smr->valid)
2090 reg |= SMR_VALID;
2091 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
2092}
2093
Robin Murphya754fd12016-09-12 17:13:50 +01002094static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
2095{
2096 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
2097 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
2098 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
Patrick Daly7f377fe2017-10-06 17:37:10 -07002099 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT |
2100 S2CR_SHCFG_NSH << S2CR_SHCFG_SHIFT;
Robin Murphya754fd12016-09-12 17:13:50 +01002101
2102 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
2103}
2104
2105static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
2106{
2107 arm_smmu_write_s2cr(smmu, idx);
2108 if (smmu->smrs)
2109 arm_smmu_write_smr(smmu, idx);
2110}
2111
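/*
 * Worked example for the SMR matching rules below (values are made up):
 * an existing entry {id = 0x400, mask = 0x0ff} matches stream IDs
 * 0x400-0x4ff. A new request {id = 0x410, mask = 0x00f} is entirely
 * contained (its mask is a subset and the IDs agree outside the existing
 * mask), so the existing index is reused. A request {id = 0x4f0,
 * mask = 0xf00} still collides on stream ID 0x4f0 but is not a subset,
 * so it is rejected with -EINVAL.
 */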
Robin Murphy6668f692016-09-12 17:13:54 +01002112static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy468f4942016-09-12 17:13:49 +01002113{
2114 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy6668f692016-09-12 17:13:54 +01002115 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002116
Robin Murphy6668f692016-09-12 17:13:54 +01002117 /* Stream indexing is blissfully easy */
2118 if (!smrs)
2119 return id;
Robin Murphy468f4942016-09-12 17:13:49 +01002120
Robin Murphy6668f692016-09-12 17:13:54 +01002121 /* Validating SMRs is... less so */
2122 for (i = 0; i < smmu->num_mapping_groups; ++i) {
2123 if (!smrs[i].valid) {
2124 /*
2125 * Note the first free entry we come across, which
2126 * we'll claim in the end if nothing else matches.
2127 */
2128 if (free_idx < 0)
2129 free_idx = i;
Robin Murphy468f4942016-09-12 17:13:49 +01002130 continue;
2131 }
Robin Murphy6668f692016-09-12 17:13:54 +01002132 /*
2133 * If the new entry is _entirely_ matched by an existing entry,
2134 * then reuse that, with the guarantee that there also cannot
2135 * be any subsequent conflicting entries. In normal use we'd
2136 * expect simply identical entries for this case, but there's
2137 * no harm in accommodating the generalisation.
2138 */
2139 if ((mask & smrs[i].mask) == mask &&
2140 !((id ^ smrs[i].id) & ~smrs[i].mask))
2141 return i;
2142 /*
2143 * If the new entry has any other overlap with an existing one,
2144 * though, then there always exists at least one stream ID
2145 * which would cause a conflict, and we can't allow that risk.
2146 */
2147 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
2148 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002149 }
2150
Robin Murphy6668f692016-09-12 17:13:54 +01002151 return free_idx;
2152}
2153
2154static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
2155{
2156 if (--smmu->s2crs[idx].count)
2157 return false;
2158
2159 smmu->s2crs[idx] = s2cr_init_val;
2160 if (smmu->smrs)
2161 smmu->smrs[idx].valid = false;
2162
2163 return true;
2164}
2165
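/*
 * Reserve stream map entries for every stream ID in the device's fwspec.
 * IDs may carry an SMR mask in their upper bits. Entries are refcounted
 * via s2crs[idx].count so aliasing masters share a slot, but nothing is
 * written to the hardware here; the SMRs/S2CRs are only programmed once
 * the group is actually attached to a domain.
 */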
2166static int arm_smmu_master_alloc_smes(struct device *dev)
2167{
Robin Murphy06e393e2016-09-12 17:13:55 +01002168 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2169 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy6668f692016-09-12 17:13:54 +01002170 struct arm_smmu_device *smmu = cfg->smmu;
2171 struct arm_smmu_smr *smrs = smmu->smrs;
2172 struct iommu_group *group;
2173 int i, idx, ret;
2174
2175 mutex_lock(&smmu->stream_map_mutex);
2176 /* Figure out a viable stream map entry allocation */
Robin Murphy06e393e2016-09-12 17:13:55 +01002177 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002178 u16 sid = fwspec->ids[i];
2179 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
2180
Robin Murphy6668f692016-09-12 17:13:54 +01002181 if (idx != INVALID_SMENDX) {
2182 ret = -EEXIST;
2183 goto out_err;
2184 }
2185
Robin Murphy7e96c742016-09-14 15:26:46 +01002186 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy6668f692016-09-12 17:13:54 +01002187 if (ret < 0)
2188 goto out_err;
2189
2190 idx = ret;
2191 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002192 smrs[idx].id = sid;
2193 smrs[idx].mask = mask;
Robin Murphy6668f692016-09-12 17:13:54 +01002194 smrs[idx].valid = true;
2195 }
2196 smmu->s2crs[idx].count++;
2197 cfg->smendx[i] = (s16)idx;
2198 }
2199
2200 group = iommu_group_get_for_dev(dev);
2201 if (!group)
2202 group = ERR_PTR(-ENOMEM);
2203 if (IS_ERR(group)) {
2204 ret = PTR_ERR(group);
2205 goto out_err;
2206 }
2207 iommu_group_put(group);
Robin Murphy468f4942016-09-12 17:13:49 +01002208
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002209 /* It worked! Don't poke the actual hardware until we've attached */
2210 for_each_cfg_sme(fwspec, i, idx)
Robin Murphy6668f692016-09-12 17:13:54 +01002211 smmu->s2crs[idx].group = group;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002212
Robin Murphy6668f692016-09-12 17:13:54 +01002213 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002214 return 0;
2215
Robin Murphy6668f692016-09-12 17:13:54 +01002216out_err:
Robin Murphy468f4942016-09-12 17:13:49 +01002217 while (i--) {
Robin Murphy6668f692016-09-12 17:13:54 +01002218 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy468f4942016-09-12 17:13:49 +01002219 cfg->smendx[i] = INVALID_SMENDX;
2220 }
Robin Murphy6668f692016-09-12 17:13:54 +01002221 mutex_unlock(&smmu->stream_map_mutex);
2222 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002223}
2224
Robin Murphy06e393e2016-09-12 17:13:55 +01002225static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002226{
Robin Murphy06e393e2016-09-12 17:13:55 +01002227 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
2228 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy1fb519a2016-09-12 17:13:53 +01002229 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01002230
Robin Murphy6668f692016-09-12 17:13:54 +01002231 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002232 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002233 if (arm_smmu_free_sme(smmu, idx))
2234 arm_smmu_write_sme(smmu, idx);
Robin Murphy468f4942016-09-12 17:13:49 +01002235 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002236 }
Robin Murphy6668f692016-09-12 17:13:54 +01002237 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002238}
2239
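/*
 * Drop a master's stream mappings on detach. Each SME carries an
 * attach_count; only when the last user goes away are the SMR and S2CR
 * cleared, and the domain's TLB is then flushed so no stale translations
 * survive for that context bank.
 */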
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002240static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
2241 struct iommu_fwspec *fwspec)
2242{
2243 struct arm_smmu_device *smmu = smmu_domain->smmu;
2244 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2245 int i, idx;
2246 const struct iommu_gather_ops *tlb;
2247
2248 tlb = smmu_domain->pgtbl_cfg.tlb;
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302249 if (!tlb)
2250 return;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002251
2252 mutex_lock(&smmu->stream_map_mutex);
2253 for_each_cfg_sme(fwspec, i, idx) {
2254 WARN_ON(s2cr[idx].attach_count == 0);
2255 s2cr[idx].attach_count -= 1;
2256
2257 if (s2cr[idx].attach_count > 0)
2258 continue;
2259
2260 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
2261 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
2262 }
2263 mutex_unlock(&smmu->stream_map_mutex);
2264
2265 /* Ensure there are no stale mappings for this context bank */
2266 tlb->tlb_flush_all(smmu_domain);
2267}
2268
Will Deacon45ae7cf2013-06-24 18:31:25 +01002269static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphy06e393e2016-09-12 17:13:55 +01002270 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002271{
Will Deacon44680ee2014-06-25 11:29:12 +01002272 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002273 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2274 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
2275 u8 cbndx = smmu_domain->cfg.cbndx;
Robin Murphy6668f692016-09-12 17:13:54 +01002276 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002277
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002278 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002279 for_each_cfg_sme(fwspec, i, idx) {
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002280 if (s2cr[idx].attach_count++ > 0)
Robin Murphy6668f692016-09-12 17:13:54 +01002281 continue;
Robin Murphya754fd12016-09-12 17:13:50 +01002282
2283 s2cr[idx].type = type;
2284 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
2285 s2cr[idx].cbndx = cbndx;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002286 arm_smmu_write_sme(smmu, idx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002287 }
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002288 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002289
2290 return 0;
2291}
2292
Patrick Daly09801312016-08-29 17:02:52 -07002293static void arm_smmu_detach_dev(struct iommu_domain *domain,
2294 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002295{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002296 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07002297 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002298 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly09801312016-08-29 17:02:52 -07002299 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07002300 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07002301
2302 if (dynamic)
2303 return;
2304
Patrick Daly09801312016-08-29 17:02:52 -07002305 if (!smmu) {
2306 dev_err(dev, "Domain not attached; cannot detach!\n");
2307 return;
2308 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002309
Vijayanand Jitta25cd32c2017-11-16 15:14:36 +05302310 if (atomic_domain)
2311 arm_smmu_power_on_atomic(smmu->pwr);
2312 else
2313 arm_smmu_power_on(smmu->pwr);
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002314
Vijayanand Jitta25cd32c2017-11-16 15:14:36 +05302315 arm_smmu_domain_remove_master(smmu_domain, fwspec);
2316 arm_smmu_power_off(smmu->pwr);
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002317}
2318
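/*
 * Master-side secure domains keep their page tables in HLOS memory but
 * must make them visible to the secure VM as well. Pages queued on
 * pte_info_list are hyp-assigned to both VMIDs (HLOS read/write, the
 * domain's secure VMID read-only); pages queued on unassign_list are
 * handed back to HLOS with read/write/exec permissions before being
 * freed.
 */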
Patrick Dalye271f212016-10-04 13:24:49 -07002319static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07002320{
Patrick Dalye271f212016-10-04 13:24:49 -07002321 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002322 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2323 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
2324 int source_vmid = VMID_HLOS;
2325 struct arm_smmu_pte_info *pte_info, *temp;
2326
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302327 if (!arm_smmu_is_master_side_secure(smmu_domain))
Patrick Dalye271f212016-10-04 13:24:49 -07002328 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002329
Patrick Dalye271f212016-10-04 13:24:49 -07002330 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002331 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2332 PAGE_SIZE, &source_vmid, 1,
2333 dest_vmids, dest_perms, 2);
2334 if (WARN_ON(ret))
2335 break;
2336 }
2337
2338 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2339 entry) {
2340 list_del(&pte_info->entry);
2341 kfree(pte_info);
2342 }
Patrick Dalye271f212016-10-04 13:24:49 -07002343 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002344}
2345
2346static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2347{
2348 int ret;
2349 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002350 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002351 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2352 struct arm_smmu_pte_info *pte_info, *temp;
2353
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302354 if (!arm_smmu_is_master_side_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002355 return;
2356
2357 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2358 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2359 PAGE_SIZE, source_vmlist, 2,
2360 &dest_vmids, &dest_perms, 1);
2361 if (WARN_ON(ret))
2362 break;
2363 free_pages_exact(pte_info->virt_addr, pte_info->size);
2364 }
2365
2366 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2367 entry) {
2368 list_del(&pte_info->entry);
2369 kfree(pte_info);
2370 }
2371}
2372
2373static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2374{
2375 struct arm_smmu_domain *smmu_domain = cookie;
2376 struct arm_smmu_pte_info *pte_info;
2377
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302378 if (smmu_domain->slave_side_secure ||
2379 !arm_smmu_has_secure_vmid(smmu_domain)) {
2380 if (smmu_domain->slave_side_secure)
2381 WARN(1, "slave side secure is enforced\n");
2382 else
2383 WARN(1, "Invalid VMID is set !!\n");
2384 return;
2385 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002386
2387 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2388 if (!pte_info)
2389 return;
2390
2391 pte_info->virt_addr = addr;
2392 pte_info->size = size;
2393 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2394}
2395
2396static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2397{
2398 struct arm_smmu_domain *smmu_domain = cookie;
2399 struct arm_smmu_pte_info *pte_info;
2400
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302401 if (smmu_domain->slave_side_secure ||
2402 !arm_smmu_has_secure_vmid(smmu_domain)) {
2403 if (smmu_domain->slave_side_secure)
2404 WARN(1, "slave side secure is enforced\n");
2405 else
2406 WARN(1, "Invalid VMID is set !!\n");
2407 return -EINVAL;
2408 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002409
2410 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2411 if (!pte_info)
2412 return -ENOMEM;
2413 pte_info->virt_addr = addr;
2414 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2415 return 0;
2416}
2417
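/*
 * Attach a device to a domain: power the SMMU, finalise the domain context
 * on first use, then point the device's stream map entries at the domain's
 * context bank. Dynamic domains stop after context setup since they never
 * own stream IDs, and a domain may only ever span a single SMMU. For
 * atomic domains an additional non-atomic power vote is taken here and
 * held until arm_smmu_detach_dev().
 */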
Will Deacon45ae7cf2013-06-24 18:31:25 +01002418static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2419{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002420 int ret;
Robin Murphy06e393e2016-09-12 17:13:55 +01002421 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Will Deacon518f7132014-11-14 17:17:54 +00002422 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +01002423 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly8befb662016-08-17 20:03:28 -07002424 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002425
Robin Murphy06e393e2016-09-12 17:13:55 +01002426 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002427 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2428 return -ENXIO;
2429 }
Robin Murphy06e393e2016-09-12 17:13:55 +01002430
Robin Murphy4f79b142016-10-17 12:06:21 +01002431 /*
2432 * FIXME: The arch/arm DMA API code tries to attach devices to its own
2433 * domains between of_xlate() and add_device() - we have no way to cope
2434 * with that, so until ARM gets converted to rely on groups and default
2435 * domains, just say no (but more politely than by dereferencing NULL).
2436 * This should be at least a WARN_ON once that's sorted.
2437 */
2438 if (!fwspec->iommu_priv)
2439 return -ENODEV;
2440
Robin Murphy06e393e2016-09-12 17:13:55 +01002441 smmu = fwspec_smmu(fwspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002442
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002443 /* Enable Clocks and Power */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002444 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002445 if (ret)
2446 return ret;
2447
Will Deacon518f7132014-11-14 17:17:54 +00002448 /* Ensure that the domain is finalised */
Patrick Dalyea63baa2017-02-13 17:11:33 -08002449 ret = arm_smmu_init_domain_context(domain, smmu, dev);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002450 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002451 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002452
Patrick Dalyc190d932016-08-30 17:23:28 -07002453 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002454 if (is_dynamic_domain(domain)) {
2455 ret = 0;
2456 goto out_power_off;
2457 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002458
Will Deacon45ae7cf2013-06-24 18:31:25 +01002459 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002460 * Sanity check the domain. We don't support domains across
2461 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002462 */
Robin Murphy06e393e2016-09-12 17:13:55 +01002463 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002464 dev_err(dev,
2465 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002466 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002467 ret = -EINVAL;
2468 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002469 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002470
2471 /* Looks ok, so add the device to the domain */
Robin Murphy06e393e2016-09-12 17:13:55 +01002472 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002473
2474out_power_off:
Patrick Daly466237d2016-10-14 15:59:14 -07002475 /*
2476 * Keep an additional vote for non-atomic power until domain is
2477 * detached
2478 */
2479 if (!ret && atomic_domain) {
2480 WARN_ON(arm_smmu_power_on(smmu->pwr));
2481 arm_smmu_power_off_atomic(smmu->pwr);
2482 }
2483
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002484 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002485
Will Deacon45ae7cf2013-06-24 18:31:25 +01002486 return ret;
2487}
2488
Will Deacon45ae7cf2013-06-24 18:31:25 +01002489static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002490 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002491{
Will Deacon518f7132014-11-14 17:17:54 +00002492 int ret;
2493 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002494 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002495	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002496
Will Deacon518f7132014-11-14 17:17:54 +00002497 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002498 return -ENODEV;
2499
Patrick Dalye271f212016-10-04 13:24:49 -07002500 arm_smmu_secure_domain_lock(smmu_domain);
2501
Will Deacon518f7132014-11-14 17:17:54 +00002502 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2503 ret = ops->map(ops, iova, paddr, size, prot);
2504 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002505
2506 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002507 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002508
Will Deacon518f7132014-11-14 17:17:54 +00002509 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002510}
2511
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002512static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
2513 dma_addr_t iova)
2514{
2515 uint64_t ret;
2516 unsigned long flags;
2517 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2518 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2519
2520 if (!ops)
2521 return 0;
2522
2523 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2524 ret = ops->iova_to_pte(ops, iova);
2525 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2526 return ret;
2527}
2528
Will Deacon45ae7cf2013-06-24 18:31:25 +01002529static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2530 size_t size)
2531{
Will Deacon518f7132014-11-14 17:17:54 +00002532 size_t ret;
2533 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002534 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002535	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002536
Will Deacon518f7132014-11-14 17:17:54 +00002537 if (!ops)
2538 return 0;
2539
Patrick Daly8befb662016-08-17 20:03:28 -07002540 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002541 if (ret)
2542 return ret;
2543
Patrick Dalye271f212016-10-04 13:24:49 -07002544 arm_smmu_secure_domain_lock(smmu_domain);
2545
Will Deacon518f7132014-11-14 17:17:54 +00002546 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2547 ret = ops->unmap(ops, iova, size);
2548 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002549
Patrick Daly8befb662016-08-17 20:03:28 -07002550 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002551 /*
2552 * While splitting up block mappings, we might allocate page table
2553	 * memory during unmap, so the VMIDs need to be assigned to the
2554 * memory here as well.
2555 */
2556 arm_smmu_assign_table(smmu_domain);
2557 /* Also unassign any pages that were free'd during unmap */
2558	/* Also unassign any pages that were freed during unmap */
Patrick Dalye271f212016-10-04 13:24:49 -07002559 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002560 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002561}
2562
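/*
 * arm_smmu_map_sg() maps the scatterlist in batches smaller than
 * MAX_MAP_SG_BATCH_SIZE so that the page-table spinlock is not held
 * across arbitrarily large mappings. If any batch fails, everything
 * mapped so far is unmapped again and 0 is returned; otherwise the
 * total size mapped is returned.
 */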
Patrick Daly88d321d2017-02-09 18:02:13 -08002563#define MAX_MAP_SG_BATCH_SIZE (SZ_4M)
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002564static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2565 struct scatterlist *sg, unsigned int nents, int prot)
2566{
2567 int ret;
Patrick Daly88d321d2017-02-09 18:02:13 -08002568 size_t size, batch_size, size_to_unmap = 0;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002569 unsigned long flags;
2570 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2571 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Patrick Daly88d321d2017-02-09 18:02:13 -08002572 unsigned int idx_start, idx_end;
2573 struct scatterlist *sg_start, *sg_end;
2574 unsigned long __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002575
2576 if (!ops)
2577 return -ENODEV;
2578
Patrick Daly4b9a7ad2017-09-22 17:31:13 -07002579 arm_smmu_secure_domain_lock(smmu_domain);
2580
Patrick Daly88d321d2017-02-09 18:02:13 -08002581 __saved_iova_start = iova;
2582 idx_start = idx_end = 0;
2583 sg_start = sg_end = sg;
2584 while (idx_end < nents) {
2585 batch_size = sg_end->length;
2586 sg_end = sg_next(sg_end);
2587 idx_end++;
2588 while ((idx_end < nents) &&
2589 (batch_size + sg_end->length < MAX_MAP_SG_BATCH_SIZE)) {
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002590
Patrick Daly88d321d2017-02-09 18:02:13 -08002591 batch_size += sg_end->length;
2592 sg_end = sg_next(sg_end);
2593 idx_end++;
2594 }
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002595
Patrick Daly88d321d2017-02-09 18:02:13 -08002596 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2597 ret = ops->map_sg(ops, iova, sg_start, idx_end - idx_start,
2598 prot, &size);
2599 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2600 /* Returns 0 on error */
2601 if (!ret) {
2602 size_to_unmap = iova + size - __saved_iova_start;
2603 goto out;
2604 }
2605
2606 iova += batch_size;
2607 idx_start = idx_end;
2608 sg_start = sg_end;
2609 }
2610
2611out:
Patrick Dalyc11d1082016-09-01 15:52:44 -07002612 arm_smmu_assign_table(smmu_domain);
2613
Patrick Daly88d321d2017-02-09 18:02:13 -08002614 if (size_to_unmap) {
2615 arm_smmu_unmap(domain, __saved_iova_start, size_to_unmap);
2616 iova = __saved_iova_start;
2617 }
Patrick Daly4b9a7ad2017-09-22 17:31:13 -07002618 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly88d321d2017-02-09 18:02:13 -08002619 return iova - __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002620}
2621
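/*
 * Hardware address translation (ATOS): write the page-aligned VA to
 * ATS1PR and poll ATSR for completion. On timeout the result of a
 * software table walk is logged for comparison and 0 is returned; on
 * success the PA is taken from PAR unless the fault bit is set.
 */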
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002622static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyad441dd2016-09-15 15:50:46 -07002623 dma_addr_t iova)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002624{
Joerg Roedel1d672632015-03-26 13:43:10 +01002625 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002626 struct arm_smmu_device *smmu = smmu_domain->smmu;
2627 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2628	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2629 struct device *dev = smmu->dev;
2630 void __iomem *cb_base;
2631 u32 tmp;
2632 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002633 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002634
2635 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2636
Robin Murphy661d9622015-05-27 17:09:34 +01002637 /* ATS1 registers can only be written atomically */
2638 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002639 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002640 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2641 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002642 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002643
2644 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2645 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002646 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002647 dev_err(dev,
2648 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2649 &iova, &phys);
2650 phys = 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002651 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002652 }
2653
Robin Murphyf9a05f02016-04-13 18:13:01 +01002654 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002655 if (phys & CB_PAR_F) {
2656 dev_err(dev, "translation fault!\n");
2657 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002658 phys = 0;
2659 } else {
2660 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002661 }
Patrick Dalyad441dd2016-09-15 15:50:46 -07002662
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002663 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002664}
2665
Will Deacon45ae7cf2013-06-24 18:31:25 +01002666static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002667 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002668{
Will Deacon518f7132014-11-14 17:17:54 +00002669 phys_addr_t ret;
2670 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002671 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002672	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002673
Will Deacon518f7132014-11-14 17:17:54 +00002674 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002675 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002676
Will Deacon518f7132014-11-14 17:17:54 +00002677 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002678 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002679 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002680
Will Deacon518f7132014-11-14 17:17:54 +00002681 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002682}
2683
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002684/*
2685 * This function can sleep, and cannot be called from atomic context. Will
2686 * power on register block if required. This restriction does not apply to the
2687 * original iova_to_phys() op.
2688 */
2689static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2690 dma_addr_t iova)
2691{
2692 phys_addr_t ret = 0;
2693 unsigned long flags;
2694 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly62ba1922017-08-30 16:47:18 -07002695 struct arm_smmu_device *smmu = smmu_domain->smmu;
2696
2697 if (smmu->options & ARM_SMMU_OPT_DISABLE_ATOS)
2698 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002699
Patrick Dalyad441dd2016-09-15 15:50:46 -07002700 if (smmu_domain->smmu->arch_ops &&
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002701 smmu_domain->smmu->arch_ops->iova_to_phys_hard) {
2702 ret = smmu_domain->smmu->arch_ops->iova_to_phys_hard(
Patrick Dalyad441dd2016-09-15 15:50:46 -07002703 domain, iova);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002704 return ret;
2705 }
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002706
2707 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2708 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2709 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyad441dd2016-09-15 15:50:46 -07002710 ret = __arm_smmu_iova_to_phys_hard(domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002711
2712 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2713
2714 return ret;
2715}
2716
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002717static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002718{
Will Deacond0948942014-06-24 17:30:10 +01002719 switch (cap) {
2720 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002721 /*
2722 * Return true here as the SMMU can always send out coherent
2723 * requests.
2724 */
2725 return true;
Will Deacond0948942014-06-24 17:30:10 +01002726 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002727 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002728 case IOMMU_CAP_NOEXEC:
2729 return true;
Will Deacond0948942014-06-24 17:30:10 +01002730 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002731 return false;
Will Deacond0948942014-06-24 17:30:10 +01002732 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002733}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002734
Patrick Daly8e3371a2017-02-13 22:14:53 -08002735static struct arm_smmu_device *arm_smmu_get_by_list(struct device_node *np)
2736{
2737 struct arm_smmu_device *smmu;
2738 unsigned long flags;
2739
2740 spin_lock_irqsave(&arm_smmu_devices_lock, flags);
2741 list_for_each_entry(smmu, &arm_smmu_devices, list) {
2742 if (smmu->dev->of_node == np) {
2743 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2744 return smmu;
2745 }
2746 }
2747 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2748 return NULL;
2749}
2750
Robin Murphy7e96c742016-09-14 15:26:46 +01002751static int arm_smmu_match_node(struct device *dev, void *data)
2752{
2753 return dev->of_node == data;
2754}
2755
2756static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
2757{
2758 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
2759 np, arm_smmu_match_node);
2760 put_device(dev);
Patrick Daly8e3371a2017-02-13 22:14:53 -08002761 return dev ? dev_get_drvdata(dev) : arm_smmu_get_by_list(np);
Robin Murphy7e96c742016-09-14 15:26:46 +01002762}
2763
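/*
 * add_device: resolve the owning SMMU (via the legacy "mmu-masters"
 * binding or the generic iommus binding), validate every stream ID and
 * SMR mask against the hardware limits, then allocate stream-match
 * entries for the master. The SMMU is powered around the register
 * programming.
 */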
Will Deacon03edb222015-01-19 14:27:33 +00002764static int arm_smmu_add_device(struct device *dev)
2765{
Robin Murphy06e393e2016-09-12 17:13:55 +01002766 struct arm_smmu_device *smmu;
Robin Murphyd5b41782016-09-14 15:21:39 +01002767 struct arm_smmu_master_cfg *cfg;
Robin Murphy7e96c742016-09-14 15:26:46 +01002768 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyd5b41782016-09-14 15:21:39 +01002769 int i, ret;
2770
Robin Murphy7e96c742016-09-14 15:26:46 +01002771 if (using_legacy_binding) {
2772 ret = arm_smmu_register_legacy_master(dev, &smmu);
2773 fwspec = dev->iommu_fwspec;
2774 if (ret)
2775 goto out_free;
Robin Murphy22e6f6c2016-11-02 17:31:32 +00002776 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002777 smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
2778 if (!smmu)
2779 return -ENODEV;
2780 } else {
2781 return -ENODEV;
2782 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002783
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002784 ret = arm_smmu_power_on(smmu->pwr);
2785 if (ret)
2786 goto out_free;
2787
Robin Murphyd5b41782016-09-14 15:21:39 +01002788 ret = -EINVAL;
Robin Murphy06e393e2016-09-12 17:13:55 +01002789 for (i = 0; i < fwspec->num_ids; i++) {
2790 u16 sid = fwspec->ids[i];
Robin Murphy7e96c742016-09-14 15:26:46 +01002791 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyd5b41782016-09-14 15:21:39 +01002792
Robin Murphy06e393e2016-09-12 17:13:55 +01002793 if (sid & ~smmu->streamid_mask) {
Robin Murphyd5b41782016-09-14 15:21:39 +01002794 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002795 sid, smmu->streamid_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002796 goto out_pwr_off;
Robin Murphyd5b41782016-09-14 15:21:39 +01002797 }
Robin Murphy7e96c742016-09-14 15:26:46 +01002798 if (mask & ~smmu->smr_mask_mask) {
2799 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
2800 sid, smmu->smr_mask_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002801 goto out_pwr_off;
Robin Murphy7e96c742016-09-14 15:26:46 +01002802 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002803 }
Will Deacon03edb222015-01-19 14:27:33 +00002804
Robin Murphy06e393e2016-09-12 17:13:55 +01002805 ret = -ENOMEM;
2806 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
2807 GFP_KERNEL);
2808 if (!cfg)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002809 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002810
2811 cfg->smmu = smmu;
2812 fwspec->iommu_priv = cfg;
2813 while (i--)
2814 cfg->smendx[i] = INVALID_SMENDX;
2815
Robin Murphy6668f692016-09-12 17:13:54 +01002816 ret = arm_smmu_master_alloc_smes(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002817 if (ret)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002818 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002819
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002820 arm_smmu_power_off(smmu->pwr);
Robin Murphy06e393e2016-09-12 17:13:55 +01002821 return 0;
Robin Murphyd5b41782016-09-14 15:21:39 +01002822
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002823out_pwr_off:
2824 arm_smmu_power_off(smmu->pwr);
Robin Murphyd5b41782016-09-14 15:21:39 +01002825out_free:
Robin Murphy06e393e2016-09-12 17:13:55 +01002826 if (fwspec)
2827 kfree(fwspec->iommu_priv);
2828 iommu_fwspec_free(dev);
Robin Murphyd5b41782016-09-14 15:21:39 +01002829 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00002830}
2831
Will Deacon45ae7cf2013-06-24 18:31:25 +01002832static void arm_smmu_remove_device(struct device *dev)
2833{
Robin Murphy06e393e2016-09-12 17:13:55 +01002834 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002835 struct arm_smmu_device *smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002836
Robin Murphy06e393e2016-09-12 17:13:55 +01002837 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyd5b41782016-09-14 15:21:39 +01002838 return;
Robin Murphya754fd12016-09-12 17:13:50 +01002839
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002840 smmu = fwspec_smmu(fwspec);
2841 if (arm_smmu_power_on(smmu->pwr)) {
2842 WARN_ON(1);
2843 return;
2844 }
2845
Robin Murphy06e393e2016-09-12 17:13:55 +01002846 arm_smmu_master_free_smes(fwspec);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002847 iommu_group_remove_device(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002848 kfree(fwspec->iommu_priv);
2849 iommu_fwspec_free(dev);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002850 arm_smmu_power_off(smmu->pwr);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002851}
2852
Joerg Roedelaf659932015-10-21 23:51:41 +02002853static struct iommu_group *arm_smmu_device_group(struct device *dev)
2854{
Robin Murphy06e393e2016-09-12 17:13:55 +01002855 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2856 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy6668f692016-09-12 17:13:54 +01002857 struct iommu_group *group = NULL;
2858 int i, idx;
2859
Robin Murphy06e393e2016-09-12 17:13:55 +01002860 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002861 if (group && smmu->s2crs[idx].group &&
2862 group != smmu->s2crs[idx].group)
2863 return ERR_PTR(-EINVAL);
2864
2865 group = smmu->s2crs[idx].group;
2866 }
2867
Patrick Daly03330cc2017-08-11 14:56:38 -07002868 if (!group) {
2869 if (dev_is_pci(dev))
2870 group = pci_device_group(dev);
2871 else
2872 group = generic_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02002873
Patrick Daly03330cc2017-08-11 14:56:38 -07002874 if (IS_ERR(group))
2875 return NULL;
2876 }
2877
2878 if (arm_smmu_arch_device_group(dev, group)) {
2879 iommu_group_put(group);
2880 return ERR_PTR(-EINVAL);
2881 }
Joerg Roedelaf659932015-10-21 23:51:41 +02002882
Joerg Roedelaf659932015-10-21 23:51:41 +02002883 return group;
2884}
2885
Will Deaconc752ce42014-06-25 22:46:31 +01002886static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
2887 enum iommu_attr attr, void *data)
2888{
Joerg Roedel1d672632015-03-26 13:43:10 +01002889 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002890 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01002891
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002892 mutex_lock(&smmu_domain->init_mutex);
Will Deaconc752ce42014-06-25 22:46:31 +01002893 switch (attr) {
2894 case DOMAIN_ATTR_NESTING:
2895 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002896 ret = 0;
2897 break;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08002898 case DOMAIN_ATTR_PT_BASE_ADDR:
2899 *((phys_addr_t *)data) =
2900 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002901 ret = 0;
2902 break;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002903 case DOMAIN_ATTR_CONTEXT_BANK:
2904 /* context bank index isn't valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002905 if (smmu_domain->smmu == NULL) {
2906 ret = -ENODEV;
2907 break;
2908 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002909 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
2910 ret = 0;
2911 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002912 case DOMAIN_ATTR_TTBR0: {
2913 u64 val;
2914 struct arm_smmu_device *smmu = smmu_domain->smmu;
2915 /* not valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002916 if (smmu == NULL) {
2917 ret = -ENODEV;
2918 break;
2919 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002920 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2921 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
2922 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
2923 << (TTBRn_ASID_SHIFT);
2924 *((u64 *)data) = val;
2925 ret = 0;
2926 break;
2927 }
2928 case DOMAIN_ATTR_CONTEXTIDR:
2929 /* not valid until attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002930 if (smmu_domain->smmu == NULL) {
2931 ret = -ENODEV;
2932 break;
2933 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002934 *((u32 *)data) = smmu_domain->cfg.procid;
2935 ret = 0;
2936 break;
2937 case DOMAIN_ATTR_PROCID:
2938 *((u32 *)data) = smmu_domain->cfg.procid;
2939 ret = 0;
2940 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002941 case DOMAIN_ATTR_DYNAMIC:
2942 *((int *)data) = !!(smmu_domain->attributes
2943 & (1 << DOMAIN_ATTR_DYNAMIC));
2944 ret = 0;
2945 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002946 case DOMAIN_ATTR_NON_FATAL_FAULTS:
2947 *((int *)data) = !!(smmu_domain->attributes
2948 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
2949 ret = 0;
2950 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07002951 case DOMAIN_ATTR_S1_BYPASS:
2952 *((int *)data) = !!(smmu_domain->attributes
2953 & (1 << DOMAIN_ATTR_S1_BYPASS));
2954 ret = 0;
2955 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002956 case DOMAIN_ATTR_SECURE_VMID:
2957 *((int *)data) = smmu_domain->secure_vmid;
2958 ret = 0;
2959 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08002960 case DOMAIN_ATTR_PGTBL_INFO: {
2961 struct iommu_pgtbl_info *info = data;
2962
2963 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
2964 ret = -ENODEV;
2965 break;
2966 }
2967 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
2968 ret = 0;
2969 break;
2970 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002971 case DOMAIN_ATTR_FAST:
2972 *((int *)data) = !!(smmu_domain->attributes
2973 & (1 << DOMAIN_ATTR_FAST));
2974 ret = 0;
2975 break;
Patrick Daly1e279922017-09-06 15:57:45 -07002976 case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
2977 *((int *)data) = !!(smmu_domain->attributes
2978 & (1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR));
2979 ret = 0;
2980 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002981 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2982 *((int *)data) = !!(smmu_domain->attributes &
2983 (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
2984 ret = 0;
2985 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08002986 case DOMAIN_ATTR_EARLY_MAP:
2987 *((int *)data) = !!(smmu_domain->attributes
2988 & (1 << DOMAIN_ATTR_EARLY_MAP));
2989 ret = 0;
2990 break;
Mitchel Humpherys05314f32016-06-07 16:04:40 -07002991 case DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002992 if (!smmu_domain->smmu) {
2993 ret = -ENODEV;
2994 break;
2995 }
Liam Mark53cf2342016-12-20 11:36:07 -08002996 *((int *)data) = is_iommu_pt_coherent(smmu_domain);
2997 ret = 0;
2998 break;
2999 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
3000 *((int *)data) = !!(smmu_domain->attributes
3001 & (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
Mitchel Humpherys05314f32016-06-07 16:04:40 -07003002 ret = 0;
3003 break;
Charan Teja Reddyc682e472017-04-20 19:11:20 +05303004 case DOMAIN_ATTR_CB_STALL_DISABLE:
3005 *((int *)data) = !!(smmu_domain->attributes
3006 & (1 << DOMAIN_ATTR_CB_STALL_DISABLE));
3007 ret = 0;
3008 break;
Patrick Daly83174c12017-10-26 12:31:15 -07003009 case DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN:
Patrick Daly23301482017-10-12 16:18:25 -07003010 *((int *)data) = smmu_domain->qsmmuv500_errata2_min_align;
3011 ret = 0;
3012 break;
Will Deaconc752ce42014-06-25 22:46:31 +01003013 default:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003014 ret = -ENODEV;
3015 break;
Will Deaconc752ce42014-06-25 22:46:31 +01003016 }
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003017 mutex_unlock(&smmu_domain->init_mutex);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06003018 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01003019}
3020
3021static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
3022 enum iommu_attr attr, void *data)
3023{
Will Deacon518f7132014-11-14 17:17:54 +00003024 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01003025 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01003026
Will Deacon518f7132014-11-14 17:17:54 +00003027 mutex_lock(&smmu_domain->init_mutex);
3028
Will Deaconc752ce42014-06-25 22:46:31 +01003029 switch (attr) {
3030 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00003031 if (smmu_domain->smmu) {
3032 ret = -EPERM;
3033 goto out_unlock;
3034 }
3035
Will Deaconc752ce42014-06-25 22:46:31 +01003036 if (*(int *)data)
3037 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
3038 else
3039 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
3040
Will Deacon518f7132014-11-14 17:17:54 +00003041 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06003042 case DOMAIN_ATTR_PROCID:
3043 if (smmu_domain->smmu != NULL) {
3044 dev_err(smmu_domain->smmu->dev,
3045 "cannot change procid attribute while attached\n");
3046 ret = -EBUSY;
3047 break;
3048 }
3049 smmu_domain->cfg.procid = *((u32 *)data);
3050 ret = 0;
3051 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07003052 case DOMAIN_ATTR_DYNAMIC: {
3053 int dynamic = *((int *)data);
3054
3055 if (smmu_domain->smmu != NULL) {
3056 dev_err(smmu_domain->smmu->dev,
3057 "cannot change dynamic attribute while attached\n");
3058 ret = -EBUSY;
3059 break;
3060 }
3061
3062 if (dynamic)
3063 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
3064 else
3065 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
3066 ret = 0;
3067 break;
3068 }
3069 case DOMAIN_ATTR_CONTEXT_BANK:
3070 /* context bank can't be set while attached */
3071 if (smmu_domain->smmu != NULL) {
3072 ret = -EBUSY;
3073 break;
3074 }
3075 /* ... and it can only be set for dynamic contexts. */
3076 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
3077 ret = -EINVAL;
3078 break;
3079 }
3080
3081 /* this will be validated during attach */
3082 smmu_domain->cfg.cbndx = *((unsigned int *)data);
3083 ret = 0;
3084 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07003085 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
3086 u32 non_fatal_faults = *((int *)data);
3087
3088 if (non_fatal_faults)
3089 smmu_domain->attributes |=
3090 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
3091 else
3092 smmu_domain->attributes &=
3093 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
3094 ret = 0;
3095 break;
3096 }
Patrick Dalye62d3362016-03-15 18:58:28 -07003097 case DOMAIN_ATTR_S1_BYPASS: {
3098 int bypass = *((int *)data);
3099
3100 /* bypass can't be changed while attached */
3101 if (smmu_domain->smmu != NULL) {
3102 ret = -EBUSY;
3103 break;
3104 }
3105 if (bypass)
3106 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
3107 else
3108 smmu_domain->attributes &=
3109 ~(1 << DOMAIN_ATTR_S1_BYPASS);
3110
3111 ret = 0;
3112 break;
3113 }
Patrick Daly8befb662016-08-17 20:03:28 -07003114 case DOMAIN_ATTR_ATOMIC:
3115 {
3116 int atomic_ctx = *((int *)data);
3117
3118 /* can't be changed while attached */
3119 if (smmu_domain->smmu != NULL) {
3120 ret = -EBUSY;
3121 break;
3122 }
3123 if (atomic_ctx)
3124 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
3125 else
3126 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
3127 break;
3128 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07003129 case DOMAIN_ATTR_SECURE_VMID:
3130 if (smmu_domain->secure_vmid != VMID_INVAL) {
3131 ret = -ENODEV;
3132 WARN(1, "secure vmid already set!");
3133 break;
3134 }
3135 smmu_domain->secure_vmid = *((int *)data);
3136 break;
Patrick Daly1e279922017-09-06 15:57:45 -07003137 case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
3138 if (*((int *)data))
3139 smmu_domain->attributes |=
3140 1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR;
3141 ret = 0;
3142 break;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003143 /*
3144 * fast_smmu_unmap_page() and fast_smmu_alloc_iova() both
3145 * expect that the bus/clock/regulator are already on. Thus also
3146	 * force DOMAIN_ATTR_ATOMIC to be set.
3147 */
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003148 case DOMAIN_ATTR_FAST:
Patrick Daly0df84ac2017-10-11 17:32:41 -07003149 {
3150 int fast = *((int *)data);
3151
3152 if (fast) {
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003153 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003154 smmu_domain->attributes |= 1 << DOMAIN_ATTR_ATOMIC;
3155 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003156 ret = 0;
3157 break;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003158 }
Patrick Dalyce6786f2016-11-09 14:19:23 -08003159 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
3160 /* can't be changed while attached */
3161 if (smmu_domain->smmu != NULL) {
3162 ret = -EBUSY;
3163 break;
3164 }
3165 if (*((int *)data))
3166 smmu_domain->attributes |=
3167 1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
3168 ret = 0;
3169 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003170 case DOMAIN_ATTR_EARLY_MAP: {
3171 int early_map = *((int *)data);
3172
3173 ret = 0;
3174 if (early_map) {
3175 smmu_domain->attributes |=
3176 1 << DOMAIN_ATTR_EARLY_MAP;
3177 } else {
3178 if (smmu_domain->smmu)
3179 ret = arm_smmu_enable_s1_translations(
3180 smmu_domain);
3181
3182 if (!ret)
3183 smmu_domain->attributes &=
3184 ~(1 << DOMAIN_ATTR_EARLY_MAP);
3185 }
3186 break;
3187 }
Liam Mark53cf2342016-12-20 11:36:07 -08003188 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT: {
3189 int force_coherent = *((int *)data);
3190
3191 if (smmu_domain->smmu != NULL) {
3192 dev_err(smmu_domain->smmu->dev,
3193 "cannot change force coherent attribute while attached\n");
3194 ret = -EBUSY;
3195 break;
3196 }
3197
3198 if (force_coherent)
3199 smmu_domain->attributes |=
3200 1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
3201 else
3202 smmu_domain->attributes &=
3203 ~(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT);
3204
3205 ret = 0;
3206 break;
3207 }
3208
Charan Teja Reddyc682e472017-04-20 19:11:20 +05303209 case DOMAIN_ATTR_CB_STALL_DISABLE:
3210 if (*((int *)data))
3211 smmu_domain->attributes |=
3212 1 << DOMAIN_ATTR_CB_STALL_DISABLE;
3213 ret = 0;
3214 break;
Will Deaconc752ce42014-06-25 22:46:31 +01003215 default:
Will Deacon518f7132014-11-14 17:17:54 +00003216 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01003217 }
Will Deacon518f7132014-11-14 17:17:54 +00003218
3219out_unlock:
3220 mutex_unlock(&smmu_domain->init_mutex);
3221 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01003222}
3223
Robin Murphy7e96c742016-09-14 15:26:46 +01003224static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
3225{
3226 u32 fwid = 0;
3227
3228 if (args->args_count > 0)
3229 fwid |= (u16)args->args[0];
3230
3231 if (args->args_count > 1)
3232 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
3233
3234 return iommu_fwspec_add_ids(dev, &fwid, 1);
3235}
3236
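/*
 * Called when DOMAIN_ATTR_EARLY_MAP is cleared on an attached domain:
 * set SCTLR.M on the context bank so that stage 1 translation takes
 * effect.
 */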
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003237static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain)
3238{
3239 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3240 struct arm_smmu_device *smmu = smmu_domain->smmu;
3241 void __iomem *cb_base;
3242 u32 reg;
3243 int ret;
3244
3245 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3246 ret = arm_smmu_power_on(smmu->pwr);
3247 if (ret)
3248 return ret;
3249
3250 reg = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3251 reg |= SCTLR_M;
3252
3253 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
3254 arm_smmu_power_off(smmu->pwr);
3255 return ret;
3256}
3257
Liam Mark3ba41cf2016-12-09 14:39:04 -08003258static bool arm_smmu_is_iova_coherent(struct iommu_domain *domain,
3259 dma_addr_t iova)
3260{
3261 bool ret;
3262 unsigned long flags;
3263 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3264 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
3265
3266 if (!ops)
3267 return false;
3268
3269 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
3270 ret = ops->is_iova_coherent(ops, iova);
3271 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
3272 return ret;
3273}
3274
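/*
 * Debug helper: synthesise a context fault by writing the requested
 * fault bits to FSRRESTORE, then give the fault interrupt a second to
 * fire and be handled.
 */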
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003275static void arm_smmu_trigger_fault(struct iommu_domain *domain,
3276 unsigned long flags)
3277{
3278 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3279 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3280 struct arm_smmu_device *smmu;
3281 void __iomem *cb_base;
3282
3283 if (!smmu_domain->smmu) {
3284 pr_err("Can't trigger faults on non-attached domains\n");
3285 return;
3286 }
3287
3288 smmu = smmu_domain->smmu;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003289 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003290 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003291
3292 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3293 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
3294 flags, cfg->cbndx);
3295 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07003296 /* give the interrupt time to fire... */
3297 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003298
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003299 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003300}
3301
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003302static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
3303{
Patrick Dalyda765c62017-09-11 16:31:07 -07003304 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3305 const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;
3306
3307 tlb->tlb_flush_all(smmu_domain);
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003308}
3309
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003310static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
3311{
3312 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3313
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003314 return arm_smmu_power_on(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003315}
3316
3317static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
3318{
3319 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3320
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003321 arm_smmu_power_off(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003322}
3323
Will Deacon518f7132014-11-14 17:17:54 +00003324static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01003325 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01003326 .domain_alloc = arm_smmu_domain_alloc,
3327 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01003328 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07003329 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01003330 .map = arm_smmu_map,
3331 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07003332 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01003333 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07003334 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01003335 .add_device = arm_smmu_add_device,
3336 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02003337 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01003338 .domain_get_attr = arm_smmu_domain_get_attr,
3339 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy7e96c742016-09-14 15:26:46 +01003340 .of_xlate = arm_smmu_of_xlate,
Will Deacon518f7132014-11-14 17:17:54 +00003341 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003342 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003343 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003344 .enable_config_clocks = arm_smmu_enable_config_clocks,
3345 .disable_config_clocks = arm_smmu_disable_config_clocks,
Liam Mark3ba41cf2016-12-09 14:39:04 -08003346 .is_iova_coherent = arm_smmu_is_iova_coherent,
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07003347 .iova_to_pte = arm_smmu_iova_to_pte,
Will Deacon45ae7cf2013-06-24 18:31:25 +01003348};
3349
Patrick Dalyad441dd2016-09-15 15:50:46 -07003350#define IMPL_DEF1_MICRO_MMU_CTRL 0
3351#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
3352#define MICRO_MMU_CTRL_IDLE (1 << 3)
3353
3354/* Definitions for implementation-defined registers */
3355#define ACTLR_QCOM_OSH_SHIFT 28
3356#define ACTLR_QCOM_OSH 1
3357
3358#define ACTLR_QCOM_ISH_SHIFT 29
3359#define ACTLR_QCOM_ISH 1
3360
3361#define ACTLR_QCOM_NSH_SHIFT 30
3362#define ACTLR_QCOM_NSH 1
3363
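/*
 * QSMMUv2 halt/resume: request a halt by setting LOCAL_HALT_REQ in the
 * implementation-defined MICRO_MMU_CTRL register and, when asked to,
 * polling for the IDLE bit. When the context banks are statically
 * configured (arm_smmu_is_static_cb()), the register is written via
 * scm_io_write() instead of the normal MMIO path.
 */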
3364static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003365{
3366 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003367 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003368
3369 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
3370 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
3371 0, 30000)) {
3372 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
3373 return -EBUSY;
3374 }
3375
3376 return 0;
3377}
3378
Patrick Dalyad441dd2016-09-15 15:50:46 -07003379static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003380{
3381 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3382 u32 reg;
3383
3384 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3385 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
Charan Teja Reddy35144b02017-09-05 16:20:46 +05303386
3387 if (arm_smmu_is_static_cb(smmu)) {
3388 phys_addr_t impl_def1_base_phys = impl_def1_base - smmu->base +
3389 smmu->phys_addr;
3390
3391 if (scm_io_write(impl_def1_base_phys +
3392 IMPL_DEF1_MICRO_MMU_CTRL, reg)) {
3393 dev_err(smmu->dev,
3394				"scm_io_write failed. SMMU might not be halted");
3395 return -EINVAL;
3396 }
3397 } else {
3398 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3399 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003400
Patrick Dalyad441dd2016-09-15 15:50:46 -07003401 return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003402}
3403
Patrick Dalyad441dd2016-09-15 15:50:46 -07003404static int qsmmuv2_halt(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003405{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003406 return __qsmmuv2_halt(smmu, true);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003407}
3408
Patrick Dalyad441dd2016-09-15 15:50:46 -07003409static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003410{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003411 return __qsmmuv2_halt(smmu, false);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003412}
3413
Patrick Dalyad441dd2016-09-15 15:50:46 -07003414static void qsmmuv2_resume(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003415{
3416 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3417 u32 reg;
3418
3419 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3420 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
Charan Teja Reddy35144b02017-09-05 16:20:46 +05303421
3422 if (arm_smmu_is_static_cb(smmu)) {
3423 phys_addr_t impl_def1_base_phys = impl_def1_base - smmu->base +
3424 smmu->phys_addr;
3425
3426 if (scm_io_write(impl_def1_base_phys +
3427 IMPL_DEF1_MICRO_MMU_CTRL, reg))
3428 dev_err(smmu->dev,
3429				"scm_io_write failed. SMMU might not be resumed");
3430 } else {
3431 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3432 }
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003433}
3434
Patrick Dalyad441dd2016-09-15 15:50:46 -07003435static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003436{
3437 int i;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003438 u32 val;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003439 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003440 void __iomem *cb_base;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003441
Patrick Dalyad441dd2016-09-15 15:50:46 -07003442 /*
3443 * SCTLR.M must be disabled here per ARM SMMUv2 spec
3444 * to prevent table walks with an inconsistent state.
3445 */
3446 for (i = 0; i < smmu->num_context_banks; ++i) {
3447 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3448 val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
3449 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
3450 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
3451 writel_relaxed(val, cb_base + ARM_SMMU_CB_ACTLR);
3452 }
3453
3454 /* Program implementation defined registers */
3455 qsmmuv2_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003456 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
3457 writel_relaxed(regs[i].value,
3458 ARM_SMMU_GR0(smmu) + regs[i].offset);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003459 qsmmuv2_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003460}
3461
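/*
 * QSMMUv2 ATOS: halt the SMMU, clear any stale fault from FSR and
 * momentarily drop stall-on-fault (SCTLR.CFCFG) so a translation fault
 * cannot stall the ATOS operation, then run the common hardware
 * translation and restore the original state before resuming.
 */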
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003462static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
3463 dma_addr_t iova)
Patrick Dalyad441dd2016-09-15 15:50:46 -07003464{
3465 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3466 struct arm_smmu_device *smmu = smmu_domain->smmu;
3467 int ret;
3468 phys_addr_t phys = 0;
3469 unsigned long flags;
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003470 u32 sctlr, sctlr_orig, fsr;
3471 void __iomem *cb_base;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003472
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003473 ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003474 if (ret)
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003475 return ret;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003476
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003477 spin_lock_irqsave(&smmu->atos_lock, flags);
3478 cb_base = ARM_SMMU_CB_BASE(smmu) +
3479 ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003480
3481 qsmmuv2_halt_nowait(smmu);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003482 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003483 qsmmuv2_wait_for_halt(smmu);
3484
3485 /* clear FSR to allow ATOS to log any faults */
3486 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
3487 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
3488
3489 /* disable stall mode momentarily */
3490 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3491 sctlr = sctlr_orig & ~SCTLR_CFCFG;
3492 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
3493
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003494 phys = __arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003495
3496 /* restore SCTLR */
3497 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
3498
3499 qsmmuv2_resume(smmu);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003500 spin_unlock_irqrestore(&smmu->atos_lock, flags);
3501
3502 arm_smmu_power_off(smmu_domain->smmu->pwr);
3503 return phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003504}
3505
3506struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
3507 .device_reset = qsmmuv2_device_reset,
3508 .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
Patrick Dalyad441dd2016-09-15 15:50:46 -07003509};
3510
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003511static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003512{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003513 int i;
3514 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003515 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003516 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003517
Peng Fan3ca37122016-05-03 21:50:30 +08003518 /*
3519 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
3520 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
3521 * bit is only present in MMU-500r2 onwards.
3522 */
3523 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
3524 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
3525 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
3526 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
3527 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
3528 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
3529 }
3530
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003531 /* Make sure all context banks are disabled and clear CB_FSR */
3532 for (i = 0; i < smmu->num_context_banks; ++i) {
3533 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3534 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
3535 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003536 /*
3537 * Disable MMU-500's not-particularly-beneficial next-page
3538 * prefetcher for the sake of errata #841119 and #826419.
3539 */
3540 if (smmu->model == ARM_MMU500) {
3541 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
3542 reg &= ~ARM_MMU500_ACTLR_CPRE;
3543 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
3544 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003545 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003546}
3547
3548static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
3549{
3550 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy468f4942016-09-12 17:13:49 +01003551 int i;
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003552 u32 reg;
3553
3554 /* clear global FSR */
3555 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3556 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3557
Robin Murphy468f4942016-09-12 17:13:49 +01003558 /*
3559 * Reset stream mapping groups: Initial values mark all SMRn as
3560 * invalid and all S2CRn as bypass unless overridden.
3561 */
Patrick Daly59b6d202017-06-12 13:12:15 -07003562 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
3563 for (i = 0; i < smmu->num_mapping_groups; ++i)
3564 arm_smmu_write_sme(smmu, i);
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003565
Patrick Daly59b6d202017-06-12 13:12:15 -07003566 arm_smmu_context_bank_reset(smmu);
3567 }
Will Deacon1463fe42013-07-31 19:21:27 +01003568
Will Deacon45ae7cf2013-06-24 18:31:25 +01003569 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003570 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
3571 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
3572
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003573 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003574
Will Deacon45ae7cf2013-06-24 18:31:25 +01003575 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003576 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003577
3578 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003579 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003580
Robin Murphy25a1c962016-02-10 14:25:33 +00003581 /* Enable client access, handling unmatched streams as appropriate */
3582 reg &= ~sCR0_CLIENTPD;
3583 if (disable_bypass)
3584 reg |= sCR0_USFCFG;
3585 else
3586 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003587
3588 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003589 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003590
3591 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003592 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003593
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003594 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3595 reg |= sCR0_VMID16EN;
3596
Patrick Daly7f377fe2017-10-06 17:37:10 -07003597 /* Force bypass transaction to be Non-Shareable & not io-coherent */
3598 reg &= ~(sCR0_SHCFG_MASK << sCR0_SHCFG_SHIFT);
Prakash Gupta673a79f2017-11-16 18:07:00 +05303599 reg |= sCR0_SHCFG_NSH << sCR0_SHCFG_SHIFT;
Patrick Daly7f377fe2017-10-06 17:37:10 -07003600
Will Deacon45ae7cf2013-06-24 18:31:25 +01003601 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003602 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003603 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Dalyd7476202016-09-08 18:23:28 -07003604
3605 /* Manage any implementation defined features */
3606 arm_smmu_arch_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003607}
3608
3609static int arm_smmu_id_size_to_bits(int size)
3610{
3611 switch (size) {
3612 case 0:
3613 return 32;
3614 case 1:
3615 return 36;
3616 case 2:
3617 return 40;
3618 case 3:
3619 return 42;
3620 case 4:
3621 return 44;
3622 case 5:
3623 default:
3624 return 48;
3625 }
3626}
3627
Patrick Dalyda688822017-05-17 20:12:48 -07003628
3629/*
3630 * Some context banks need to be transferred from bootloader to HLOS in a way
3631 * that allows ongoing traffic. The current expectation is that these context
3632 * banks operate in bypass mode.
3633 * Additionally, there must be exactly one device in devicetree with stream-ids
3634 * overlapping those used by the bootloader.
3635 */
3636static int arm_smmu_alloc_cb(struct iommu_domain *domain,
3637 struct arm_smmu_device *smmu,
3638 struct device *dev)
3639{
3640 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Dalye72526b2017-07-18 16:21:44 -07003641 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Dalyda688822017-05-17 20:12:48 -07003642 u32 i, idx;
3643 int cb = -EINVAL;
3644 bool dynamic;
3645
Patrick Dalye72526b2017-07-18 16:21:44 -07003646 /*
3647 * Dynamic domains have already set cbndx through domain attribute.
3648 * Verify that they picked a valid value.
3649 */
Patrick Dalyda688822017-05-17 20:12:48 -07003650 dynamic = is_dynamic_domain(domain);
Patrick Dalye72526b2017-07-18 16:21:44 -07003651 if (dynamic) {
3652 cb = smmu_domain->cfg.cbndx;
3653 if (cb < smmu->num_context_banks)
3654 return cb;
3655 else
3656 return -EINVAL;
3657 }
Patrick Dalyda688822017-05-17 20:12:48 -07003658
3659 mutex_lock(&smmu->stream_map_mutex);
3660 for_each_cfg_sme(fwspec, i, idx) {
3661 if (smmu->s2crs[idx].cb_handoff)
3662 cb = smmu->s2crs[idx].cbndx;
3663 }
3664
Charan Teja Reddy35144b02017-09-05 16:20:46 +05303665 if (cb >= 0 && arm_smmu_is_static_cb(smmu))
3666 smmu_domain->slave_side_secure = true;
3667
Charan Teja Reddyf0758df2017-09-04 18:52:07 +05303668 if (cb < 0 && !arm_smmu_is_static_cb(smmu)) {
Patrick Dalyda688822017-05-17 20:12:48 -07003669 mutex_unlock(&smmu->stream_map_mutex);
3670 return __arm_smmu_alloc_bitmap(smmu->context_map,
3671 smmu->num_s2_context_banks,
3672 smmu->num_context_banks);
3673 }
3674
3675 for (i = 0; i < smmu->num_mapping_groups; i++) {
Patrick Daly2eb31362017-06-14 18:29:36 -07003676 if (smmu->s2crs[i].cb_handoff && smmu->s2crs[i].cbndx == cb) {
Charan Teja Reddy35144b02017-09-05 16:20:46 +05303677 if (!arm_smmu_is_static_cb(smmu))
3678 smmu->s2crs[i].cb_handoff = false;
Patrick Dalyda688822017-05-17 20:12:48 -07003679 smmu->s2crs[i].count -= 1;
3680 }
3681 }
3682 mutex_unlock(&smmu->stream_map_mutex);
3683
3684 return cb;
3685}
3686
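/*
 * Record the SMR/S2CR state left behind by the bootloader. Entries of
 * type S2CR_TYPE_TRANS are copied into the driver's shadow tables and
 * marked cb_handoff so their context banks stay reserved until a
 * matching device attaches.
 */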
3687static int arm_smmu_handoff_cbs(struct arm_smmu_device *smmu)
3688{
3689 u32 i, raw_smr, raw_s2cr;
3690 struct arm_smmu_smr smr;
3691 struct arm_smmu_s2cr s2cr;
3692
3693 for (i = 0; i < smmu->num_mapping_groups; i++) {
3694 raw_smr = readl_relaxed(ARM_SMMU_GR0(smmu) +
3695 ARM_SMMU_GR0_SMR(i));
3696 if (!(raw_smr & SMR_VALID))
3697 continue;
3698
3699 smr.mask = (raw_smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
3700 smr.id = (u16)raw_smr;
3701 smr.valid = true;
3702
3703 raw_s2cr = readl_relaxed(ARM_SMMU_GR0(smmu) +
3704 ARM_SMMU_GR0_S2CR(i));
Patrick Daly4ea4bdf2017-08-29 19:24:49 -07003705 memset(&s2cr, 0, sizeof(s2cr));
Patrick Dalyda688822017-05-17 20:12:48 -07003706 s2cr.group = NULL;
3707 s2cr.count = 1;
3708 s2cr.type = (raw_s2cr >> S2CR_TYPE_SHIFT) & S2CR_TYPE_MASK;
3709 s2cr.privcfg = (raw_s2cr >> S2CR_PRIVCFG_SHIFT) &
3710 S2CR_PRIVCFG_MASK;
3711 s2cr.cbndx = (u8)raw_s2cr;
3712 s2cr.cb_handoff = true;
3713
3714 if (s2cr.type != S2CR_TYPE_TRANS)
3715 continue;
3716
3717 smmu->smrs[i] = smr;
3718 smmu->s2crs[i] = s2cr;
3719 bitmap_set(smmu->context_map, s2cr.cbndx, 1);
3720 dev_dbg(smmu->dev, "Handoff smr: %x s2cr: %x cb: %d\n",
3721 raw_smr, raw_s2cr, s2cr.cbndx);
3722 }
3723
3724 return 0;
3725}
3726
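/*
 * Parse the optional "attach-impl-defs" DT property into (offset, value)
 * pairs. These implementation-defined registers are written back by
 * qsmmuv2_device_reset() whenever the SMMU is reset.
 */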
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003727static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
3728{
3729 struct device *dev = smmu->dev;
3730 int i, ntuples, ret;
3731 u32 *tuples;
3732 struct arm_smmu_impl_def_reg *regs, *regit;
3733
3734 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
3735 return 0;
3736
3737 ntuples /= sizeof(u32);
3738 if (ntuples % 2) {
3739 dev_err(dev,
3740 "Invalid number of attach-impl-defs registers: %d\n",
3741 ntuples);
3742 return -EINVAL;
3743 }
3744
3745 regs = devm_kmalloc(
3746 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
3747 GFP_KERNEL);
3748 if (!regs)
3749 return -ENOMEM;
3750
3751 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
3752 if (!tuples)
3753 return -ENOMEM;
3754
3755 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
3756 tuples, ntuples);
3757 if (ret)
3758 return ret;
3759
3760 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
3761 regit->offset = tuples[i];
3762 regit->value = tuples[i + 1];
3763 }
3764
3765 devm_kfree(dev, tuples);
3766
3767 smmu->impl_def_attach_registers = regs;
3768 smmu->num_impl_def_attach_registers = ntuples / 2;
3769
3770 return 0;
3771}
3772
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003773
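/*
 * The helpers below pull the SMMU's power resources out of devicetree:
 * "clock-names"/"clocks", "qcom,regulator-names" (each name resolved via
 * the usual "<name>-supply" phandle), the optional
 * "qcom,deferred-regulator-disable-delay", and the "qcom,msm-bus,*"
 * bus-scaling data. Illustrative fragment (names are examples only):
 *
 *	clock-names = "iface_clk", "core_clk";
 *	qcom,regulator-names = "vdd";
 *	vdd-supply = <&gdsc>;
 */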
3774static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003775{
3776 const char *cname;
3777 struct property *prop;
3778 int i;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003779 struct device *dev = pwr->dev;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003780
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003781 pwr->num_clocks =
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003782 of_property_count_strings(dev->of_node, "clock-names");
3783
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003784 if (pwr->num_clocks < 1) {
3785 pwr->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003786 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07003787 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003788
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003789 pwr->clocks = devm_kzalloc(
3790 dev, sizeof(*pwr->clocks) * pwr->num_clocks,
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003791 GFP_KERNEL);
3792
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003793 if (!pwr->clocks)
3794 return -ENOMEM;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003795
3796 i = 0;
3797 of_property_for_each_string(dev->of_node, "clock-names",
3798 prop, cname) {
3799 struct clk *c = devm_clk_get(dev, cname);
3800
3801 if (IS_ERR(c)) {
3802 dev_err(dev, "Couldn't get clock: %s",
3803 cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07003804 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003805 }
3806
3807 if (clk_get_rate(c) == 0) {
3808 long rate = clk_round_rate(c, 1000);
3809
3810 clk_set_rate(c, rate);
3811 }
3812
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003813 pwr->clocks[i] = c;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003814
3815 ++i;
3816 }
3817 return 0;
3818}
3819
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003820static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003821{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003822 const char *cname;
3823 struct property *prop;
3824 int i, ret = 0;
3825 struct device *dev = pwr->dev;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003826
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003827 pwr->num_gdscs =
3828 of_property_count_strings(dev->of_node, "qcom,regulator-names");
3829
3830 if (pwr->num_gdscs < 1) {
3831 pwr->num_gdscs = 0;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003832 return 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003833 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003834
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003835 pwr->gdscs = devm_kzalloc(
3836 dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
3837
3838 if (!pwr->gdscs)
3839 return -ENOMEM;
3840
Prakash Guptafad87ca2017-05-16 12:13:02 +05303841 if (!of_property_read_u32(dev->of_node,
3842 "qcom,deferred-regulator-disable-delay",
3843 &(pwr->regulator_defer)))
3844 dev_info(dev, "regulator defer delay %d\n",
3845 pwr->regulator_defer);
3846
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003847 i = 0;
3848 of_property_for_each_string(dev->of_node, "qcom,regulator-names",
3849 prop, cname)
Patrick Daly86396be2017-04-17 18:08:45 -07003850 pwr->gdscs[i++].supply = cname;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003851
3852 ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
3853 return ret;
3854}
3855
3856static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
3857{
3858 struct device *dev = pwr->dev;
3859
3860 /* We don't want the bus APIs to print an error message */
3861 if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
3862 dev_dbg(dev, "No bus scaling info\n");
3863 return 0;
3864 }
3865
3866 pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
3867 if (!pwr->bus_dt_data) {
3868 dev_err(dev, "Unable to read bus-scaling from devicetree\n");
3869 return -EINVAL;
3870 }
3871
3872 pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
3873 if (!pwr->bus_client) {
3874 dev_err(dev, "Bus client registration failed\n");
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003875 return -EINVAL;
3876 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003877
3878 return 0;
3879}
3880
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003881/*
3882 * Cleanup done by devm. Any non-devm resources must clean up themselves.
3883 */
3884static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
3885 struct platform_device *pdev)
Patrick Daly2764f952016-09-06 19:22:44 -07003886{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003887 struct arm_smmu_power_resources *pwr;
3888 int ret;
Patrick Daly2764f952016-09-06 19:22:44 -07003889
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003890 pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
3891 if (!pwr)
3892 return ERR_PTR(-ENOMEM);
Patrick Daly2764f952016-09-06 19:22:44 -07003893
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003894 pwr->dev = &pdev->dev;
3895 pwr->pdev = pdev;
3896 mutex_init(&pwr->power_lock);
3897 spin_lock_init(&pwr->clock_refs_lock);
Patrick Daly2764f952016-09-06 19:22:44 -07003898
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003899 ret = arm_smmu_init_clocks(pwr);
3900 if (ret)
3901 return ERR_PTR(ret);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003902
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003903 ret = arm_smmu_init_regulators(pwr);
3904 if (ret)
3905 return ERR_PTR(ret);
3906
3907 ret = arm_smmu_init_bus_scaling(pwr);
3908 if (ret)
3909 return ERR_PTR(ret);
3910
3911 return pwr;
Patrick Daly2764f952016-09-06 19:22:44 -07003912}
3913
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003914/*
Patrick Dalyabeee952017-04-13 18:14:59 -07003915 * Bus APIs are not devm-safe.
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003916 */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003917static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003918{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003919 msm_bus_scale_unregister_client(pwr->bus_client);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003920}
3921
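/*
 * Read the ID registers (ID0/ID1/ID2) to discover what this SMMU
 * implementation supports: translation stages, stream matching and the
 * number of SMR/S2CR groups, context bank counts, input/output address
 * sizes and page-table formats. On static-CB configurations the SMR
 * probing writes are skipped and fixed SID/mask widths are assumed.
 */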
Will Deacon45ae7cf2013-06-24 18:31:25 +01003922static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
3923{
3924 unsigned long size;
3925 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
3926 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003927 bool cttw_dt, cttw_reg;
Robin Murphya754fd12016-09-12 17:13:50 +01003928 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003929
Charan Teja Reddy35144b02017-09-05 16:20:46 +05303930 if (arm_smmu_restore_sec_cfg(smmu))
3931 return -ENODEV;
3932
Mitchel Humpherysba822582015-10-20 11:37:41 -07003933 dev_dbg(smmu->dev, "probing hardware configuration...\n");
3934 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01003935 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003936
3937 /* ID0 */
3938 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01003939
3940 /* Restrict available stages based on module parameter */
3941 if (force_stage == 1)
3942 id &= ~(ID0_S2TS | ID0_NTS);
3943 else if (force_stage == 2)
3944 id &= ~(ID0_S1TS | ID0_NTS);
3945
Will Deacon45ae7cf2013-06-24 18:31:25 +01003946 if (id & ID0_S1TS) {
3947 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003948 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003949 }
3950
3951 if (id & ID0_S2TS) {
3952 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003953 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003954 }
3955
3956 if (id & ID0_NTS) {
3957 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003958 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003959 }
3960
3961 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01003962 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003963 dev_err(smmu->dev, "\tno translation support!\n");
3964 return -ENODEV;
3965 }
3966
Robin Murphyb7862e32016-04-13 18:13:03 +01003967 if ((id & ID0_S1TS) &&
3968 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003969 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003970 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003971 }
3972
Robin Murphybae2c2d2015-07-29 19:46:05 +01003973 /*
3974 * In order for DMA API calls to work properly, we must defer to what
3975 * the DT says about coherency, regardless of what the hardware claims.
3976 * Fortunately, this also opens up a workaround for systems where the
3977 * ID register value has ended up configured incorrectly.
3978 */
3979 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
3980 cttw_reg = !!(id & ID0_CTTW);
3981 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003982 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003983 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003984 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01003985 cttw_dt ? "" : "non-");
3986 if (cttw_dt != cttw_reg)
3987 dev_notice(smmu->dev,
3988 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003989
Robin Murphy53867802016-09-12 17:13:48 +01003990 /* Max. number of entries we have for stream matching/indexing */
3991 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
3992 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003993 if (id & ID0_SMS) {
Robin Murphy53867802016-09-12 17:13:48 +01003994 u32 smr;
Patrick Daly937de532016-12-12 18:44:09 -08003995 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003996
3997 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy53867802016-09-12 17:13:48 +01003998 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
3999 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004000 dev_err(smmu->dev,
4001 "stream-matching supported, but no SMRs present!\n");
4002 return -ENODEV;
4003 }
4004
Robin Murphy53867802016-09-12 17:13:48 +01004005 /*
4006 * SMR.ID bits may not be preserved if the corresponding MASK
4007 * bits are set, so check each one separately. We can reject
4008 * masters later if they try to claim IDs outside these masks.
4009 */
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304010 if (!arm_smmu_is_static_cb(smmu)) {
4011 for (i = 0; i < size; i++) {
4012 smr = readl_relaxed(
4013 gr0_base + ARM_SMMU_GR0_SMR(i));
4014 if (!(smr & SMR_VALID))
4015 break;
4016 }
4017 if (i == size) {
4018 dev_err(smmu->dev,
4019 "Unable to compute streamid_masks\n");
4020 return -ENODEV;
4021 }
4022
4023 smr = smmu->streamid_mask << SMR_ID_SHIFT;
4024 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
Patrick Daly937de532016-12-12 18:44:09 -08004025 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304026 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
Patrick Daly937de532016-12-12 18:44:09 -08004027
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304028 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
4029 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
4030 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
4031 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
4032 } else {
4033 smmu->smr_mask_mask = SMR_MASK_MASK;
4034 smmu->streamid_mask = SID_MASK;
4035 }
Dhaval Patel031d7462015-05-09 14:47:29 -07004036
Robin Murphy468f4942016-09-12 17:13:49 +01004037 /* Zero-initialised to mark as invalid */
4038 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
4039 GFP_KERNEL);
4040 if (!smmu->smrs)
4041 return -ENOMEM;
4042
Robin Murphy53867802016-09-12 17:13:48 +01004043 dev_notice(smmu->dev,
4044 "\tstream matching with %lu register groups, mask 0x%x",
4045 size, smmu->smr_mask_mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004046 }
Robin Murphya754fd12016-09-12 17:13:50 +01004047 /* s2cr->type == 0 means translation, so initialise explicitly */
4048 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
4049 GFP_KERNEL);
4050 if (!smmu->s2crs)
4051 return -ENOMEM;
4052 for (i = 0; i < size; i++)
4053 smmu->s2crs[i] = s2cr_init_val;
4054
Robin Murphy53867802016-09-12 17:13:48 +01004055 smmu->num_mapping_groups = size;
Robin Murphy6668f692016-09-12 17:13:54 +01004056 mutex_init(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004057
Robin Murphy7602b872016-04-28 17:12:09 +01004058 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
4059 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
4060 if (!(id & ID0_PTFS_NO_AARCH32S))
4061 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
4062 }
4063
Will Deacon45ae7cf2013-06-24 18:31:25 +01004064 /* ID1 */
4065 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01004066 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004067
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01004068 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00004069 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01004070 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01004071 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07004072 dev_warn(smmu->dev,
4073 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
4074 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004075
Will Deacon518f7132014-11-14 17:17:54 +00004076 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004077 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
4078 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
4079 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
4080 return -ENODEV;
4081 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07004082 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01004083 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01004084 /*
4085 * Cavium CN88xx erratum #27704.
4086 * Ensure ASID and VMID allocation is unique across all SMMUs in
4087 * the system.
4088 */
4089 if (smmu->model == CAVIUM_SMMUV2) {
4090 smmu->cavium_id_base =
4091 atomic_add_return(smmu->num_context_banks,
4092 &cavium_smmu_context_count);
4093 smmu->cavium_id_base -= smmu->num_context_banks;
4094 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004095
4096 /* ID2 */
4097 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
4098 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00004099 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004100
Will Deacon518f7132014-11-14 17:17:54 +00004101 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01004102 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00004103 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004104
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08004105 if (id & ID2_VMID16)
4106 smmu->features |= ARM_SMMU_FEAT_VMID16;
4107
Robin Murphyf1d84542015-03-04 16:41:05 +00004108 /*
4109 * What the page table walker can address actually depends on which
4110 * descriptor format is in use, but since a) we don't know that yet,
4111 * and b) it can vary per context bank, this will have to do...
4112 */
4113 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
4114 dev_warn(smmu->dev,
4115 "failed to set DMA mask for table walker\n");
4116
Robin Murphyb7862e32016-04-13 18:13:03 +01004117 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00004118 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01004119 if (smmu->version == ARM_SMMU_V1_64K)
4120 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004121 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004122 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00004123 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00004124 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01004125 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00004126 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01004127 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00004128 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01004129 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004130 }
4131
Robin Murphy7602b872016-04-28 17:12:09 +01004132 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01004133 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01004134 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01004135 if (smmu->features &
4136 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01004137 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01004138 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01004139 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01004140 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01004141 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01004142
Robin Murphyd5466352016-05-09 17:20:09 +01004143 if (arm_smmu_ops.pgsize_bitmap == -1UL)
4144 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
4145 else
4146 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004147 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01004148 smmu->pgsize_bitmap);
4149
Will Deacon518f7132014-11-14 17:17:54 +00004150
Will Deacon28d60072014-09-01 16:24:48 +01004151 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004152 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
4153 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01004154
4155 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004156 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
4157 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01004158
Will Deacon45ae7cf2013-06-24 18:31:25 +01004159 return 0;
4160}
4161
Robin Murphy67b65a32016-04-13 18:12:57 +01004162struct arm_smmu_match_data {
4163 enum arm_smmu_arch_version version;
4164 enum arm_smmu_implementation model;
Patrick Dalyd7476202016-09-08 18:23:28 -07004165 struct arm_smmu_arch_ops *arch_ops;
Robin Murphy67b65a32016-04-13 18:12:57 +01004166};
4167
Patrick Dalyd7476202016-09-08 18:23:28 -07004168#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops) \
4169static struct arm_smmu_match_data name = { \
4170.version = ver, \
4171.model = imp, \
4172.arch_ops = ops, \
4173} \
Robin Murphy67b65a32016-04-13 18:12:57 +01004174
Patrick Daly1f8a2882016-09-12 17:32:05 -07004175struct arm_smmu_arch_ops qsmmuv500_arch_ops;
4176
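/*
 * For example,
 *	ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2,
 *			    &qsmmuv2_arch_ops);
 * expands (roughly) to:
 *	static struct arm_smmu_match_data qcom_smmuv2 = {
 *		.version = ARM_SMMU_V2,
 *		.model = QCOM_SMMUV2,
 *		.arch_ops = &qsmmuv2_arch_ops,
 *	};
 */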
Patrick Dalyd7476202016-09-08 18:23:28 -07004177ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
4178ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
4179ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
4180ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
4181ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
Patrick Dalyad441dd2016-09-15 15:50:46 -07004182ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004183ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
4184 &qsmmuv500_arch_ops);
Robin Murphy67b65a32016-04-13 18:12:57 +01004185
Joerg Roedel09b52692014-10-02 12:24:45 +02004186static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01004187 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
4188 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
4189 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01004190 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01004191 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01004192 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07004193 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Patrick Daly1f8a2882016-09-12 17:32:05 -07004194 { .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
Robin Murphy09360402014-08-28 17:51:59 +01004195 { },
4196};
4197MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
4198
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304199#ifdef CONFIG_MSM_TZ_SMMU
4200int register_iommu_sec_ptbl(void)
4201{
4202 struct device_node *np;
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004203
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304204 for_each_matching_node(np, arm_smmu_of_match)
4205 if (of_find_property(np, "qcom,tz-device-id", NULL) &&
4206 of_device_is_available(np))
4207 break;
4208 if (!np)
4209 return -ENODEV;
4210
4211 of_node_put(np);
4212
4213 return msm_iommu_sec_pgtbl_init();
4214}
4215#endif
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004216static int arm_smmu_of_iommu_configure_fixup(struct device *dev, void *data)
4217{
4218 if (!dev->iommu_fwspec)
4219 of_iommu_configure(dev, dev->of_node);
4220 return 0;
4221}
4222
Patrick Daly000a2f22017-02-13 22:18:12 -08004223static int arm_smmu_add_device_fixup(struct device *dev, void *data)
4224{
4225 struct iommu_ops *ops = data;
4226
4227 ops->add_device(dev);
4228 return 0;
4229}
4230
Patrick Daly1f8a2882016-09-12 17:32:05 -07004231static int qsmmuv500_tbu_register(struct device *dev, void *data);
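/*
 * Probe order: map the global register space, count global/context
 * interrupts, bring up the power resources, probe the ID registers, take
 * over any handed-off stream mappings, parse the implementation-defined
 * attach registers, request the global fault IRQs, run the arch-specific
 * init, reset the SMMU and finally publish arm_smmu_ops on the
 * platform/AMBA/PCI buses.
 */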
Will Deacon45ae7cf2013-06-24 18:31:25 +01004232static int arm_smmu_device_dt_probe(struct platform_device *pdev)
4233{
Robin Murphy67b65a32016-04-13 18:12:57 +01004234 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004235 struct resource *res;
4236 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004237 struct device *dev = &pdev->dev;
Robin Murphyd5b41782016-09-14 15:21:39 +01004238 int num_irqs, i, err;
Robin Murphy7e96c742016-09-14 15:26:46 +01004239 bool legacy_binding;
4240
4241 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
4242 if (legacy_binding && !using_generic_binding) {
4243 if (!using_legacy_binding)
4244 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
4245 using_legacy_binding = true;
4246 } else if (!legacy_binding && !using_legacy_binding) {
4247 using_generic_binding = true;
4248 } else {
4249 dev_err(dev, "not probing due to mismatched DT properties\n");
4250 return -ENODEV;
4251 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004252
4253 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
4254 if (!smmu) {
4255 dev_err(dev, "failed to allocate arm_smmu_device\n");
4256 return -ENOMEM;
4257 }
4258 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08004259 spin_lock_init(&smmu->atos_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07004260 idr_init(&smmu->asid_idr);
4261 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004262
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004263 data = of_device_get_match_data(dev);
Robin Murphy67b65a32016-04-13 18:12:57 +01004264 smmu->version = data->version;
4265 smmu->model = data->model;
Patrick Dalyd7476202016-09-08 18:23:28 -07004266 smmu->arch_ops = data->arch_ops;
Robin Murphy09360402014-08-28 17:51:59 +01004267
Will Deacon45ae7cf2013-06-24 18:31:25 +01004268 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304269 if (res)
4270 smmu->phys_addr = res->start;
Julia Lawall8a7f4312013-08-19 12:20:37 +01004271 smmu->base = devm_ioremap_resource(dev, res);
4272 if (IS_ERR(smmu->base))
4273 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004274 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004275
4276 if (of_property_read_u32(dev->of_node, "#global-interrupts",
4277 &smmu->num_global_irqs)) {
4278 dev_err(dev, "missing #global-interrupts property\n");
4279 return -ENODEV;
4280 }
4281
4282 num_irqs = 0;
4283 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
4284 num_irqs++;
4285 if (num_irqs > smmu->num_global_irqs)
4286 smmu->num_context_irqs++;
4287 }
4288
Andreas Herrmann44a08de2013-10-01 13:39:07 +01004289 if (!smmu->num_context_irqs) {
4290 dev_err(dev, "found %d interrupts but expected at least %d\n",
4291 num_irqs, smmu->num_global_irqs + 1);
4292 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004293 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004294
4295 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
4296 GFP_KERNEL);
4297 if (!smmu->irqs) {
4298 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
4299 return -ENOMEM;
4300 }
4301
4302 for (i = 0; i < num_irqs; ++i) {
4303 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07004304
Will Deacon45ae7cf2013-06-24 18:31:25 +01004305 if (irq < 0) {
4306 dev_err(dev, "failed to get irq index %d\n", i);
4307 return -ENODEV;
4308 }
4309 smmu->irqs[i] = irq;
4310 }
4311
Dhaval Patel031d7462015-05-09 14:47:29 -07004312 parse_driver_options(smmu);
4313
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004314 smmu->pwr = arm_smmu_init_power_resources(pdev);
4315 if (IS_ERR(smmu->pwr))
4316 return PTR_ERR(smmu->pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004317
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004318 err = arm_smmu_power_on(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004319 if (err)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004320 goto out_exit_power_resources;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004321
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304322 smmu->sec_id = msm_dev_to_device_id(dev);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004323 err = arm_smmu_device_cfg_probe(smmu);
4324 if (err)
4325 goto out_power_off;
4326
Patrick Dalyda688822017-05-17 20:12:48 -07004327 err = arm_smmu_handoff_cbs(smmu);
4328 if (err)
4329 goto out_power_off;
4330
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004331 err = arm_smmu_parse_impl_def_registers(smmu);
4332 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004333 goto out_power_off;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004334
Robin Murphyb7862e32016-04-13 18:13:03 +01004335 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01004336 smmu->num_context_banks != smmu->num_context_irqs) {
4337 dev_err(dev,
Mitchel Humpherys73230ce2014-12-11 17:18:02 -08004338 "found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
4339 smmu->num_context_irqs, smmu->num_context_banks,
4340 smmu->num_context_banks);
4341 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004342 }
4343
Will Deacon45ae7cf2013-06-24 18:31:25 +01004344 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08004345 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
4346 NULL, arm_smmu_global_fault,
4347 IRQF_ONESHOT | IRQF_SHARED,
4348 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004349 if (err) {
4350 dev_err(dev, "failed to request global IRQ %d (%u)\n",
4351 i, smmu->irqs[i]);
Robin Murphyd5b41782016-09-14 15:21:39 +01004352 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004353 }
4354 }
4355
Patrick Dalyd7476202016-09-08 18:23:28 -07004356 err = arm_smmu_arch_init(smmu);
4357 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004358 goto out_power_off;
Patrick Dalyd7476202016-09-08 18:23:28 -07004359
Robin Murphy06e393e2016-09-12 17:13:55 +01004360 of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004361 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01004362 arm_smmu_device_reset(smmu);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004363 arm_smmu_power_off(smmu->pwr);
Patrick Dalyd7476202016-09-08 18:23:28 -07004364
Patrick Daly8e3371a2017-02-13 22:14:53 -08004365 INIT_LIST_HEAD(&smmu->list);
4366 spin_lock(&arm_smmu_devices_lock);
4367 list_add(&smmu->list, &arm_smmu_devices);
4368 spin_unlock(&arm_smmu_devices_lock);
4369
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004370 /* bus_set_iommu depends on this. */
4371 bus_for_each_dev(&platform_bus_type, NULL, NULL,
4372 arm_smmu_of_iommu_configure_fixup);
4373
Robin Murphy7e96c742016-09-14 15:26:46 +01004374 /* Oh, for a proper bus abstraction */
4375 if (!iommu_present(&platform_bus_type))
4376 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
Patrick Daly000a2f22017-02-13 22:18:12 -08004377 else
4378 bus_for_each_dev(&platform_bus_type, NULL, &arm_smmu_ops,
4379 arm_smmu_add_device_fixup);
Robin Murphy7e96c742016-09-14 15:26:46 +01004380#ifdef CONFIG_ARM_AMBA
4381 if (!iommu_present(&amba_bustype))
4382 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
4383#endif
4384#ifdef CONFIG_PCI
4385 if (!iommu_present(&pci_bus_type)) {
4386 pci_request_acs();
4387 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
4388 }
4389#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01004390 return 0;
4391
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004392out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004393 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004394
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004395out_exit_power_resources:
4396 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004397
Will Deacon45ae7cf2013-06-24 18:31:25 +01004398 return err;
4399}
4400
4401static int arm_smmu_device_remove(struct platform_device *pdev)
4402{
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004403 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004404
4405 if (!smmu)
4406 return -ENODEV;
4407
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004408 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07004409 return -EINVAL;
4410
Will Deaconecfadb62013-07-31 19:21:28 +01004411 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004412 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004413
Patrick Dalyc190d932016-08-30 17:23:28 -07004414 idr_destroy(&smmu->asid_idr);
4415
Will Deacon45ae7cf2013-06-24 18:31:25 +01004416 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07004417 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004418 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004419
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004420 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004421
Will Deacon45ae7cf2013-06-24 18:31:25 +01004422 return 0;
4423}
4424
Will Deacon45ae7cf2013-06-24 18:31:25 +01004425static struct platform_driver arm_smmu_driver = {
4426 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004427 .name = "arm-smmu",
4428 .of_match_table = of_match_ptr(arm_smmu_of_match),
4429 },
4430 .probe = arm_smmu_device_dt_probe,
4431 .remove = arm_smmu_device_remove,
4432};
4433
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004434static struct platform_driver qsmmuv500_tbu_driver;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004435static int __init arm_smmu_init(void)
4436{
Robin Murphy7e96c742016-09-14 15:26:46 +01004437 static bool registered;
4438 int ret = 0;
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004439 ktime_t cur;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004440
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004441 if (registered)
4442 return 0;
4443
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004444 cur = ktime_get();
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004445 ret = platform_driver_register(&qsmmuv500_tbu_driver);
4446 if (ret)
4447 return ret;
4448
4449 ret = platform_driver_register(&arm_smmu_driver);
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304450#ifdef CONFIG_MSM_TZ_SMMU
4451 ret = register_iommu_sec_ptbl();
4452#endif
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004453 registered = !ret;
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004454 trace_smmu_init(ktime_us_delta(ktime_get(), cur));
4455
Robin Murphy7e96c742016-09-14 15:26:46 +01004456 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004457}
4458
4459static void __exit arm_smmu_exit(void)
4460{
4461 return platform_driver_unregister(&arm_smmu_driver);
4462}
4463
Andreas Herrmannb1950b22013-10-01 13:39:05 +01004464subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004465module_exit(arm_smmu_exit);
4466
Robin Murphy7e96c742016-09-14 15:26:46 +01004467static int __init arm_smmu_of_init(struct device_node *np)
4468{
4469 int ret = arm_smmu_init();
4470
4471 if (ret)
4472 return ret;
4473
4474 if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
4475 return -ENODEV;
4476
4477 return 0;
4478}
4479IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
4480IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
4481IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
4482IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
4483IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
4484IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
Robin Murphy7e96c742016-09-14 15:26:46 +01004485
Patrick Dalya0fddb62017-03-27 19:26:59 -07004486#define TCU_HW_VERSION_HLOS1 (0x18)
4487
Patrick Daly1f8a2882016-09-12 17:32:05 -07004488#define DEBUG_SID_HALT_REG 0x0
4489#define DEBUG_SID_HALT_VAL (0x1 << 16)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004490#define DEBUG_SID_HALT_SID_MASK 0x3ff
4491
4492#define DEBUG_VA_ADDR_REG 0x8
4493
4494#define DEBUG_TXN_TRIGG_REG 0x18
4495#define DEBUG_TXN_AXPROT_SHIFT 6
4496#define DEBUG_TXN_AXCACHE_SHIFT 2
4497#define DEBUG_TRX_WRITE (0x1 << 1)
4498#define DEBUG_TXN_READ (0x0 << 1)
4499#define DEBUG_TXN_TRIGGER 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004500
4501#define DEBUG_SR_HALT_ACK_REG 0x20
4502#define DEBUG_SR_HALT_ACK_VAL (0x1 << 1)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004503#define DEBUG_SR_ECATS_RUNNING_VAL (0x1 << 0)
4504
4505#define DEBUG_PAR_REG 0x28
4506#define DEBUG_PAR_PA_MASK ((0x1ULL << 36) - 1)
4507#define DEBUG_PAR_PA_SHIFT 12
4508#define DEBUG_PAR_FAULT_VAL 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004509
Patrick Daly8c1202b2017-05-10 15:42:30 -07004510#define TBU_DBG_TIMEOUT_US 100
Patrick Daly1f8a2882016-09-12 17:32:05 -07004511
Patrick Daly23301482017-10-12 16:18:25 -07004512#define QSMMUV500_ACTLR_DEEP_PREFETCH_MASK 0x3
4513#define QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT 0x8
4514
Patrick Daly03330cc2017-08-11 14:56:38 -07004515
4516struct actlr_setting {
4517 struct arm_smmu_smr smr;
4518 u32 actlr;
4519};
4520
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004521struct qsmmuv500_archdata {
4522 struct list_head tbus;
Patrick Dalya0fddb62017-03-27 19:26:59 -07004523 void __iomem *tcu_base;
4524 u32 version;
Patrick Dalyda765c62017-09-11 16:31:07 -07004525
4526 struct actlr_setting *actlrs;
4527 u32 actlr_tbl_size;
4528
4529 struct arm_smmu_smr *errata1_clients;
4530 u32 num_errata1_clients;
4531 remote_spinlock_t errata1_lock;
4532 ktime_t last_tlbi_ktime;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004533};
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004534#define get_qsmmuv500_archdata(smmu) \
4535 ((struct qsmmuv500_archdata *)(smmu->archdata))
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004536
Patrick Daly1f8a2882016-09-12 17:32:05 -07004537struct qsmmuv500_tbu_device {
4538 struct list_head list;
4539 struct device *dev;
4540 struct arm_smmu_device *smmu;
4541 void __iomem *base;
4542 void __iomem *status_reg;
4543
4544 struct arm_smmu_power_resources *pwr;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004545 u32 sid_start;
4546 u32 num_sids;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004547
4548 /* Protects halt count */
4549 spinlock_t halt_lock;
4550 u32 halt_count;
4551};
4552
Patrick Daly03330cc2017-08-11 14:56:38 -07004553struct qsmmuv500_group_iommudata {
4554 bool has_actlr;
4555 u32 actlr;
4556};
4557#define to_qsmmuv500_group_iommudata(group) \
4558 ((struct qsmmuv500_group_iommudata *) \
4559 (iommu_group_get_iommudata(group)))
4560
4561
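/*
 * Two stream-match entries overlap when their IDs agree on every bit that
 * neither mask ignores, i.e. ((id1 ^ id2) & ~(mask1 | mask2)) == 0.
 * For example id 0x840/mask 0x3f and id 0x860/mask 0x0 overlap: the only
 * differing bit (0x20) is covered by the first entry's mask.
 */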
4562static bool arm_smmu_fwspec_match_smr(struct iommu_fwspec *fwspec,
Patrick Dalyda765c62017-09-11 16:31:07 -07004563 struct arm_smmu_smr *smr)
4564{
4565 struct arm_smmu_smr *smr2;
Patrick Daly03330cc2017-08-11 14:56:38 -07004566 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Patrick Dalyda765c62017-09-11 16:31:07 -07004567 int i, idx;
4568
Patrick Daly03330cc2017-08-11 14:56:38 -07004569 for_each_cfg_sme(fwspec, i, idx) {
4570 smr2 = &smmu->smrs[idx];
Patrick Dalyda765c62017-09-11 16:31:07 -07004571 /* Continue if table entry does not match */
4572 if ((smr->id ^ smr2->id) & ~(smr->mask | smr2->mask))
4573 continue;
4574 return true;
4575 }
4576 return false;
4577}
4578
4579#define ERRATA1_REMOTE_SPINLOCK "S:6"
4580#define ERRATA1_TLBI_INTERVAL_US 10
4581static bool
4582qsmmuv500_errata1_required(struct arm_smmu_domain *smmu_domain,
4583 struct qsmmuv500_archdata *data)
4584{
4585 bool ret = false;
4586 int j;
4587 struct arm_smmu_smr *smr;
Patrick Daly03330cc2017-08-11 14:56:38 -07004588 struct iommu_fwspec *fwspec;
Patrick Dalyda765c62017-09-11 16:31:07 -07004589
4590 if (smmu_domain->qsmmuv500_errata1_init)
4591 return smmu_domain->qsmmuv500_errata1_client;
4592
Patrick Daly03330cc2017-08-11 14:56:38 -07004593 fwspec = smmu_domain->dev->iommu_fwspec;
Patrick Dalyda765c62017-09-11 16:31:07 -07004594 for (j = 0; j < data->num_errata1_clients; j++) {
4595 smr = &data->errata1_clients[j];
Patrick Daly03330cc2017-08-11 14:56:38 -07004596 if (arm_smmu_fwspec_match_smr(fwspec, smr)) {
Patrick Dalyda765c62017-09-11 16:31:07 -07004597 ret = true;
4598 break;
4599 }
4600 }
4601
4602 smmu_domain->qsmmuv500_errata1_init = true;
4603 smmu_domain->qsmmuv500_errata1_client = ret;
4604 return ret;
4605}
4606
Patrick Daly86960052017-12-04 18:53:13 -08004607#define SCM_CONFIG_ERRATA1_CLIENT_ALL 0x2
4608#define SCM_CONFIG_ERRATA1 0x3
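/*
 * Errata workaround sequence: issue TLBIALL + TLBSYNC and give the sync a
 * short (100us) window to complete on its own. If it does not, call into
 * TZ (SCM_SVC_SMMU_PROGRAM / SCM_CONFIG_ERRATA1) to disable the errata
 * handling for all clients, throttle the NoC while waiting up to 10ms for
 * the sync to drain, then restore the NoC and re-enable the errata
 * handling. A failed SCM call or sync timeout leaves the hardware in an
 * unknown state and is treated as fatal.
 */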
Patrick Dalyda765c62017-09-11 16:31:07 -07004609static void __qsmmuv500_errata1_tlbiall(struct arm_smmu_domain *smmu_domain)
4610{
4611 struct arm_smmu_device *smmu = smmu_domain->smmu;
4612 struct device *dev = smmu_domain->dev;
4613 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
4614 void __iomem *base;
Patrick Daly86960052017-12-04 18:53:13 -08004615 int ret;
Patrick Dalyda765c62017-09-11 16:31:07 -07004616 ktime_t cur;
4617 u32 val;
Patrick Daly86960052017-12-04 18:53:13 -08004618 struct scm_desc desc = {
4619 .args[0] = SCM_CONFIG_ERRATA1_CLIENT_ALL,
4620 .args[1] = false,
4621 .arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
4622 };
Patrick Dalyda765c62017-09-11 16:31:07 -07004623
4624 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
4625 writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
4626 writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
Patrick Daly86960052017-12-04 18:53:13 -08004627 if (!readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
4628 !(val & TLBSTATUS_SACTIVE), 0, 100))
4629 return;
4630
4631 ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_SMMU_PROGRAM,
4632 SCM_CONFIG_ERRATA1),
4633 &desc);
4634 if (ret) {
4635 dev_err(smmu->dev, "Calling into TZ to disable ERRATA1 failed - IOMMU hardware in bad state\n");
4636 BUG();
4637 return;
4638 }
4639
4640 cur = ktime_get();
4641 trace_tlbi_throttle_start(dev, 0);
4642 msm_bus_noc_throttle_wa(true);
4643
Patrick Dalyda765c62017-09-11 16:31:07 -07004644 if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
Patrick Daly86960052017-12-04 18:53:13 -08004645 !(val & TLBSTATUS_SACTIVE), 0, 10000)) {
4646 dev_err(smmu->dev, "ERRATA1 TLBSYNC timeout - IOMMU hardware in bad state");
4647 trace_tlbsync_timeout(dev, 0);
4648 BUG();
4649 }
Patrick Dalyda765c62017-09-11 16:31:07 -07004650
Patrick Daly86960052017-12-04 18:53:13 -08004651 msm_bus_noc_throttle_wa(false);
4652 trace_tlbi_throttle_end(dev, ktime_us_delta(ktime_get(), cur));
Patrick Dalyda765c62017-09-11 16:31:07 -07004653
Patrick Daly86960052017-12-04 18:53:13 -08004654 desc.args[1] = true;
4655 ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_SMMU_PROGRAM,
4656 SCM_CONFIG_ERRATA1),
4657 &desc);
4658 if (ret) {
4659 dev_err(smmu->dev, "Calling into TZ to reenable ERRATA1 failed - IOMMU hardware in bad state\n");
4660 BUG();
Patrick Dalyda765c62017-09-11 16:31:07 -07004661 }
4662}
4663
4664/* Must be called with clocks/regulators enabled */
4665static void qsmmuv500_errata1_tlb_inv_context(void *cookie)
4666{
4667 struct arm_smmu_domain *smmu_domain = cookie;
4668 struct device *dev = smmu_domain->dev;
4669 struct qsmmuv500_archdata *data =
4670 get_qsmmuv500_archdata(smmu_domain->smmu);
4671 ktime_t cur;
Patrick Daly1faa3112017-10-31 16:40:40 -07004672 unsigned long flags;
Patrick Dalyda765c62017-09-11 16:31:07 -07004673 bool errata;
4674
4675 cur = ktime_get();
Prakash Gupta25f90512017-11-20 14:56:54 +05304676 trace_tlbi_start(dev, 0);
Patrick Dalyda765c62017-09-11 16:31:07 -07004677
4678 errata = qsmmuv500_errata1_required(smmu_domain, data);
Patrick Daly1faa3112017-10-31 16:40:40 -07004679 remote_spin_lock_irqsave(&data->errata1_lock, flags);
Patrick Dalyda765c62017-09-11 16:31:07 -07004680 if (errata) {
4681 s64 delta;
4682
4683 delta = ktime_us_delta(ktime_get(), data->last_tlbi_ktime);
4684 if (delta < ERRATA1_TLBI_INTERVAL_US)
4685 udelay(ERRATA1_TLBI_INTERVAL_US - delta);
4686
4687 __qsmmuv500_errata1_tlbiall(smmu_domain);
4688
4689 data->last_tlbi_ktime = ktime_get();
4690 } else {
4691 __qsmmuv500_errata1_tlbiall(smmu_domain);
4692 }
Patrick Daly1faa3112017-10-31 16:40:40 -07004693 remote_spin_unlock_irqrestore(&data->errata1_lock, flags);
Patrick Dalyda765c62017-09-11 16:31:07 -07004694
Prakash Gupta25f90512017-11-20 14:56:54 +05304695 trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
Patrick Dalyda765c62017-09-11 16:31:07 -07004696}
4697
4698static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops = {
4699 .tlb_flush_all = qsmmuv500_errata1_tlb_inv_context,
4700 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
4701 .free_pages_exact = arm_smmu_free_pages_exact,
4702};
4703
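/*
 * Halting a TBU: request the halt via DEBUG_SID_HALT and poll for the
 * acknowledge bit. If the ack never arrives because a context fault is
 * stalling traffic in front of us, temporarily disable fault reporting
 * (SCTLR.CFCFG/CFIE), clear the FSR, terminate the stalled transaction
 * and poll again before restoring SCTLR. Halts are refcounted, so nested
 * callers only pay for the first halt/last resume.
 */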
Patrick Daly8c1202b2017-05-10 15:42:30 -07004704static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu,
4705 struct arm_smmu_domain *smmu_domain)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004706{
4707 unsigned long flags;
Patrick Daly8c1202b2017-05-10 15:42:30 -07004708 u32 halt, fsr, sctlr_orig, sctlr, status;
4709 void __iomem *base, *cb_base;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004710
4711 spin_lock_irqsave(&tbu->halt_lock, flags);
4712 if (tbu->halt_count) {
4713 tbu->halt_count++;
4714 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4715 return 0;
4716 }
4717
Patrick Daly8c1202b2017-05-10 15:42:30 -07004718 cb_base = ARM_SMMU_CB_BASE(smmu_domain->smmu) +
4719 ARM_SMMU_CB(smmu_domain->smmu, smmu_domain->cfg.cbndx);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004720 base = tbu->base;
Patrick Daly8c1202b2017-05-10 15:42:30 -07004721 halt = readl_relaxed(base + DEBUG_SID_HALT_REG);
4722 halt |= DEBUG_SID_HALT_VAL;
4723 writel_relaxed(halt, base + DEBUG_SID_HALT_REG);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004724
Patrick Daly8c1202b2017-05-10 15:42:30 -07004725 if (!readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
4726 (status & DEBUG_SR_HALT_ACK_VAL),
4727 0, TBU_DBG_TIMEOUT_US))
4728 goto out;
4729
4730 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4731 if (!(fsr & FSR_FAULT)) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004732 dev_err(tbu->dev, "Couldn't halt TBU!\n");
4733 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4734 return -ETIMEDOUT;
4735 }
4736
Patrick Daly8c1202b2017-05-10 15:42:30 -07004737 /*
4738	 * We are in a fault; our request to halt the bus will not complete
4739 * until transactions in front of us (such as the fault itself) have
4740 * completed. Disable iommu faults and terminate any existing
4741 * transactions.
4742 */
4743 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
4744 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
4745 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
4746
4747 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
4748 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4749
4750 if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
4751 (status & DEBUG_SR_HALT_ACK_VAL),
4752 0, TBU_DBG_TIMEOUT_US)) {
4753 dev_err(tbu->dev, "Couldn't halt TBU from fault context!\n");
4754 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
4755 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4756 return -ETIMEDOUT;
4757 }
4758
4759 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
4760out:
Patrick Daly1f8a2882016-09-12 17:32:05 -07004761 tbu->halt_count = 1;
4762 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4763 return 0;
4764}
4765
4766static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
4767{
4768 unsigned long flags;
4769 u32 val;
4770 void __iomem *base;
4771
4772 spin_lock_irqsave(&tbu->halt_lock, flags);
4773 if (!tbu->halt_count) {
4774 WARN(1, "%s: bad tbu->halt_count", dev_name(tbu->dev));
4775 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4776 return;
4777
4778 } else if (tbu->halt_count > 1) {
4779 tbu->halt_count--;
4780 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4781 return;
4782 }
4783
4784 base = tbu->base;
4785 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
4786 val &= ~DEBUG_SID_HALT_VAL;
4787 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
4788
4789 tbu->halt_count = 0;
4790 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4791}
4792
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004793static struct qsmmuv500_tbu_device *qsmmuv500_find_tbu(
4794 struct arm_smmu_device *smmu, u32 sid)
4795{
4796 struct qsmmuv500_tbu_device *tbu = NULL;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004797 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004798
4799 list_for_each_entry(tbu, &data->tbus, list) {
4800 if (tbu->sid_start <= sid &&
4801 sid < tbu->sid_start + tbu->num_sids)
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07004802 return tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004803 }
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07004804 return NULL;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004805}
4806
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004807static int qsmmuv500_ecats_lock(struct arm_smmu_domain *smmu_domain,
4808 struct qsmmuv500_tbu_device *tbu,
4809 unsigned long *flags)
4810{
4811 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004812 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004813 u32 val;
4814
4815 spin_lock_irqsave(&smmu->atos_lock, *flags);
4816 /* The status register is not accessible on version 1.0 */
4817 if (data->version == 0x01000000)
4818 return 0;
4819
4820 if (readl_poll_timeout_atomic(tbu->status_reg,
4821 val, (val == 0x1), 0,
4822 TBU_DBG_TIMEOUT_US)) {
4823 dev_err(tbu->dev, "ECATS hw busy!\n");
4824 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4825 return -ETIMEDOUT;
4826 }
4827
4828 return 0;
4829}
4830
4831static void qsmmuv500_ecats_unlock(struct arm_smmu_domain *smmu_domain,
4832 struct qsmmuv500_tbu_device *tbu,
4833 unsigned long *flags)
4834{
4835 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004836 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004837
4838 /* The status register is not accessible on version 1.0 */
4839 if (data->version != 0x01000000)
4840 writel_relaxed(0, tbu->status_reg);
4841 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4842}
4843
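/*
 * ECATS translation walk: find the TBU that owns the SID, power it on and
 * halt it, grab the ATOS lock, program the SID and VA into the TBU debug
 * registers, trigger a read transaction and poll for completion or a
 * fault, then pull the PA out of DEBUG_PAR. A failed translation can make
 * the next one report a spurious failure, hence the bounded redo loop.
 */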
4844/*
4845 * Zero means failure.
4846 */
4847static phys_addr_t qsmmuv500_iova_to_phys(
4848 struct iommu_domain *domain, dma_addr_t iova, u32 sid)
4849{
4850 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
4851 struct arm_smmu_device *smmu = smmu_domain->smmu;
4852 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
4853 struct qsmmuv500_tbu_device *tbu;
4854 int ret;
4855 phys_addr_t phys = 0;
4856 u64 val, fsr;
4857 unsigned long flags;
4858 void __iomem *cb_base;
4859 u32 sctlr_orig, sctlr;
4860 int needs_redo = 0;
Patrick Daly8c1202b2017-05-10 15:42:30 -07004861 ktime_t timeout;
4862
4863	/* Only a 36-bit IOVA is supported */
4864 if (iova >= (1ULL << 36)) {
4865 dev_err_ratelimited(smmu->dev, "ECATS: address too large: %pad\n",
4866 &iova);
4867 return 0;
4868 }
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004869
4870 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
4871 tbu = qsmmuv500_find_tbu(smmu, sid);
4872 if (!tbu)
4873 return 0;
4874
4875 ret = arm_smmu_power_on(tbu->pwr);
4876 if (ret)
4877 return 0;
4878
Patrick Daly8c1202b2017-05-10 15:42:30 -07004879 ret = qsmmuv500_tbu_halt(tbu, smmu_domain);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004880 if (ret)
4881 goto out_power_off;
4882
Patrick Daly8c1202b2017-05-10 15:42:30 -07004883 /*
4884 * ECATS can trigger the fault interrupt, so disable it temporarily
4885 * and check for an interrupt manually.
4886 */
4887 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
4888 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
4889 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
4890
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004891 /* Only one concurrent atos operation */
4892 ret = qsmmuv500_ecats_lock(smmu_domain, tbu, &flags);
4893 if (ret)
4894 goto out_resume;
4895
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004896redo:
4897 /* Set address and stream-id */
4898 val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
4899 val |= sid & DEBUG_SID_HALT_SID_MASK;
4900 writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
4901 writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
4902
4903 /*
4904 * Write-back Read and Write-Allocate
4905	 * Privileged, non-secure, data transaction
4906 * Read operation.
4907 */
4908 val = 0xF << DEBUG_TXN_AXCACHE_SHIFT;
4909 val |= 0x3 << DEBUG_TXN_AXPROT_SHIFT;
4910 val |= DEBUG_TXN_TRIGGER;
4911 writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);
4912
4913 ret = 0;
Patrick Daly8c1202b2017-05-10 15:42:30 -07004914	/* Based on readx_poll_timeout_atomic() */
4915 timeout = ktime_add_us(ktime_get(), TBU_DBG_TIMEOUT_US);
4916 for (;;) {
4917 val = readl_relaxed(tbu->base + DEBUG_SR_HALT_ACK_REG);
4918 if (!(val & DEBUG_SR_ECATS_RUNNING_VAL))
4919 break;
4920 val = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4921 if (val & FSR_FAULT)
4922 break;
4923 if (ktime_compare(ktime_get(), timeout) > 0) {
4924 dev_err(tbu->dev, "ECATS translation timed out!\n");
4925 ret = -ETIMEDOUT;
4926 break;
4927 }
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004928 }
4929
4930 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4931 if (fsr & FSR_FAULT) {
4932 dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx\n",
Patrick Daly8c1202b2017-05-10 15:42:30 -07004933 fsr);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004934 ret = -EINVAL;
4935
4936 writel_relaxed(val, cb_base + ARM_SMMU_CB_FSR);
4937 /*
4938 * Clear pending interrupts
4939 * Barrier required to ensure that the FSR is cleared
4940 * before resuming SMMU operation
4941 */
4942 wmb();
4943 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4944 }
4945
4946 val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
4947 if (val & DEBUG_PAR_FAULT_VAL) {
4948 dev_err(tbu->dev, "ECATS translation failed! PAR = %llx\n",
4949 val);
4950 ret = -EINVAL;
4951 }
4952
4953 phys = (val >> DEBUG_PAR_PA_SHIFT) & DEBUG_PAR_PA_MASK;
4954 if (ret < 0)
4955 phys = 0;
4956
4957 /* Reset hardware */
4958 writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
4959 writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
4960
4961 /*
4962 * After a failed translation, the next successful translation will
4963 * incorrectly be reported as a failure.
4964 */
4965 if (!phys && needs_redo++ < 2)
4966 goto redo;
4967
4968 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
4969 qsmmuv500_ecats_unlock(smmu_domain, tbu, &flags);
4970
4971out_resume:
4972 qsmmuv500_tbu_resume(tbu);
4973
4974out_power_off:
4975 arm_smmu_power_off(tbu->pwr);
4976
4977 return phys;
4978}
4979
4980static phys_addr_t qsmmuv500_iova_to_phys_hard(
4981 struct iommu_domain *domain, dma_addr_t iova)
4982{
4983 u16 sid;
4984 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
4985 struct iommu_fwspec *fwspec;
4986
4987 /* Select a sid */
4988 fwspec = smmu_domain->dev->iommu_fwspec;
4989 sid = (u16)fwspec->ids[0];
4990
4991 return qsmmuv500_iova_to_phys(domain, iova, sid);
4992}
4993
Patrick Daly03330cc2017-08-11 14:56:38 -07004994static void qsmmuv500_release_group_iommudata(void *data)
4995{
4996 kfree(data);
4997}
4998
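/*
 * Per-group ACTLR handling: if any stream ID in the group matches an
 * entry from the "qcom,actlr" table, remember that value on the group's
 * iommudata; qsmmuv500_init_cb() later writes it into the context bank's
 * ACTLR register.
 */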
4999/* If a device has a valid actlr, it must match */
5000static int qsmmuv500_device_group(struct device *dev,
5001 struct iommu_group *group)
5002{
5003 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
5004 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
5005 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
5006 struct qsmmuv500_group_iommudata *iommudata;
5007 u32 actlr, i;
5008 struct arm_smmu_smr *smr;
5009
5010 iommudata = to_qsmmuv500_group_iommudata(group);
5011 if (!iommudata) {
5012 iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
5013 if (!iommudata)
5014 return -ENOMEM;
5015
5016 iommu_group_set_iommudata(group, iommudata,
5017 qsmmuv500_release_group_iommudata);
5018 }
5019
5020 for (i = 0; i < data->actlr_tbl_size; i++) {
5021 smr = &data->actlrs[i].smr;
5022 actlr = data->actlrs[i].actlr;
5023
5024 if (!arm_smmu_fwspec_match_smr(fwspec, smr))
5025 continue;
5026
5027 if (!iommudata->has_actlr) {
5028 iommudata->actlr = actlr;
5029 iommudata->has_actlr = true;
5030 } else if (iommudata->actlr != actlr) {
5031 return -EINVAL;
5032 }
5033 }
5034
5035 return 0;
5036}
5037
5038static void qsmmuv500_init_cb(struct arm_smmu_domain *smmu_domain,
5039 struct device *dev)
5040{
5041 struct arm_smmu_device *smmu = smmu_domain->smmu;
5042 struct qsmmuv500_group_iommudata *iommudata =
5043 to_qsmmuv500_group_iommudata(dev->iommu_group);
5044 void __iomem *cb_base;
5045 const struct iommu_gather_ops *tlb;
5046
5047 if (!iommudata->has_actlr)
5048 return;
5049
5050 tlb = smmu_domain->pgtbl_cfg.tlb;
5051 cb_base = ARM_SMMU_CB_BASE(smmu) +
5052 ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
5053
5054 writel_relaxed(iommudata->actlr, cb_base + ARM_SMMU_CB_ACTLR);
5055
5056 /*
Patrick Daly23301482017-10-12 16:18:25 -07005057 * Prefetch only works properly if the start and end of all
5058	 * buffers in the page table are aligned to 16 KB.
5059 */
Patrick Daly27bd9292017-11-22 13:59:59 -08005060 if ((iommudata->actlr >> QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT) &
Patrick Daly23301482017-10-12 16:18:25 -07005061 QSMMUV500_ACTLR_DEEP_PREFETCH_MASK)
5062 smmu_domain->qsmmuv500_errata2_min_align = true;
5063
5064 /*
Patrick Daly03330cc2017-08-11 14:56:38 -07005065 * Flush the context bank after modifying ACTLR to ensure there
5066 * are no cache entries with stale state
5067 */
5068 tlb->tlb_flush_all(smmu_domain);
5069}
5070
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005071static int qsmmuv500_tbu_register(struct device *dev, void *cookie)
Patrick Daly1f8a2882016-09-12 17:32:05 -07005072{
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005073 struct arm_smmu_device *smmu = cookie;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005074 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005075 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07005076
5077 if (!dev->driver) {
5078		dev_err(dev, "TBU failed to probe; QSMMUV500 cannot continue!\n");
5079 return -EINVAL;
5080 }
5081
5082 tbu = dev_get_drvdata(dev);
5083
5084 INIT_LIST_HEAD(&tbu->list);
5085 tbu->smmu = smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005086 list_add(&tbu->list, &data->tbus);
Patrick Daly1f8a2882016-09-12 17:32:05 -07005087 return 0;
5088}
5089
Patrick Dalyda765c62017-09-11 16:31:07 -07005090static int qsmmuv500_parse_errata1(struct arm_smmu_device *smmu)
5091{
5092 int len, i;
5093 struct device *dev = smmu->dev;
5094 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
5095 struct arm_smmu_smr *smrs;
5096 const __be32 *cell;
5097
5098 cell = of_get_property(dev->of_node, "qcom,mmu500-errata-1", NULL);
5099 if (!cell)
5100 return 0;
5101
5102 remote_spin_lock_init(&data->errata1_lock, ERRATA1_REMOTE_SPINLOCK);
5103 len = of_property_count_elems_of_size(
5104 dev->of_node, "qcom,mmu500-errata-1", sizeof(u32) * 2);
5105 if (len < 0)
5106 return 0;
5107
5108 smrs = devm_kzalloc(dev, sizeof(*smrs) * len, GFP_KERNEL);
5109 if (!smrs)
5110 return -ENOMEM;
5111
5112 for (i = 0; i < len; i++) {
5113 smrs[i].id = of_read_number(cell++, 1);
5114 smrs[i].mask = of_read_number(cell++, 1);
5115 }
5116
5117 data->errata1_clients = smrs;
5118 data->num_errata1_clients = len;
5119 return 0;
5120}
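
/*
 * Hypothetical shape of the "qcom,mmu500-errata-1" property parsed above;
 * the SID/mask values are made up purely for illustration:
 *
 *	qcom,mmu500-errata-1 = <0x800 0x3f0>,
 *			       <0xc00 0x000>;
 *
 * Each <sid mask> pair is consumed in that order by
 * qsmmuv500_parse_errata1().
 */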
5121
Patrick Daly03330cc2017-08-11 14:56:38 -07005122static int qsmmuv500_read_actlr_tbl(struct arm_smmu_device *smmu)
5123{
5124 int len, i;
5125 struct device *dev = smmu->dev;
5126 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
5127 struct actlr_setting *actlrs;
5128 const __be32 *cell;
5129
5130 cell = of_get_property(dev->of_node, "qcom,actlr", NULL);
5131 if (!cell)
5132 return 0;
5133
5134 len = of_property_count_elems_of_size(dev->of_node, "qcom,actlr",
5135 sizeof(u32) * 3);
5136 if (len < 0)
5137 return 0;
5138
5139 actlrs = devm_kzalloc(dev, sizeof(*actlrs) * len, GFP_KERNEL);
5140 if (!actlrs)
5141 return -ENOMEM;
5142
5143 for (i = 0; i < len; i++) {
5144 actlrs[i].smr.id = of_read_number(cell++, 1);
5145 actlrs[i].smr.mask = of_read_number(cell++, 1);
5146 actlrs[i].actlr = of_read_number(cell++, 1);
5147 }
5148
5149 data->actlrs = actlrs;
5150 data->actlr_tbl_size = len;
5151 return 0;
5152}
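
/*
 * Hypothetical shape of the "qcom,actlr" property parsed above; the SID,
 * mask and ACTLR values are made up purely for illustration:
 *
 *	qcom,actlr = <0x880 0x8 0x303>,
 *		     <0xc00 0x0 0x103>;
 *
 * Each <sid mask actlr> triplet is consumed in that order by
 * qsmmuv500_read_actlr_tbl().
 */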
5153
Patrick Daly1f8a2882016-09-12 17:32:05 -07005154static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
5155{
Patrick Dalya0fddb62017-03-27 19:26:59 -07005156 struct resource *res;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005157 struct device *dev = smmu->dev;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005158 struct qsmmuv500_archdata *data;
Patrick Dalya0fddb62017-03-27 19:26:59 -07005159 struct platform_device *pdev;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005160 int ret;
Patrick Daly03330cc2017-08-11 14:56:38 -07005161 u32 val;
5162 void __iomem *reg;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005163
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005164 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
5165 if (!data)
Patrick Daly1f8a2882016-09-12 17:32:05 -07005166 return -ENOMEM;
5167
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005168 INIT_LIST_HEAD(&data->tbus);
Patrick Dalya0fddb62017-03-27 19:26:59 -07005169
5170 pdev = container_of(dev, struct platform_device, dev);
5171 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcu-base");
5172 data->tcu_base = devm_ioremap_resource(dev, res);
5173 if (IS_ERR(data->tcu_base))
5174 return PTR_ERR(data->tcu_base);
5175
5176 data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005177 smmu->archdata = data;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005178
Patrick Dalyda765c62017-09-11 16:31:07 -07005179 ret = qsmmuv500_parse_errata1(smmu);
5180 if (ret)
5181 return ret;
5182
Patrick Daly03330cc2017-08-11 14:56:38 -07005183 ret = qsmmuv500_read_actlr_tbl(smmu);
5184 if (ret)
5185 return ret;
5186
5187 reg = ARM_SMMU_GR0(smmu);
5188 val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
5189 val &= ~ARM_MMU500_ACR_CACHE_LOCK;
5190 writel_relaxed(val, reg + ARM_SMMU_GR0_sACR);
5191 val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
5192 /*
5193	 * Modifying the nonsecure copy of the sACR register is only
5194 * allowed if permission is given in the secure sACR register.
5195 * Attempt to detect if we were able to update the value.
5196 */
5197 WARN_ON(val & ARM_MMU500_ACR_CACHE_LOCK);
5198
Patrick Daly1f8a2882016-09-12 17:32:05 -07005199 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
5200 if (ret)
5201 return ret;
5202
5203 /* Attempt to register child devices */
5204 ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
5205 if (ret)
Patrick Daly6ce54262017-04-12 21:24:06 -07005206 return -EPROBE_DEFER;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005207
5208 return 0;
5209}
5210
5211struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
5212 .init = qsmmuv500_arch_init,
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005213 .iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
Patrick Daly03330cc2017-08-11 14:56:38 -07005214 .init_context_bank = qsmmuv500_init_cb,
5215 .device_group = qsmmuv500_device_group,
Patrick Daly1f8a2882016-09-12 17:32:05 -07005216};
5217
5218static const struct of_device_id qsmmuv500_tbu_of_match[] = {
5219 {.compatible = "qcom,qsmmuv500-tbu"},
5220 {}
5221};
5222
5223static int qsmmuv500_tbu_probe(struct platform_device *pdev)
5224{
5225 struct resource *res;
5226 struct device *dev = &pdev->dev;
5227 struct qsmmuv500_tbu_device *tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005228 const __be32 *cell;
5229 int len;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005230
5231 tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
5232 if (!tbu)
5233 return -ENOMEM;
5234
5235 INIT_LIST_HEAD(&tbu->list);
5236 tbu->dev = dev;
5237 spin_lock_init(&tbu->halt_lock);
5238
5239 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
5240 tbu->base = devm_ioremap_resource(dev, res);
5241 if (IS_ERR(tbu->base))
5242 return PTR_ERR(tbu->base);
5243
5244 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "status-reg");
5245 tbu->status_reg = devm_ioremap_resource(dev, res);
5246 if (IS_ERR(tbu->status_reg))
5247 return PTR_ERR(tbu->status_reg);
5248
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005249 cell = of_get_property(dev->of_node, "qcom,stream-id-range", &len);
5250 if (!cell || len < 8)
5251 return -EINVAL;
5252
5253 tbu->sid_start = of_read_number(cell, 1);
5254 tbu->num_sids = of_read_number(cell + 1, 1);
5255
Patrick Daly1f8a2882016-09-12 17:32:05 -07005256 tbu->pwr = arm_smmu_init_power_resources(pdev);
5257 if (IS_ERR(tbu->pwr))
5258 return PTR_ERR(tbu->pwr);
5259
5260 dev_set_drvdata(dev, tbu);
5261 return 0;
5262}
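
/*
 * Sketch of a TBU child node this probe routine expects; the unit address,
 * register offsets, sizes and stream-ID range below are hypothetical:
 *
 *	tbu@1000 {
 *		compatible = "qcom,qsmmuv500-tbu";
 *		reg = <0x1000 0x1000>, <0x2200 0x8>;
 *		reg-names = "base", "status-reg";
 *		qcom,stream-id-range = <0x800 0x400>;
 *	};
 *
 * "qcom,stream-id-range" carries two cells, <first-sid num-sids>, matching
 * the of_read_number() calls above.
 */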
5263
5264static struct platform_driver qsmmuv500_tbu_driver = {
5265 .driver = {
5266 .name = "qsmmuv500-tbu",
5267 .of_match_table = of_match_ptr(qsmmuv500_tbu_of_match),
5268 },
5269 .probe = qsmmuv500_tbu_probe,
5270};
5271
Will Deacon45ae7cf2013-06-24 18:31:25 +01005272MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
5273MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
5274MODULE_LICENSE("GPL v2");