/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/of_platform.h>
#include <linux/msm-bus.h>
#include <dt-bindings/msm/msm-bus-ids.h>
#include <linux/remote_spinlock.h>
#include <linux/ktime.h>
#include <trace/events/iommu.h>
#include <linux/notifier.h>

#include <linux/amba/bus.h>
#include <soc/qcom/msm_tz_smmu.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3
#define sCR0_SHCFG_SHIFT		22
#define sCR0_SHCFG_MASK			0x3
#define sCR0_SHCFG_NSH			3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		500000	/* 500ms */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7FFF
#define SID_MASK			0x7FFF
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_SHCFG_SHIFT		8
#define S2CR_SHCFG_MASK			0x3
#define S2CR_SHCFG_NSH			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FSRRESTORE		0x5c
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIALL		0x618
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define TLBSTATUS_SACTIVE		(1 << 0)
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_SHCFG_SHIFT		22
#define SCTLR_SHCFG_MASK		0x3
#define SCTLR_SHCFG_NSH			0x3
#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_HUPCF			(1 << 8)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
	QCOM_SMMUV500,
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

/*
 * attach_count
 *	The SMR and S2CR registers are only programmed when the number of
 *	devices attached to the iommu using these registers is > 0. This
 *	is required for the "SID switch" use case for secure display.
 *	Protected by stream_map_mutex.
 */
struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	int				attach_count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
	bool				cb_handoff;
	bool				write_protected;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
	.cb_handoff = false,						\
	.write_protected = false,					\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

/*
 * Describes resources required for on/off power operation.
 * Separate reference count is provided for atomic/nonatomic
 * operations.
 */
struct arm_smmu_power_resources {
	struct platform_device		*pdev;
	struct device			*dev;

	struct clk			**clocks;
	int				num_clocks;

	struct regulator_bulk_data	*gdscs;
	int				num_gdscs;

	uint32_t			bus_client;
	struct msm_bus_scale_pdata	*bus_dt_data;

	/* Protects power_count */
	struct mutex			power_lock;
	int				power_count;

	/* Protects clock_refs_count */
	spinlock_t			clock_refs_lock;
	int				clock_refs_count;
	int				regulator_defer;
};

struct arm_smmu_arch_ops;
struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	phys_addr_t			phys_addr;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
#define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
#define ARM_SMMU_OPT_3LVL_TABLES	(1 << 4)
#define ARM_SMMU_OPT_NO_ASID_RETENTION	(1 << 5)
#define ARM_SMMU_OPT_DISABLE_ATOS	(1 << 6)
#define ARM_SMMU_OPT_MMU500_ERRATA1	(1 << 7)
#define ARM_SMMU_OPT_STATIC_CB		(1 << 8)
#define ARM_SMMU_OPT_HALT		(1 << 9)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;

	u32				cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
	unsigned int			num_impl_def_attach_registers;

	struct arm_smmu_power_resources *pwr;
	struct notifier_block		regulator_nb;

	spinlock_t			atos_lock;

	/* protects idr */
	struct mutex			idr_mutex;
	struct idr			asid_idr;

	struct arm_smmu_arch_ops	*arch_ops;
	void				*archdata;

	enum tz_smmu_device_id		sec_id;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	u32				procid;
	u16				asid;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff
#define INVALID_CBNDX			0xff
#define INVALID_ASID			0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg)	((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_pte_info {
	void *virt_addr;
	size_t size;
	struct list_head entry;
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct device			*dev;
	struct io_pgtable_ops		*pgtbl_ops;
	struct io_pgtable_cfg		pgtbl_cfg;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	u32				attributes;
	bool				slave_side_secure;
	u32				secure_vmid;
	struct list_head		pte_info_list;
	struct list_head		unassign_list;
	struct mutex			assign_lock;
	struct list_head		secure_pool_list;
	struct iommu_domain		domain;

	bool				qsmmuv500_errata1_init;
	bool				qsmmuv500_errata1_client;
	bool				qsmmuv500_errata2_min_align;
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
	{ ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
	{ ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" },
	{ ARM_SMMU_OPT_MMU500_ERRATA1, "qcom,mmu500-errata-1" },
	{ ARM_SMMU_OPT_STATIC_CB, "qcom,enable-static-cb"},
	{ ARM_SMMU_OPT_HALT, "qcom,enable-smmu-halt"},
	{ 0, NULL},
};

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);

static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
				     dma_addr_t iova);

static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);

static int arm_smmu_alloc_cb(struct iommu_domain *domain,
			     struct arm_smmu_device *smmu,
			     struct device *dev);
static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops;

static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu);
static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain);
static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

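/*
 * Set an option bit for each of the properties in arm_smmu_options that is
 * present on this SMMU's device-tree node.
 */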
static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_dbg(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

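/*
 * For SMMUs using statically allocated context banks, ask the secure world
 * (via scm_restore_sec_cfg()) to restore the secure configuration for the
 * given context bank; a no-op when static context banks are not in use.
 */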
static int arm_smmu_restore_sec_cfg(struct arm_smmu_device *smmu, u32 cb)
{
	int ret;
	int scm_ret = 0;

	if (!arm_smmu_is_static_cb(smmu))
		return 0;

	ret = scm_restore_sec_cfg(smmu->sec_id, cb, &scm_ret);
	if (ret || scm_ret) {
		pr_err("scm call IOMMU_SECURE_CFG failed\n");
		return -EINVAL;
	}

	return 0;
}
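
/*
 * Page tables are walked coherently if the domain forces it via
 * DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT or the SMMU itself is DMA-coherent.
 */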
static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
{
	if (smmu_domain->attributes &
			(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT))
		return true;
	else if (smmu_domain->smmu && smmu_domain->smmu->dev)
		return smmu_domain->smmu->dev->archdata.dma_coherent;
	else
		return false;
}

static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu)
{
	return smmu->options & ARM_SMMU_OPT_STATIC_CB;
}

static bool arm_smmu_has_secure_vmid(struct arm_smmu_domain *smmu_domain)
{
	return (smmu_domain->secure_vmid != VMID_INVAL);
}

static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain)
{
	return arm_smmu_has_secure_vmid(smmu_domain) &&
			smmu_domain->slave_side_secure;
}

static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain)
{
	return arm_smmu_has_secure_vmid(smmu_domain)
			&& !smmu_domain->slave_side_secure;
}

static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_master_side_secure(smmu_domain))
		mutex_lock(&smmu_domain->assign_lock);
}

static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_master_side_secure(smmu_domain))
		mutex_unlock(&smmu_domain->assign_lock);
}

/*
 * init()
 * Hook for additional device tree parsing at probe time.
 *
 * device_reset()
 * Hook for one-time architecture-specific register settings.
 *
 * iova_to_phys_hard()
 * Provides debug information. May be called from the context fault irq handler.
 *
 * init_context_bank()
 * Hook for architecture-specific settings which require knowledge of the
 * dynamically allocated context bank number.
 *
 * device_group()
 * Hook for checking whether a device is compatible with a given group.
 */
struct arm_smmu_arch_ops {
	int (*init)(struct arm_smmu_device *smmu);
	void (*device_reset)(struct arm_smmu_device *smmu);
	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
					 dma_addr_t iova);
	void (*init_context_bank)(struct arm_smmu_domain *smmu_domain,
				  struct device *dev);
	int (*device_group)(struct device *dev, struct iommu_group *group);
};

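/* Thin wrappers that tolerate a missing arch_ops table or individual hooks. */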
static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
{
	if (!smmu->arch_ops)
		return 0;
	if (!smmu->arch_ops->init)
		return 0;
	return smmu->arch_ops->init(smmu);
}

static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
{
	if (!smmu->arch_ops)
		return;
	if (!smmu->arch_ops->device_reset)
		return;
	return smmu->arch_ops->device_reset(smmu);
}

static void arm_smmu_arch_init_context_bank(
		struct arm_smmu_domain *smmu_domain, struct device *dev)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (!smmu->arch_ops)
		return;
	if (!smmu->arch_ops->init_context_bank)
		return;
	return smmu->arch_ops->init_context_bank(smmu_domain, dev);
}

static int arm_smmu_arch_device_group(struct device *dev,
				      struct iommu_group *group)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);

	if (!smmu->arch_ops)
		return 0;
	if (!smmu->arch_ops->device_group)
		return 0;
	return smmu->arch_ops->device_group(dev, group);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

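/*
 * Handle a master described by the legacy "mmu-masters" binding: locate the
 * SMMU whose node references this device, then record the master's stream
 * IDs in its iommu_fwspec.
 */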
static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err = 0;

	memset(&it, 0, sizeof(it));
	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

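/* Atomically claim a free index in [start, end) of the given bitmap. */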
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

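/*
 * Clock prepare/enable helpers. On failure they unwind whatever clocks were
 * already prepared/enabled before returning the error.
 */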
static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_prepare(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
			while (i--)
				clk_unprepare(pwr->clocks[i]);
			break;
		}
	}
	return ret;
}

static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_unprepare(pwr->clocks[i - 1]);
}

static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_enable(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
			while (i--)
				clk_disable(pwr->clocks[i]);
			break;
		}
	}

	return ret;
}

static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_disable(pwr->clocks[i - 1]);
}

static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return 0;
	return msm_bus_scale_client_update_request(pwr->bus_client, 1);
}

static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return;
	WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
}

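/* Enable all GDSC regulators, rolling back the ones already enabled on failure. */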
static int arm_smmu_enable_regulators(struct arm_smmu_power_resources *pwr)
{
	struct regulator_bulk_data *consumers;
	int num_consumers, ret;
	int i;

	num_consumers = pwr->num_gdscs;
	consumers = pwr->gdscs;
	for (i = 0; i < num_consumers; i++) {
		ret = regulator_enable(consumers[i].consumer);
		if (ret)
			goto out;
	}
	return 0;

out:
	i -= 1;
	for (; i >= 0; i--)
		regulator_disable(consumers[i].consumer);
	return ret;
}

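/*
 * Disable the GDSC regulators using the optional deferred-disable interval;
 * if one disable fails, re-enable those that were already turned off.
 */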
static int arm_smmu_disable_regulators(struct arm_smmu_power_resources *pwr)
{
	struct regulator_bulk_data *consumers;
	int i;
	int num_consumers, ret, r;

	num_consumers = pwr->num_gdscs;
	consumers = pwr->gdscs;
	for (i = num_consumers - 1; i >= 0; --i) {
		ret = regulator_disable_deferred(consumers[i].consumer,
						 pwr->regulator_defer);
		if (ret != 0)
			goto err;
	}

	return 0;

err:
	pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret);
	for (++i; i < num_consumers; ++i) {
		r = regulator_enable(consumers[i].consumer);
		if (r != 0)
			pr_err("Failed to re-enable %s: %d\n",
			       consumers[i].supply, r);
	}

	return ret;
}

/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count > 0) {
		pwr->clock_refs_count++;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return 0;
	}

	ret = arm_smmu_enable_clocks(pwr);
	if (!ret)
		pwr->clock_refs_count = 1;

	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
	return ret;
}

/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
{
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count == 0) {
		WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;

	} else if (pwr->clock_refs_count > 1) {
		pwr->clock_refs_count--;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;
	}

	arm_smmu_disable_clocks(pwr);

	pwr->clock_refs_count = 0;
	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
}

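/*
 * Sleepable half of the power-on sequence: bus vote, regulators and clock
 * prepare, serialised by power_lock and refcounted via power_count.
 */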
static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
{
	int ret;

	mutex_lock(&pwr->power_lock);
	if (pwr->power_count > 0) {
		pwr->power_count += 1;
		mutex_unlock(&pwr->power_lock);
		return 0;
	}

	ret = arm_smmu_request_bus(pwr);
	if (ret)
		goto out_unlock;

	ret = arm_smmu_enable_regulators(pwr);
	if (ret)
		goto out_disable_bus;

	ret = arm_smmu_prepare_clocks(pwr);
	if (ret)
		goto out_disable_regulators;

	pwr->power_count = 1;
	mutex_unlock(&pwr->power_lock);
	return 0;

out_disable_regulators:
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
out_disable_bus:
	arm_smmu_unrequest_bus(pwr);
out_unlock:
	mutex_unlock(&pwr->power_lock);
	return ret;
}

static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
{
	mutex_lock(&pwr->power_lock);
	if (pwr->power_count == 0) {
		WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
		mutex_unlock(&pwr->power_lock);
		return;

	} else if (pwr->power_count > 1) {
		pwr->power_count--;
		mutex_unlock(&pwr->power_lock);
		return;
	}

	arm_smmu_unprepare_clocks(pwr);
	arm_smmu_disable_regulators(pwr);
	arm_smmu_unrequest_bus(pwr);
	pwr->power_count = 0;
	mutex_unlock(&pwr->power_lock);
}

static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
{
	int ret;

	ret = arm_smmu_power_on_slow(pwr);
	if (ret)
		return ret;

	ret = arm_smmu_power_on_atomic(pwr);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	arm_smmu_power_off_slow(pwr);
	return ret;
}

static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
{
	arm_smmu_power_off_atomic(pwr);
	arm_smmu_power_off_slow(pwr);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static int arm_smmu_domain_power_on(struct iommu_domain *domain,
				    struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain)
		return arm_smmu_power_on_atomic(smmu->pwr);

	return arm_smmu_power_on(smmu->pwr);
}

/*
 * Must be used instead of arm_smmu_power_off if it may be called from
 * atomic context
 */
static void arm_smmu_domain_power_off(struct iommu_domain *domain,
				      struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain) {
		arm_smmu_power_off_atomic(smmu->pwr);
		return;
	}

	arm_smmu_power_off(smmu->pwr);
}

/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
				 int cbndx)
{
	void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
	u32 val;

	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
				      !(val & TLBSTATUS_SACTIVE),
				      0, TLB_LOOP_TIMEOUT)) {
		trace_tlbsync_timeout(smmu->dev, 0);
		dev_err(smmu->dev, "TLBSYNC timeout!\n");
	}
}

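/* Global TLB sync: poll sTLBGSTATUS until GSACTIVE clears or the loop times out. */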
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
}

/* Must be called with clocks/regulators enabled */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct device *dev = smmu_domain->dev;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;
	bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
	ktime_t cur = ktime_get();

	trace_tlbi_start(dev, 0);

	if (stage1 && !use_tlbiall) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else if (stage1 && use_tlbiall) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
		__arm_smmu_tlb_sync(smmu);
	}

	trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
}

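/*
 * Invalidate a range of TLB entries without a trailing sync. Stage 1 uses
 * per-ASID VA invalidation (or TLBIALL when ASID retention is disabled);
 * stage 2 uses IPA or VMID invalidation depending on the SMMU version.
 */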
Will Deacon518f7132014-11-14 17:17:54 +00001218static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
Robin Murphy06c610e2015-12-07 18:18:53 +00001219 size_t granule, bool leaf, void *cookie)
Will Deacon518f7132014-11-14 17:17:54 +00001220{
1221 struct arm_smmu_domain *smmu_domain = cookie;
1222 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1223 struct arm_smmu_device *smmu = smmu_domain->smmu;
1224 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1225 void __iomem *reg;
Patrick Dalye7069342017-07-11 12:35:55 -07001226 bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
Will Deacon518f7132014-11-14 17:17:54 +00001227
Patrick Dalye7069342017-07-11 12:35:55 -07001228 if (stage1 && !use_tlbiall) {
Will Deacon518f7132014-11-14 17:17:54 +00001229 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1230 reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
1231
Robin Murphy7602b872016-04-28 17:12:09 +01001232 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001233 iova &= ~12UL;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001234 iova |= ARM_SMMU_CB_ASID(smmu, cfg);
Robin Murphy75df1382015-12-07 18:18:52 +00001235 do {
1236 writel_relaxed(iova, reg);
1237 iova += granule;
1238 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001239 } else {
1240 iova >>= 12;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001241 iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
Robin Murphy75df1382015-12-07 18:18:52 +00001242 do {
1243 writeq_relaxed(iova, reg);
1244 iova += granule >> 12;
1245 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001246 }
Patrick Dalye7069342017-07-11 12:35:55 -07001247 } else if (stage1 && use_tlbiall) {
1248 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1249 reg += ARM_SMMU_CB_S1_TLBIALL;
1250 writel_relaxed(0, reg);
Will Deacon518f7132014-11-14 17:17:54 +00001251 } else if (smmu->version == ARM_SMMU_V2) {
1252 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1253 reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
1254 ARM_SMMU_CB_S2_TLBIIPAS2;
Robin Murphy75df1382015-12-07 18:18:52 +00001255 iova >>= 12;
1256 do {
Robin Murphyf9a05f02016-04-13 18:13:01 +01001257 smmu_write_atomic_lq(iova, reg);
Robin Murphy75df1382015-12-07 18:18:52 +00001258 iova += granule >> 12;
1259 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001260 } else {
1261 reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001262 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
Will Deacon518f7132014-11-14 17:17:54 +00001263 }
1264}
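
/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): for the AArch64 context format, the value written to
 * TLBIVA/TLBIVAL by the loop above packs the address right-shifted by 12
 * together with the 16-bit ASID in the top bits, exactly as built inline
 * above.
 */
static inline u64 example_tlbiva_value(unsigned long iova, u16 asid)
{
	return (iova >> 12) | ((u64)asid << 48);
}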
1265
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001266struct arm_smmu_secure_pool_chunk {
1267 void *addr;
1268 size_t size;
1269 struct list_head list;
1270};
1271
1272static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
1273 size_t size)
1274{
1275 struct arm_smmu_secure_pool_chunk *it;
1276
1277 list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
1278 if (it->size == size) {
1279 void *addr = it->addr;
1280
1281 list_del(&it->list);
1282 kfree(it);
1283 return addr;
1284 }
1285 }
1286
1287 return NULL;
1288}
1289
1290static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
1291 void *addr, size_t size)
1292{
1293 struct arm_smmu_secure_pool_chunk *chunk;
1294
1295 chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
1296 if (!chunk)
1297 return -ENOMEM;
1298
1299 chunk->addr = addr;
1300 chunk->size = size;
1301 memset(addr, 0, size);
1302 list_add(&chunk->list, &smmu_domain->secure_pool_list);
1303
1304 return 0;
1305}
1306
1307static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
1308{
1309 struct arm_smmu_secure_pool_chunk *it, *i;
1310
1311 list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
1312 arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
1313 /* pages will be freed later (after being unassigned) */
Prakash Gupta8e827be2017-10-04 12:37:11 +05301314 list_del(&it->list);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001315 kfree(it);
1316 }
1317}
1318
Patrick Dalyc11d1082016-09-01 15:52:44 -07001319static void *arm_smmu_alloc_pages_exact(void *cookie,
1320 size_t size, gfp_t gfp_mask)
1321{
1322 int ret;
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001323 void *page;
1324 struct arm_smmu_domain *smmu_domain = cookie;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001325
Charan Teja Reddy35144b02017-09-05 16:20:46 +05301326 if (!arm_smmu_is_master_side_secure(smmu_domain))
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001327 return alloc_pages_exact(size, gfp_mask);
1328
1329 page = arm_smmu_secure_pool_remove(smmu_domain, size);
1330 if (page)
1331 return page;
1332
1333 page = alloc_pages_exact(size, gfp_mask);
1334 if (page) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07001335 ret = arm_smmu_prepare_pgtable(page, cookie);
1336 if (ret) {
1337 free_pages_exact(page, size);
1338 return NULL;
1339 }
1340 }
1341
1342 return page;
1343}
1344
1345static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
1346{
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001347 struct arm_smmu_domain *smmu_domain = cookie;
1348
Charan Teja Reddy35144b02017-09-05 16:20:46 +05301349 if (!arm_smmu_is_master_side_secure(smmu_domain)) {
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001350 free_pages_exact(virt, size);
1351 return;
1352 }
1353
1354 if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
1355 arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001356}
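
/*
 * Illustrative sketch (not part of the driver): for master-side-secure
 * domains the two hooks above recycle page-table pages through the secure
 * pool, so a free/alloc pair does not need a fresh hyp assignment. The
 * function below is a hypothetical example of a caller holding the
 * io-pgtable cookie; the real callers are the table-walk routines in
 * io-pgtable.
 */
static inline void *example_alloc_table_page(struct arm_smmu_domain *smmu_domain,
					     size_t size)
{
	/* Reuses a zeroed pool page of this size if one is cached */
	return arm_smmu_alloc_pages_exact(smmu_domain, size,
					  GFP_ATOMIC | __GFP_ZERO);
}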
1357
Will Deacon518f7132014-11-14 17:17:54 +00001358static struct iommu_gather_ops arm_smmu_gather_ops = {
1359 .tlb_flush_all = arm_smmu_tlb_inv_context,
1360 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
1361 .tlb_sync = arm_smmu_tlb_sync,
Patrick Dalyc11d1082016-09-01 15:52:44 -07001362 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
1363 .free_pages_exact = arm_smmu_free_pages_exact,
Will Deacon518f7132014-11-14 17:17:54 +00001364};
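
/*
 * Illustrative sketch (not part of the driver): these callbacks are handed
 * to io-pgtable through pgtbl_cfg.tlb with the domain as the cookie, so a
 * full flush issued from generic code lands in arm_smmu_tlb_inv_context()
 * above. The helper name below is hypothetical.
 */
static inline void example_flush_domain_tlb(struct arm_smmu_domain *smmu_domain)
{
	const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;

	tlb->tlb_flush_all(smmu_domain);
}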
1365
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001366static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
1367 dma_addr_t iova, u32 fsr)
1368{
1369 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001370 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyda765c62017-09-11 16:31:07 -07001371 const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001372 phys_addr_t phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07001373 phys_addr_t phys_post_tlbiall;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001374
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001375 phys = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyda765c62017-09-11 16:31:07 -07001376 tlb->tlb_flush_all(smmu_domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001377 phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001378
Patrick Dalyad441dd2016-09-15 15:50:46 -07001379 if (phys != phys_post_tlbiall) {
1380 dev_err(smmu->dev,
1381 "ATOS results differed across TLBIALL...\n"
1382 "Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
1383 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001384
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001385 return (phys == 0 ? phys_post_tlbiall : phys);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001386}
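
/*
 * Illustrative sketch (not part of the driver): one way the fault handler
 * below can interpret the pair of hardware translations performed above.
 * A lookup that only succeeds after the full invalidation suggests the
 * fault came from a stale TLB entry rather than from the page tables.
 * The helper and its name are hypothetical.
 */
static inline bool example_fault_was_stale_tlb(phys_addr_t phys_before,
					       phys_addr_t phys_after_tlbiall)
{
	return !phys_before && phys_after_tlbiall;
}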
1387
Will Deacon45ae7cf2013-06-24 18:31:25 +01001388static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
1389{
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001390 int flags, ret, tmp;
Patrick Daly5ba28112016-08-30 19:18:52 -07001391 u32 fsr, fsynr, resume;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001392 unsigned long iova;
1393 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +01001394 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001395 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1396 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001397 void __iomem *cb_base;
Shalaj Jain04059c52015-03-03 13:34:59 -08001398 void __iomem *gr1_base;
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001399 bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001400 phys_addr_t phys_soft;
Shalaj Jain04059c52015-03-03 13:34:59 -08001401 u32 frsynra;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001402 bool non_fatal_fault = !!(smmu_domain->attributes &
Sudarshan Rajagopalanf4464e02017-08-10 14:30:39 -07001403 (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001404
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001405 static DEFINE_RATELIMIT_STATE(_rs,
1406 DEFAULT_RATELIMIT_INTERVAL,
1407 DEFAULT_RATELIMIT_BURST);
1408
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001409 ret = arm_smmu_power_on(smmu->pwr);
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001410 if (ret)
1411 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001412
Shalaj Jain04059c52015-03-03 13:34:59 -08001413 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001414 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001415 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
1416
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001417 if (!(fsr & FSR_FAULT)) {
1418 ret = IRQ_NONE;
1419 goto out_power_off;
1420 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001421
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001422 if (fatal_asf && (fsr & FSR_ASF)) {
1423 dev_err(smmu->dev,
1424 "Took an address size fault. Refusing to recover.\n");
1425 BUG();
1426 }
1427
Will Deacon45ae7cf2013-06-24 18:31:25 +01001428 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
Patrick Daly5ba28112016-08-30 19:18:52 -07001429 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001430 if (fsr & FSR_TF)
1431 flags |= IOMMU_FAULT_TRANSLATION;
1432 if (fsr & FSR_PF)
1433 flags |= IOMMU_FAULT_PERMISSION;
Mitchel Humpherysdd75e332015-08-19 11:02:33 -07001434 if (fsr & FSR_EF)
1435 flags |= IOMMU_FAULT_EXTERNAL;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001436 if (fsr & FSR_SS)
1437 flags |= IOMMU_FAULT_TRANSACTION_STALLED;
Patrick Daly5ba28112016-08-30 19:18:52 -07001438
Robin Murphyf9a05f02016-04-13 18:13:01 +01001439 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001440 phys_soft = arm_smmu_iova_to_phys(domain, iova);
Shalaj Jain04059c52015-03-03 13:34:59 -08001441 frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
1442 frsynra &= CBFRSYNRA_SID_MASK;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001443 tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
1444 if (!tmp || (tmp == -EBUSY)) {
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001445 dev_dbg(smmu->dev,
1446 "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1447 iova, fsr, fsynr, cfg->cbndx);
1448 dev_dbg(smmu->dev,
1449 "soft iova-to-phys=%pa\n", &phys_soft);
Patrick Daly5ba28112016-08-30 19:18:52 -07001450 ret = IRQ_HANDLED;
Shrenuj Bansald5083c02015-09-18 14:59:09 -07001451 resume = RESUME_TERMINATE;
Patrick Daly5ba28112016-08-30 19:18:52 -07001452 } else {
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001453 phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
1454 fsr);
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001455 if (__ratelimit(&_rs)) {
1456 dev_err(smmu->dev,
1457 "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1458 iova, fsr, fsynr, cfg->cbndx);
1459 dev_err(smmu->dev, "FAR = %016lx\n",
1460 (unsigned long)iova);
1461 dev_err(smmu->dev,
1462 "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
1463 fsr,
1464 (fsr & 0x02) ? "TF " : "",
1465 (fsr & 0x04) ? "AFF " : "",
1466 (fsr & 0x08) ? "PF " : "",
1467 (fsr & 0x10) ? "EF " : "",
1468 (fsr & 0x20) ? "TLBMCF " : "",
1469 (fsr & 0x40) ? "TLBLKF " : "",
1470 (fsr & 0x80) ? "MHF " : "",
1471 (fsr & 0x40000000) ? "SS " : "",
1472 (fsr & 0x80000000) ? "MULTI " : "");
1473 dev_err(smmu->dev,
1474 "soft iova-to-phys=%pa\n", &phys_soft);
Mitchel Humpherysd03b65d2015-11-05 11:50:29 -08001475 if (!phys_soft)
1476 dev_err(smmu->dev,
1477 "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
1478 dev_name(smmu->dev));
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001479 if (phys_atos)
1480 dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
1481 &phys_atos);
1482 else
1483 dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001484 dev_err(smmu->dev, "SID=0x%x\n", frsynra);
1485 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001486 ret = IRQ_NONE;
1487 resume = RESUME_TERMINATE;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001488 if (!non_fatal_fault) {
1489 dev_err(smmu->dev,
1490 "Unhandled arm-smmu context fault!\n");
1491 BUG();
1492 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001493 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001494
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001495 /*
1496 * If the client returns -EBUSY, do not clear FSR and do not RESUME
1497 * if stalled. This is required to keep the IOMMU client stalled on
1498 * the outstanding fault. This gives the client a chance to take any
1499 * debug action and then terminate the stalled transaction.
1500 * So, the sequence in case of stall on fault should be:
1501 * 1) Do not clear FSR or write to RESUME here
1502 * 2) Client takes any debug action
1503 * 3) Client terminates the stalled transaction and resumes the IOMMU
1504 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1505 * not before so that the fault remains outstanding. This ensures
1506 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1507 * need to be terminated.
1508 */
1509 if (tmp != -EBUSY) {
1510 /* Clear the faulting FSR */
1511 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001512
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001513 /*
1514 * Barrier required to ensure that the FSR is cleared
1515 * before resuming SMMU operation
1516 */
1517 wmb();
1518
1519 /* Retry or terminate any stalled transactions */
1520 if (fsr & FSR_SS)
1521 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1522 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001523
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001524out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001525 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001526
Patrick Daly5ba28112016-08-30 19:18:52 -07001527 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001528}
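
/*
 * Illustrative sketch (not part of the driver): a client-side fault
 * handler following the stall sequence documented above, as it would be
 * registered with iommu_set_fault_handler(). Returning -EBUSY from
 * report_iommu_fault() leaves FSR uncleared and the transaction stalled,
 * so the client can collect debug state before terminating and resuming
 * the transaction itself. The handler and its name are hypothetical.
 */
static inline int example_client_fault_handler(struct iommu_domain *domain,
					       struct device *dev,
					       unsigned long iova, int flags,
					       void *token)
{
	if (flags & IOMMU_FAULT_TRANSACTION_STALLED) {
		/* Step 2: capture client state while the transaction stalls */
		return -EBUSY;
	}

	/* Anything else: let the driver's unhandled-fault path run */
	return -ENOSYS;
}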
1529
1530static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1531{
1532 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1533 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001534 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001535
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001536 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001537 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001538
Will Deacon45ae7cf2013-06-24 18:31:25 +01001539 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1540 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1541 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1542 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1543
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001544 if (!gfsr) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001545 arm_smmu_power_off(smmu->pwr);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001546 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001547 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001548
Will Deacon45ae7cf2013-06-24 18:31:25 +01001549 dev_err_ratelimited(smmu->dev,
1550 "Unexpected global fault, this could be serious\n");
1551 dev_err_ratelimited(smmu->dev,
1552 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1553 gfsr, gfsynr0, gfsynr1, gfsynr2);
1554
1555 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001556 arm_smmu_power_off(smmu->pwr);
Will Deaconadaba322013-07-31 19:21:26 +01001557 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001558}
1559
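/* True if any of this master's stream-map entries is currently attached */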
Shiraz Hashimeca8c2e2018-01-15 20:08:38 +05301560static bool arm_smmu_master_attached(struct arm_smmu_device *smmu,
1561 struct iommu_fwspec *fwspec)
1562{
1563 int i, idx;
1564
1565 for_each_cfg_sme(fwspec, i, idx) {
1566 if (smmu->s2crs[idx].attach_count)
1567 return true;
1568 }
1569
1570 return false;
1571}
1572
Charan Teja Reddy35144b02017-09-05 16:20:46 +05301573static int arm_smmu_set_pt_format(struct arm_smmu_domain *smmu_domain,
1574 struct io_pgtable_cfg *pgtbl_cfg)
1575{
1576 struct arm_smmu_device *smmu = smmu_domain->smmu;
1577 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1578 int ret = 0;
1579
1580 if ((smmu->version > ARM_SMMU_V1) &&
1581 (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) &&
1582 !arm_smmu_has_secure_vmid(smmu_domain) &&
1583 arm_smmu_is_static_cb(smmu)) {
1584 ret = msm_tz_set_cb_format(smmu->sec_id, cfg->cbndx);
1585 }
1586 return ret;
1587}
1588
Will Deacon518f7132014-11-14 17:17:54 +00001589static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1590 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001591{
Robin Murphyb94df6f2016-08-11 17:44:06 +01001592 u32 reg, reg2;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001593 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001594 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +01001595 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1596 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001597 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001598
Will Deacon45ae7cf2013-06-24 18:31:25 +01001599 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001600 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1601 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001602
Will Deacon4a1c93c2015-03-04 12:21:03 +00001603 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001604 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1605 reg = CBA2R_RW64_64BIT;
1606 else
1607 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001608 /* 16-bit VMIDs live in CBA2R */
1609 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001610 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001611
Will Deacon4a1c93c2015-03-04 12:21:03 +00001612 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
1613 }
1614
Will Deacon45ae7cf2013-06-24 18:31:25 +01001615 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001616 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001617 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001618 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001619
Will Deacon57ca90f2014-02-06 14:59:05 +00001620 /*
1621 * Use the weakest shareability/memory types, so they are
1622 * overridden by the ttbcr/pte.
1623 */
1624 if (stage1) {
1625 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1626 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001627 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1628 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001629 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001630 }
Will Deacon44680ee2014-06-25 11:29:12 +01001631 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001632
Will Deacon518f7132014-11-14 17:17:54 +00001633 /* TTBRs */
1634 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001635 u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001636
Robin Murphyb94df6f2016-08-11 17:44:06 +01001637 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1638 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
1639 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
1640 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
1641 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
1642 writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
1643 } else {
1644 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1645 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1646 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
1647 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
1648 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1649 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
1650 }
Will Deacon518f7132014-11-14 17:17:54 +00001651 } else {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001652 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001653 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +00001654 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001655
Will Deacon518f7132014-11-14 17:17:54 +00001656 /* TTBCR */
1657 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001658 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1659 reg = pgtbl_cfg->arm_v7s_cfg.tcr;
1660 reg2 = 0;
1661 } else {
1662 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1663 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
1664 reg2 |= TTBCR2_SEP_UPSTREAM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001665 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001666 if (smmu->version > ARM_SMMU_V1)
1667 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001668 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001669 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001670 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001671 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001672
Will Deacon518f7132014-11-14 17:17:54 +00001673 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001674 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001675 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1676 reg = pgtbl_cfg->arm_v7s_cfg.prrr;
1677 reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
1678 } else {
1679 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1680 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1681 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001682 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001683 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001684 }
1685
Will Deacon45ae7cf2013-06-24 18:31:25 +01001686 /* SCTLR */
Robin Murphyb94df6f2016-08-11 17:44:06 +01001687 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001688
Patrick Daly7f377fe2017-10-06 17:37:10 -07001689 /* Ensure bypass transactions are Non-shareable */
1690 reg |= SCTLR_SHCFG_NSH << SCTLR_SHCFG_SHIFT;
1691
Charan Teja Reddyc682e472017-04-20 19:11:20 +05301692 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_CB_STALL_DISABLE)) {
1693 reg &= ~SCTLR_CFCFG;
1694 reg |= SCTLR_HUPCF;
1695 }
1696
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001697 if ((!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
1698 !(smmu_domain->attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) ||
1699 !stage1)
Patrick Dalye62d3362016-03-15 18:58:28 -07001700 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001701 if (stage1)
1702 reg |= SCTLR_S1_ASIDPNE;
1703#ifdef __BIG_ENDIAN
1704 reg |= SCTLR_E;
1705#endif
Will Deacon25724842013-08-21 13:49:53 +01001706 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001707}
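
/*
 * Illustrative sketch (not part of the driver): once the sequence above
 * has run, translation for the context bank is live when SCTLR.M is set.
 * Assuming clocks and power are already on, a hypothetical debug helper
 * could confirm that by reading the register back:
 */
static inline bool example_cb_translation_enabled(struct arm_smmu_device *smmu,
						  u8 cbndx)
{
	void __iomem *cb_base = ARM_SMMU_CB_BASE(smmu) +
				ARM_SMMU_CB(smmu, cbndx);

	return readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR) & SCTLR_M;
}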
1708
Patrick Dalyc190d932016-08-30 17:23:28 -07001709static int arm_smmu_init_asid(struct iommu_domain *domain,
1710 struct arm_smmu_device *smmu)
1711{
1712 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1713 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1714 bool dynamic = is_dynamic_domain(domain);
1715 int ret;
1716
1717 if (!dynamic) {
1718 cfg->asid = cfg->cbndx + 1;
1719 } else {
1720 mutex_lock(&smmu->idr_mutex);
1721 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1722 smmu->num_context_banks + 2,
1723 MAX_ASID + 1, GFP_KERNEL);
1724
1725 mutex_unlock(&smmu->idr_mutex);
1726 if (ret < 0) {
1727 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1728 ret);
1729 return ret;
1730 }
1731 cfg->asid = ret;
1732 }
1733 return 0;
1734}
1735
1736static void arm_smmu_free_asid(struct iommu_domain *domain)
1737{
1738 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1739 struct arm_smmu_device *smmu = smmu_domain->smmu;
1740 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1741 bool dynamic = is_dynamic_domain(domain);
1742
1743 if (cfg->asid == INVALID_ASID || !dynamic)
1744 return;
1745
1746 mutex_lock(&smmu->idr_mutex);
1747 idr_remove(&smmu->asid_idr, cfg->asid);
1748 mutex_unlock(&smmu->idr_mutex);
1749}
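
/*
 * Illustrative sketch (not part of the driver): dynamic domains store the
 * owning iommu_domain in asid_idr keyed by the allocated ASID, so mapping
 * an ASID back to its domain is a plain IDR lookup. The helper below is a
 * hypothetical example only.
 */
static inline struct iommu_domain *
example_domain_for_asid(struct arm_smmu_device *smmu, int asid)
{
	struct iommu_domain *domain;

	mutex_lock(&smmu->idr_mutex);
	domain = idr_find(&smmu->asid_idr, asid);
	mutex_unlock(&smmu->idr_mutex);

	return domain;
}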
1750
Will Deacon45ae7cf2013-06-24 18:31:25 +01001751static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Patrick Dalyea63baa2017-02-13 17:11:33 -08001752 struct arm_smmu_device *smmu,
1753 struct device *dev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001754{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001755 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001756 unsigned long ias, oas;
1757 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001758 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001759 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001760 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001761 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyce6786f2016-11-09 14:19:23 -08001762 unsigned long quirks = 0;
Patrick Dalyc190d932016-08-30 17:23:28 -07001763 bool dynamic;
Patrick Dalyda765c62017-09-11 16:31:07 -07001764 const struct iommu_gather_ops *tlb;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001765
Will Deacon518f7132014-11-14 17:17:54 +00001766 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001767 if (smmu_domain->smmu)
1768 goto out_unlock;
1769
Patrick Dalyc190d932016-08-30 17:23:28 -07001770 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1771 smmu_domain->cfg.asid = INVALID_ASID;
1772
Patrick Dalyc190d932016-08-30 17:23:28 -07001773 dynamic = is_dynamic_domain(domain);
1774 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1775 dev_err(smmu->dev, "dynamic domains not supported\n");
1776 ret = -EPERM;
1777 goto out_unlock;
1778 }
1779
Will Deaconc752ce42014-06-25 22:46:31 +01001780 /*
1781 * Mapping the requested stage onto what we support is surprisingly
1782 * complicated, mainly because the spec allows S1+S2 SMMUs without
1783 * support for nested translation. That means we end up with the
1784 * following table:
1785 *
1786 * Requested Supported Actual
1787 * S1 N S1
1788 * S1 S1+S2 S1
1789 * S1 S2 S2
1790 * S1 S1 S1
1791 * N N N
1792 * N S1+S2 S2
1793 * N S2 S2
1794 * N S1 S1
1795 *
1796 * Note that you can't actually request stage-2 mappings.
1797 */
1798 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1799 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1800 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1801 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1802
Robin Murphy7602b872016-04-28 17:12:09 +01001803 /*
1804 * Choosing a suitable context format is even more fiddly. Until we
1805 * grow some way for the caller to express a preference, and/or move
1806 * the decision into the io-pgtable code where it arguably belongs,
1807 * just aim for the closest thing to the rest of the system, and hope
1808 * that the hardware isn't esoteric enough that we can't assume AArch64
1809 * support to be a superset of AArch32 support...
1810 */
1811 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1812 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001813 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
1814 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
1815 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
1816 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
1817 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +01001818 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1819 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1820 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1821 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1822 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1823
1824 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1825 ret = -EINVAL;
1826 goto out_unlock;
1827 }
1828
Will Deaconc752ce42014-06-25 22:46:31 +01001829 switch (smmu_domain->stage) {
1830 case ARM_SMMU_DOMAIN_S1:
1831 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1832 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001833 ias = smmu->va_size;
1834 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001835 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001836 fmt = ARM_64_LPAE_S1;
Patrick Daly4423d3e2017-05-04 18:17:51 -07001837 if (smmu->options & ARM_SMMU_OPT_3LVL_TABLES)
1838 ias = min(ias, 39UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001839 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +00001840 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001841 ias = min(ias, 32UL);
1842 oas = min(oas, 40UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001843 } else {
1844 fmt = ARM_V7S;
1845 ias = min(ias, 32UL);
1846 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +01001847 }
Will Deaconc752ce42014-06-25 22:46:31 +01001848 break;
1849 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001850 /*
1851 * We will likely want to change this if/when KVM gets
1852 * involved.
1853 */
Will Deaconc752ce42014-06-25 22:46:31 +01001854 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001855 cfg->cbar = CBAR_TYPE_S2_TRANS;
1856 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001857 ias = smmu->ipa_size;
1858 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001859 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001860 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001861 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001862 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001863 ias = min(ias, 40UL);
1864 oas = min(oas, 40UL);
1865 }
Will Deaconc752ce42014-06-25 22:46:31 +01001866 break;
1867 default:
1868 ret = -EINVAL;
1869 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001870 }
1871
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001872 if (is_fast)
1873 fmt = ARM_V8L_FAST;
1874
Patrick Dalyce6786f2016-11-09 14:19:23 -08001875 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
1876 quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
Liam Mark53cf2342016-12-20 11:36:07 -08001877 if (is_iommu_pt_coherent(smmu_domain))
1878 quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
Patrick Daly49ccf332017-09-27 15:10:29 -07001879 if ((quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT) &&
1880 (smmu->model == QCOM_SMMUV500))
1881 quirks |= IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001882
Patrick Dalyda765c62017-09-11 16:31:07 -07001883 tlb = &arm_smmu_gather_ops;
Patrick Daly83174c12017-10-26 12:31:15 -07001884 if (smmu->options & ARM_SMMU_OPT_MMU500_ERRATA1)
Patrick Dalyda765c62017-09-11 16:31:07 -07001885 tlb = &qsmmuv500_errata1_smmu_gather_ops;
1886
Patrick Dalyda688822017-05-17 20:12:48 -07001887 ret = arm_smmu_alloc_cb(domain, smmu, dev);
1888 if (ret < 0)
1889 goto out_unlock;
1890 cfg->cbndx = ret;
1891
Robin Murphyb7862e32016-04-13 18:13:03 +01001892 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001893 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1894 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001895 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001896 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001897 }
1898
Charan Teja Reddy35144b02017-09-05 16:20:46 +05301899 if (arm_smmu_is_slave_side_secure(smmu_domain)) {
1900 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
1901 .quirks = quirks,
1902 .pgsize_bitmap = smmu->pgsize_bitmap,
1903 .arm_msm_secure_cfg = {
1904 .sec_id = smmu->sec_id,
1905 .cbndx = cfg->cbndx,
1906 },
1907 .iommu_dev = smmu->dev,
1908 };
1909 fmt = ARM_MSM_SECURE;
1910 } else {
1911 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
1912 .quirks = quirks,
1913 .pgsize_bitmap = smmu->pgsize_bitmap,
1914 .ias = ias,
1915 .oas = oas,
1916 .tlb = tlb,
1917 .iommu_dev = smmu->dev,
1918 };
1919 }
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001920
Will Deacon518f7132014-11-14 17:17:54 +00001921 smmu_domain->smmu = smmu;
Patrick Dalyea63baa2017-02-13 17:11:33 -08001922 smmu_domain->dev = dev;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001923 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1924 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001925 if (!pgtbl_ops) {
1926 ret = -ENOMEM;
1927 goto out_clear_smmu;
1928 }
1929
Patrick Dalyc11d1082016-09-01 15:52:44 -07001930 /*
1931 * assign any page table memory that might have been allocated
1932 * during alloc_io_pgtable_ops
1933 */
Patrick Dalye271f212016-10-04 13:24:49 -07001934 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001935 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001936 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001937
Robin Murphyd5466352016-05-09 17:20:09 +01001938 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001939 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Robin Murphyd7a8d042016-09-12 17:13:58 +01001940 domain->geometry.aperture_end = (1UL << ias) - 1;
1941 domain->geometry.force_aperture = true;
Will Deacon518f7132014-11-14 17:17:54 +00001942
Patrick Dalyc190d932016-08-30 17:23:28 -07001943 /* Assign an asid */
1944 ret = arm_smmu_init_asid(domain, smmu);
1945 if (ret)
1946 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001947
Patrick Dalyc190d932016-08-30 17:23:28 -07001948 if (!dynamic) {
1949 /* Initialise the context bank with our page table cfg */
1950 arm_smmu_init_context_bank(smmu_domain,
1951 &smmu_domain->pgtbl_cfg);
Charan Teja Reddy35144b02017-09-05 16:20:46 +05301952 /* for slave side secure, we may have to force the pagetable
1953 * format to V8L.
1954 */
1955 ret = arm_smmu_set_pt_format(smmu_domain,
1956 &smmu_domain->pgtbl_cfg);
1957 if (ret)
1958 goto out_clear_smmu;
Patrick Dalyc190d932016-08-30 17:23:28 -07001959
Patrick Daly03330cc2017-08-11 14:56:38 -07001960 arm_smmu_arch_init_context_bank(smmu_domain, dev);
1961
Patrick Dalyc190d932016-08-30 17:23:28 -07001962 /*
1963 * Request context fault interrupt. Do this last to avoid the
1964 * handler seeing a half-initialised domain state.
1965 */
1966 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1967 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001968 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1969 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001970 if (ret < 0) {
1971 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1972 cfg->irptndx, irq);
1973 cfg->irptndx = INVALID_IRPTNDX;
1974 goto out_clear_smmu;
1975 }
1976 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001977 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001978 }
Will Deacon518f7132014-11-14 17:17:54 +00001979 mutex_unlock(&smmu_domain->init_mutex);
1980
1981 /* Publish page table ops for map/unmap */
1982 smmu_domain->pgtbl_ops = pgtbl_ops;
Shiraz Hashimeca8c2e2018-01-15 20:08:38 +05301983 if (arm_smmu_is_slave_side_secure(smmu_domain) &&
1984 !arm_smmu_master_attached(smmu, dev->iommu_fwspec))
1985 arm_smmu_restore_sec_cfg(smmu, cfg->cbndx);
1986
Will Deacona9a1b0b2014-05-01 18:05:08 +01001987 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001988
Will Deacon518f7132014-11-14 17:17:54 +00001989out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001990 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001991 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001992out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001993 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001994 return ret;
1995}
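
/*
 * Illustrative sketch (not part of the driver): the context above is only
 * finalised on the first attach, so a minimal client flow that reaches it
 * looks roughly like the hypothetical helper below (error handling and
 * domain cleanup elided).
 */
static inline int example_attach_client(struct device *dev)
{
	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);

	if (!domain)
		return -ENOMEM;

	/* First attach: arm_smmu_init_domain_context() runs from here */
	return iommu_attach_device(domain, dev);
}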
1996
Patrick Daly77db4f92016-10-14 15:34:10 -07001997static void arm_smmu_domain_reinit(struct arm_smmu_domain *smmu_domain)
1998{
1999 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
2000 smmu_domain->cfg.cbndx = INVALID_CBNDX;
2001 smmu_domain->secure_vmid = VMID_INVAL;
2002}
2003
Will Deacon45ae7cf2013-06-24 18:31:25 +01002004static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
2005{
Joerg Roedel1d672632015-03-26 13:43:10 +01002006 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01002007 struct arm_smmu_device *smmu = smmu_domain->smmu;
2008 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01002009 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002010 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07002011 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002012 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002013
Robin Murphy7e96c742016-09-14 15:26:46 +01002014 if (!smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002015 return;
2016
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002017 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002018 if (ret) {
2019		WARN_ONCE(ret, "Whoops, powering on smmu %p failed. Leaking context bank\n",
2020 smmu);
2021 return;
2022 }
2023
Patrick Dalyc190d932016-08-30 17:23:28 -07002024 dynamic = is_dynamic_domain(domain);
2025 if (dynamic) {
2026 arm_smmu_free_asid(domain);
2027 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002028 arm_smmu_power_off(smmu->pwr);
Patrick Dalye271f212016-10-04 13:24:49 -07002029 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002030 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002031 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002032 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly77db4f92016-10-14 15:34:10 -07002033 arm_smmu_domain_reinit(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07002034 return;
2035 }
2036
Will Deacon518f7132014-11-14 17:17:54 +00002037 /*
2038 * Disable the context bank and free the page tables before freeing
2039 * it.
2040 */
Will Deacon44680ee2014-06-25 11:29:12 +01002041 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01002042 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01002043
Will Deacon44680ee2014-06-25 11:29:12 +01002044 if (cfg->irptndx != INVALID_IRPTNDX) {
2045 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08002046 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002047 }
2048
Markus Elfring44830b02015-11-06 18:32:41 +01002049 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07002050 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002051 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002052 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002053 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01002054 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002055
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002056 arm_smmu_power_off(smmu->pwr);
Patrick Daly77db4f92016-10-14 15:34:10 -07002057 arm_smmu_domain_reinit(smmu_domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002058}
2059
Joerg Roedel1d672632015-03-26 13:43:10 +01002060static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002061{
2062 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002063
Patrick Daly09801312016-08-29 17:02:52 -07002064 /* Do not support DOMAIN_DMA for now */
2065 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01002066 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002067 /*
2068 * Allocate the domain and initialise some of its data structures.
2069 * We can't really do anything meaningful until we've added a
2070 * master.
2071 */
2072 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
2073 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01002074 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002075
Robin Murphy7e96c742016-09-14 15:26:46 +01002076 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
2077 iommu_get_dma_cookie(&smmu_domain->domain))) {
Robin Murphy9adb9592016-01-26 18:06:36 +00002078 kfree(smmu_domain);
2079 return NULL;
2080 }
2081
Will Deacon518f7132014-11-14 17:17:54 +00002082 mutex_init(&smmu_domain->init_mutex);
2083 spin_lock_init(&smmu_domain->pgtbl_lock);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002084 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
2085 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07002086 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002087 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Patrick Daly77db4f92016-10-14 15:34:10 -07002088 arm_smmu_domain_reinit(smmu_domain);
Joerg Roedel1d672632015-03-26 13:43:10 +01002089
2090 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002091}
2092
Joerg Roedel1d672632015-03-26 13:43:10 +01002093static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002094{
Joerg Roedel1d672632015-03-26 13:43:10 +01002095 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01002096
2097 /*
2098 * Free the domain resources. We assume that all devices have
2099 * already been detached.
2100 */
Robin Murphy9adb9592016-01-26 18:06:36 +00002101 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002102 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002103 kfree(smmu_domain);
2104}
2105
Robin Murphy468f4942016-09-12 17:13:49 +01002106static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
2107{
2108 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphyd5b41782016-09-14 15:21:39 +01002109 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
Robin Murphy468f4942016-09-12 17:13:49 +01002110
2111 if (smr->valid)
2112 reg |= SMR_VALID;
2113 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
2114}
2115
Robin Murphya754fd12016-09-12 17:13:50 +01002116static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
2117{
2118 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
2119 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
2120 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
Patrick Daly7f377fe2017-10-06 17:37:10 -07002121 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT |
2122 S2CR_SHCFG_NSH << S2CR_SHCFG_SHIFT;
Robin Murphya754fd12016-09-12 17:13:50 +01002123
2124 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
2125}
2126
2127static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
2128{
2129 arm_smmu_write_s2cr(smmu, idx);
2130 if (smmu->smrs)
2131 arm_smmu_write_smr(smmu, idx);
2132}
2133
Robin Murphy6668f692016-09-12 17:13:54 +01002134static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy468f4942016-09-12 17:13:49 +01002135{
2136 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy6668f692016-09-12 17:13:54 +01002137 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002138
Robin Murphy6668f692016-09-12 17:13:54 +01002139 /* Stream indexing is blissfully easy */
2140 if (!smrs)
2141 return id;
Robin Murphy468f4942016-09-12 17:13:49 +01002142
Robin Murphy6668f692016-09-12 17:13:54 +01002143 /* Validating SMRs is... less so */
2144 for (i = 0; i < smmu->num_mapping_groups; ++i) {
2145 if (!smrs[i].valid) {
2146 /*
2147 * Note the first free entry we come across, which
2148 * we'll claim in the end if nothing else matches.
2149 */
2150 if (free_idx < 0)
2151 free_idx = i;
Robin Murphy468f4942016-09-12 17:13:49 +01002152 continue;
2153 }
Robin Murphy6668f692016-09-12 17:13:54 +01002154 /*
2155 * If the new entry is _entirely_ matched by an existing entry,
2156 * then reuse that, with the guarantee that there also cannot
2157 * be any subsequent conflicting entries. In normal use we'd
2158 * expect simply identical entries for this case, but there's
2159 * no harm in accommodating the generalisation.
2160 */
2161 if ((mask & smrs[i].mask) == mask &&
2162 !((id ^ smrs[i].id) & ~smrs[i].mask))
2163 return i;
2164 /*
2165 * If the new entry has any other overlap with an existing one,
2166 * though, then there always exists at least one stream ID
2167 * which would cause a conflict, and we can't allow that risk.
2168 */
2169 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
2170 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002171 }
2172
Robin Murphy6668f692016-09-12 17:13:54 +01002173 return free_idx;
2174}
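
/*
 * Illustrative worked example (not part of the driver), with hypothetical
 * stream IDs, of the two tests above. Take an existing SMR of id=0x40,
 * mask=0x0f, i.e. it already matches IDs 0x40-0x4f:
 *
 *   new id=0x42, mask=0x03: fully contained  -> reuse the existing index
 *   new id=0x48, mask=0x30: partial overlap (both match 0x48) -> -EINVAL
 *   new id=0x80, mask=0x00: disjoint         -> falls through to a free index
 *
 * The containment test itself, extracted as a hypothetical helper:
 */
static inline bool example_smr_contains(u16 cur_id, u16 cur_mask,
					u16 new_id, u16 new_mask)
{
	return (new_mask & cur_mask) == new_mask &&
	       !((new_id ^ cur_id) & ~cur_mask);
}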
2175
2176static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
2177{
2178 if (--smmu->s2crs[idx].count)
2179 return false;
2180
2181 smmu->s2crs[idx] = s2cr_init_val;
2182 if (smmu->smrs)
2183 smmu->smrs[idx].valid = false;
2184
2185 return true;
2186}
2187
2188static int arm_smmu_master_alloc_smes(struct device *dev)
2189{
Robin Murphy06e393e2016-09-12 17:13:55 +01002190 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2191 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy6668f692016-09-12 17:13:54 +01002192 struct arm_smmu_device *smmu = cfg->smmu;
2193 struct arm_smmu_smr *smrs = smmu->smrs;
2194 struct iommu_group *group;
2195 int i, idx, ret;
2196
2197 mutex_lock(&smmu->stream_map_mutex);
2198 /* Figure out a viable stream map entry allocation */
Robin Murphy06e393e2016-09-12 17:13:55 +01002199 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002200 u16 sid = fwspec->ids[i];
2201 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
2202
Robin Murphy6668f692016-09-12 17:13:54 +01002203 if (idx != INVALID_SMENDX) {
2204 ret = -EEXIST;
2205 goto out_err;
2206 }
2207
Robin Murphy7e96c742016-09-14 15:26:46 +01002208 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy6668f692016-09-12 17:13:54 +01002209 if (ret < 0)
2210 goto out_err;
2211
2212 idx = ret;
2213 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002214 smrs[idx].id = sid;
2215 smrs[idx].mask = mask;
Robin Murphy6668f692016-09-12 17:13:54 +01002216 smrs[idx].valid = true;
2217 }
2218 smmu->s2crs[idx].count++;
2219 cfg->smendx[i] = (s16)idx;
2220 }
2221
2222 group = iommu_group_get_for_dev(dev);
2223 if (!group)
2224 group = ERR_PTR(-ENOMEM);
2225 if (IS_ERR(group)) {
2226 ret = PTR_ERR(group);
2227 goto out_err;
2228 }
2229 iommu_group_put(group);
Robin Murphy468f4942016-09-12 17:13:49 +01002230
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002231 /* It worked! Don't poke the actual hardware until we've attached */
2232 for_each_cfg_sme(fwspec, i, idx)
Robin Murphy6668f692016-09-12 17:13:54 +01002233 smmu->s2crs[idx].group = group;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002234
Robin Murphy6668f692016-09-12 17:13:54 +01002235 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002236 return 0;
2237
Robin Murphy6668f692016-09-12 17:13:54 +01002238out_err:
Robin Murphy468f4942016-09-12 17:13:49 +01002239 while (i--) {
Robin Murphy6668f692016-09-12 17:13:54 +01002240 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy468f4942016-09-12 17:13:49 +01002241 cfg->smendx[i] = INVALID_SMENDX;
2242 }
Robin Murphy6668f692016-09-12 17:13:54 +01002243 mutex_unlock(&smmu->stream_map_mutex);
2244 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002245}
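
/*
 * Illustrative sketch (not part of the driver): after the allocation above,
 * cfg->smendx[] records which stream-map entry backs each of the master's
 * stream IDs, which is what for_each_cfg_sme() walks in the attach/detach
 * paths. A hypothetical debug dump of that mapping:
 */
static inline void example_dump_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx)
		dev_info(dev, "stream 0x%x -> SME %d\n", fwspec->ids[i], idx);
}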
2246
Robin Murphy06e393e2016-09-12 17:13:55 +01002247static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002248{
Robin Murphy06e393e2016-09-12 17:13:55 +01002249 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
2250 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy1fb519a2016-09-12 17:13:53 +01002251 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01002252
Robin Murphy6668f692016-09-12 17:13:54 +01002253 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002254 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002255 if (arm_smmu_free_sme(smmu, idx))
2256 arm_smmu_write_sme(smmu, idx);
Robin Murphy468f4942016-09-12 17:13:49 +01002257 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002258 }
Robin Murphy6668f692016-09-12 17:13:54 +01002259 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002260}
2261
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002262static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
2263 struct iommu_fwspec *fwspec)
2264{
2265 struct arm_smmu_device *smmu = smmu_domain->smmu;
2266 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2267 int i, idx;
2268 const struct iommu_gather_ops *tlb;
2269
2270 tlb = smmu_domain->pgtbl_cfg.tlb;
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302271 if (!tlb)
2272 return;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002273
2274 mutex_lock(&smmu->stream_map_mutex);
2275 for_each_cfg_sme(fwspec, i, idx) {
2276 WARN_ON(s2cr[idx].attach_count == 0);
2277 s2cr[idx].attach_count -= 1;
2278
2279 if (s2cr[idx].attach_count > 0)
2280 continue;
2281
2282 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
2283 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
2284 }
2285 mutex_unlock(&smmu->stream_map_mutex);
2286
2287 /* Ensure there are no stale mappings for this context bank */
2288 tlb->tlb_flush_all(smmu_domain);
2289}
2290
Will Deacon45ae7cf2013-06-24 18:31:25 +01002291static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphy06e393e2016-09-12 17:13:55 +01002292 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002293{
Will Deacon44680ee2014-06-25 11:29:12 +01002294 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002295 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2296 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
2297 u8 cbndx = smmu_domain->cfg.cbndx;
Robin Murphy6668f692016-09-12 17:13:54 +01002298 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002299
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002300 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002301 for_each_cfg_sme(fwspec, i, idx) {
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002302 if (s2cr[idx].attach_count++ > 0)
Robin Murphy6668f692016-09-12 17:13:54 +01002303 continue;
Robin Murphya754fd12016-09-12 17:13:50 +01002304
2305 s2cr[idx].type = type;
2306 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
2307 s2cr[idx].cbndx = cbndx;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002308 arm_smmu_write_sme(smmu, idx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002309 }
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002310 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002311
2312 return 0;
2313}
2314
Patrick Daly09801312016-08-29 17:02:52 -07002315static void arm_smmu_detach_dev(struct iommu_domain *domain,
2316 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002317{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002318 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07002319 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002320 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly09801312016-08-29 17:02:52 -07002321 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07002322 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07002323
2324 if (dynamic)
2325 return;
2326
Patrick Daly09801312016-08-29 17:02:52 -07002327 if (!smmu) {
2328 dev_err(dev, "Domain not attached; cannot detach!\n");
2329 return;
2330 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002331
Vijayanand Jitta25cd32c2017-11-16 15:14:36 +05302332 if (atomic_domain)
2333 arm_smmu_power_on_atomic(smmu->pwr);
2334 else
2335 arm_smmu_power_on(smmu->pwr);
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002336
Vijayanand Jitta25cd32c2017-11-16 15:14:36 +05302337 arm_smmu_domain_remove_master(smmu_domain, fwspec);
2338 arm_smmu_power_off(smmu->pwr);
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002339}
2340
Patrick Dalye271f212016-10-04 13:24:49 -07002341static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07002342{
Patrick Dalye271f212016-10-04 13:24:49 -07002343 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002344 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2345 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
2346 int source_vmid = VMID_HLOS;
2347 struct arm_smmu_pte_info *pte_info, *temp;
2348
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302349 if (!arm_smmu_is_master_side_secure(smmu_domain))
Patrick Dalye271f212016-10-04 13:24:49 -07002350 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002351
Patrick Dalye271f212016-10-04 13:24:49 -07002352 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002353 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2354 PAGE_SIZE, &source_vmid, 1,
2355 dest_vmids, dest_perms, 2);
2356 if (WARN_ON(ret))
2357 break;
2358 }
2359
2360 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2361 entry) {
2362 list_del(&pte_info->entry);
2363 kfree(pte_info);
2364 }
Patrick Dalye271f212016-10-04 13:24:49 -07002365 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002366}
2367
2368static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2369{
2370 int ret;
2371 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002372 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002373 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2374 struct arm_smmu_pte_info *pte_info, *temp;
2375
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302376 if (!arm_smmu_is_master_side_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002377 return;
2378
2379 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2380 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2381 PAGE_SIZE, source_vmlist, 2,
2382 &dest_vmids, &dest_perms, 1);
2383 if (WARN_ON(ret))
2384 break;
2385 free_pages_exact(pte_info->virt_addr, pte_info->size);
2386 }
2387
2388 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2389 entry) {
2390 list_del(&pte_info->entry);
2391 kfree(pte_info);
2392 }
2393}
2394
2395static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2396{
2397 struct arm_smmu_domain *smmu_domain = cookie;
2398 struct arm_smmu_pte_info *pte_info;
2399
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302400 if (smmu_domain->slave_side_secure ||
2401 !arm_smmu_has_secure_vmid(smmu_domain)) {
2402 if (smmu_domain->slave_side_secure)
2403 WARN(1, "slave side secure is enforced\n");
2404 else
2405			WARN(1, "Invalid VMID is set!\n");
2406 return;
2407 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002408
2409 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2410 if (!pte_info)
2411 return;
2412
2413 pte_info->virt_addr = addr;
2414 pte_info->size = size;
2415 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2416}
2417
2418static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2419{
2420 struct arm_smmu_domain *smmu_domain = cookie;
2421 struct arm_smmu_pte_info *pte_info;
2422
Charan Teja Reddy35144b02017-09-05 16:20:46 +05302423 if (smmu_domain->slave_side_secure ||
2424 !arm_smmu_has_secure_vmid(smmu_domain)) {
2425 if (smmu_domain->slave_side_secure)
2426 WARN(1, "slave side secure is enforced\n");
2427 else
2428			WARN(1, "Invalid VMID is set!\n");
2429 return -EINVAL;
2430 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002431
2432 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2433 if (!pte_info)
2434 return -ENOMEM;
2435 pte_info->virt_addr = addr;
2436 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2437 return 0;
2438}
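
/*
 * Illustrative sketch (not part of the driver): the prepare/assign pair is
 * used as "queue pages now, hyp-assign them in one batch later", mirroring
 * what the map and domain-init paths do under the secure domain lock. The
 * helper below is a hypothetical example only.
 */
static inline int example_assign_new_table(struct arm_smmu_domain *smmu_domain,
					   void *table_page)
{
	int ret;

	ret = arm_smmu_prepare_pgtable(table_page, smmu_domain);
	if (ret)
		return ret;

	/* Hand every queued page to {HLOS: RW, secure VMID: R} in one call */
	arm_smmu_secure_domain_lock(smmu_domain);
	ret = arm_smmu_assign_table(smmu_domain);
	arm_smmu_secure_domain_unlock(smmu_domain);

	return ret;
}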
2439
Will Deacon45ae7cf2013-06-24 18:31:25 +01002440static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2441{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002442 int ret;
Robin Murphy06e393e2016-09-12 17:13:55 +01002443 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Will Deacon518f7132014-11-14 17:17:54 +00002444 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +01002445 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly8befb662016-08-17 20:03:28 -07002446 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002447
Robin Murphy06e393e2016-09-12 17:13:55 +01002448 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002449 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2450 return -ENXIO;
2451 }
Robin Murphy06e393e2016-09-12 17:13:55 +01002452
Robin Murphy4f79b142016-10-17 12:06:21 +01002453 /*
2454 * FIXME: The arch/arm DMA API code tries to attach devices to its own
2455 * domains between of_xlate() and add_device() - we have no way to cope
2456 * with that, so until ARM gets converted to rely on groups and default
2457 * domains, just say no (but more politely than by dereferencing NULL).
2458 * This should be at least a WARN_ON once that's sorted.
2459 */
2460 if (!fwspec->iommu_priv)
2461 return -ENODEV;
2462
Robin Murphy06e393e2016-09-12 17:13:55 +01002463 smmu = fwspec_smmu(fwspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002464
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002465 /* Enable Clocks and Power */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002466 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002467 if (ret)
2468 return ret;
2469
Will Deacon518f7132014-11-14 17:17:54 +00002470 /* Ensure that the domain is finalised */
Patrick Dalyea63baa2017-02-13 17:11:33 -08002471 ret = arm_smmu_init_domain_context(domain, smmu, dev);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002472 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002473 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002474
Patrick Dalyc190d932016-08-30 17:23:28 -07002475 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002476 if (is_dynamic_domain(domain)) {
2477 ret = 0;
2478 goto out_power_off;
2479 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002480
Will Deacon45ae7cf2013-06-24 18:31:25 +01002481 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002482 * Sanity check the domain. We don't support domains across
2483 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002484 */
Robin Murphy06e393e2016-09-12 17:13:55 +01002485 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002486 dev_err(dev,
2487 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002488 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002489 ret = -EINVAL;
2490 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002491 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002492
2493 /* Looks ok, so add the device to the domain */
Robin Murphy06e393e2016-09-12 17:13:55 +01002494 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002495
2496out_power_off:
Patrick Daly466237d2016-10-14 15:59:14 -07002497 /*
2498 * Keep an additional vote for non-atomic power until domain is
2499 * detached
2500 */
2501 if (!ret && atomic_domain) {
2502 WARN_ON(arm_smmu_power_on(smmu->pwr));
2503 arm_smmu_power_off_atomic(smmu->pwr);
2504 }
2505
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002506 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002507
Will Deacon45ae7cf2013-06-24 18:31:25 +01002508 return ret;
2509}
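/*
 * Illustrative sketch (not part of the driver): a client that wants to call
 * map/unmap from atomic context marks the domain with DOMAIN_ATTR_ATOMIC
 * before attaching, which makes the attach path above keep the non-atomic
 * power vote until detach. The domain/device variables are placeholders.
 */
#if 0
	int atomic_ctx = 1;

	/* Must be set before iommu_attach_device() */
	iommu_domain_set_attr(domain, DOMAIN_ATTR_ATOMIC, &atomic_ctx);
	ret = iommu_attach_device(domain, dev);
#endif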
2510
Will Deacon45ae7cf2013-06-24 18:31:25 +01002511static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002512 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002513{
Will Deacon518f7132014-11-14 17:17:54 +00002514 int ret;
2515 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002516 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002517	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002518
Will Deacon518f7132014-11-14 17:17:54 +00002519 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002520 return -ENODEV;
2521
Patrick Dalye271f212016-10-04 13:24:49 -07002522 arm_smmu_secure_domain_lock(smmu_domain);
2523
Will Deacon518f7132014-11-14 17:17:54 +00002524 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2525 ret = ops->map(ops, iova, paddr, size, prot);
2526 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002527
2528 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002529 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002530
Will Deacon518f7132014-11-14 17:17:54 +00002531 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002532}
2533
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002534static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
2535 dma_addr_t iova)
2536{
2537 uint64_t ret;
2538 unsigned long flags;
2539 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2540 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2541
2542 if (!ops)
2543 return 0;
2544
2545 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2546 ret = ops->iova_to_pte(ops, iova);
2547 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2548 return ret;
2549}
2550
Will Deacon45ae7cf2013-06-24 18:31:25 +01002551static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2552 size_t size)
2553{
Will Deacon518f7132014-11-14 17:17:54 +00002554 size_t ret;
2555 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002556 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002557	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002558
Will Deacon518f7132014-11-14 17:17:54 +00002559 if (!ops)
2560 return 0;
2561
Patrick Daly8befb662016-08-17 20:03:28 -07002562 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002563 if (ret)
2564 return ret;
2565
Patrick Dalye271f212016-10-04 13:24:49 -07002566 arm_smmu_secure_domain_lock(smmu_domain);
2567
Will Deacon518f7132014-11-14 17:17:54 +00002568 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2569 ret = ops->unmap(ops, iova, size);
2570 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002571
Patrick Daly8befb662016-08-17 20:03:28 -07002572 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002573 /*
2574 * While splitting up block mappings, we might allocate page table
2575	 * memory during unmap, so the VMIDs need to be assigned to the
2576 * memory here as well.
2577 */
2578 arm_smmu_assign_table(smmu_domain);
2579	/* Also unassign any pages that were freed during unmap */
2580 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002581 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002582 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002583}
2584
Patrick Daly88d321d2017-02-09 18:02:13 -08002585#define MAX_MAP_SG_BATCH_SIZE (SZ_4M)
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002586static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2587 struct scatterlist *sg, unsigned int nents, int prot)
2588{
2589 int ret;
Patrick Daly88d321d2017-02-09 18:02:13 -08002590 size_t size, batch_size, size_to_unmap = 0;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002591 unsigned long flags;
2592 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2593 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Patrick Daly88d321d2017-02-09 18:02:13 -08002594 unsigned int idx_start, idx_end;
2595 struct scatterlist *sg_start, *sg_end;
2596 unsigned long __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002597
2598 if (!ops)
2599 return -ENODEV;
2600
Patrick Daly4b9a7ad2017-09-22 17:31:13 -07002601 arm_smmu_secure_domain_lock(smmu_domain);
2602
Patrick Daly88d321d2017-02-09 18:02:13 -08002603 __saved_iova_start = iova;
2604 idx_start = idx_end = 0;
2605 sg_start = sg_end = sg;
2606 while (idx_end < nents) {
2607 batch_size = sg_end->length;
2608 sg_end = sg_next(sg_end);
2609 idx_end++;
2610 while ((idx_end < nents) &&
2611 (batch_size + sg_end->length < MAX_MAP_SG_BATCH_SIZE)) {
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002612
Patrick Daly88d321d2017-02-09 18:02:13 -08002613 batch_size += sg_end->length;
2614 sg_end = sg_next(sg_end);
2615 idx_end++;
2616 }
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002617
Patrick Daly88d321d2017-02-09 18:02:13 -08002618 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2619 ret = ops->map_sg(ops, iova, sg_start, idx_end - idx_start,
2620 prot, &size);
2621 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2622 /* Returns 0 on error */
2623 if (!ret) {
2624 size_to_unmap = iova + size - __saved_iova_start;
2625 goto out;
2626 }
2627
2628 iova += batch_size;
2629 idx_start = idx_end;
2630 sg_start = sg_end;
2631 }
2632
2633out:
Patrick Dalyc11d1082016-09-01 15:52:44 -07002634 arm_smmu_assign_table(smmu_domain);
2635
Patrick Daly88d321d2017-02-09 18:02:13 -08002636 if (size_to_unmap) {
2637 arm_smmu_unmap(domain, __saved_iova_start, size_to_unmap);
2638 iova = __saved_iova_start;
2639 }
Patrick Daly4b9a7ad2017-09-22 17:31:13 -07002640 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly88d321d2017-02-09 18:02:13 -08002641 return iova - __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002642}
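/*
 * Illustrative sketch (not part of the driver): map_sg() above maps the
 * scatterlist in batches of at most MAX_MAP_SG_BATCH_SIZE and, on failure,
 * unmaps whatever was already mapped before returning. Callers therefore
 * treat any return value shorter than the total scatterlist length as a
 * failure; the variables below are placeholders.
 */
#if 0
	size_t total = 0, mapped;
	struct scatterlist *s;
	int i;

	for_each_sg(sgl, s, nents, i)
		total += s->length;

	mapped = iommu_map_sg(domain, iova, sgl, nents,
			      IOMMU_READ | IOMMU_WRITE);
	if (mapped != total)
		return -ENOMEM;	/* partial mappings were already rolled back */
#endif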
2643
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002644static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyad441dd2016-09-15 15:50:46 -07002645 dma_addr_t iova)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002646{
Joerg Roedel1d672632015-03-26 13:43:10 +01002647 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002648 struct arm_smmu_device *smmu = smmu_domain->smmu;
2649 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2650	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2651 struct device *dev = smmu->dev;
2652 void __iomem *cb_base;
2653 u32 tmp;
2654 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002655 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002656
2657 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2658
Robin Murphy661d9622015-05-27 17:09:34 +01002659 /* ATS1 registers can only be written atomically */
2660 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002661 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002662 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2663 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002664 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002665
2666 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2667 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002668 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002669 dev_err(dev,
2670 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2671 &iova, &phys);
2672 phys = 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002673 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002674 }
2675
Robin Murphyf9a05f02016-04-13 18:13:01 +01002676 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002677 if (phys & CB_PAR_F) {
2678 dev_err(dev, "translation fault!\n");
2679 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002680 phys = 0;
2681 } else {
2682 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002683 }
Patrick Dalyad441dd2016-09-15 15:50:46 -07002684
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002685 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002686}
2687
Will Deacon45ae7cf2013-06-24 18:31:25 +01002688static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002689 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002690{
Will Deacon518f7132014-11-14 17:17:54 +00002691 phys_addr_t ret;
2692 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002693 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002694	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002695
Will Deacon518f7132014-11-14 17:17:54 +00002696 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002697 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002698
Will Deacon518f7132014-11-14 17:17:54 +00002699 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002700 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002701 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002702
Will Deacon518f7132014-11-14 17:17:54 +00002703 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002704}
2705
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002706/*
2707 * This function can sleep, and cannot be called from atomic context. Will
2708 * power on register block if required. This restriction does not apply to the
2709 * original iova_to_phys() op.
2710 */
2711static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2712 dma_addr_t iova)
2713{
2714 phys_addr_t ret = 0;
2715 unsigned long flags;
2716 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly62ba1922017-08-30 16:47:18 -07002717 struct arm_smmu_device *smmu = smmu_domain->smmu;
2718
2719 if (smmu->options & ARM_SMMU_OPT_DISABLE_ATOS)
2720 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002721
Patrick Dalyad441dd2016-09-15 15:50:46 -07002722 if (smmu_domain->smmu->arch_ops &&
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002723 smmu_domain->smmu->arch_ops->iova_to_phys_hard) {
2724 ret = smmu_domain->smmu->arch_ops->iova_to_phys_hard(
Patrick Dalyad441dd2016-09-15 15:50:46 -07002725 domain, iova);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002726 return ret;
2727 }
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002728
2729 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2730 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2731 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyad441dd2016-09-15 15:50:46 -07002732 ret = __arm_smmu_iova_to_phys_hard(domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002733
2734 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2735
2736 return ret;
2737}
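/*
 * Illustrative sketch (not part of the driver): the driver offers two
 * translation paths - the software table walk used by iova_to_phys() and the
 * hardware ATOS walk above, which may sleep and powers the register block.
 * A debug path might compare the two; this assumes the IOMMU core exposes a
 * wrapper for the vendor iova_to_phys_hard() op, as the msm tree does.
 */
#if 0
	phys_addr_t sw = iommu_iova_to_phys(domain, iova);
	phys_addr_t hw = iommu_iova_to_phys_hard(domain, iova);

	if (sw != hw)
		pr_err("iova %pad: sw walk %pa != atos %pa\n", &iova, &sw, &hw);
#endif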
2738
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002739static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002740{
Will Deacond0948942014-06-24 17:30:10 +01002741 switch (cap) {
2742 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002743 /*
2744 * Return true here as the SMMU can always send out coherent
2745 * requests.
2746 */
2747 return true;
Will Deacond0948942014-06-24 17:30:10 +01002748 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002749 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002750 case IOMMU_CAP_NOEXEC:
2751 return true;
Will Deacond0948942014-06-24 17:30:10 +01002752 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002753 return false;
Will Deacond0948942014-06-24 17:30:10 +01002754 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002755}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002756
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +05302757#ifdef CONFIG_MSM_TZ_SMMU
2758static struct arm_smmu_device *arm_smmu_get_by_addr(void __iomem *addr)
2759{
2760 struct arm_smmu_device *smmu;
2761 unsigned long flags;
2762
2763 spin_lock_irqsave(&arm_smmu_devices_lock, flags);
2764 list_for_each_entry(smmu, &arm_smmu_devices, list) {
2765 unsigned long base = (unsigned long)smmu->base;
2766 unsigned long mask = ~(smmu->size - 1);
2767
2768 if ((base & mask) == ((unsigned long)addr & mask)) {
2769 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2770 return smmu;
2771 }
2772 }
2773 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2774 return NULL;
2775}
2776
2777bool arm_smmu_skip_write(void __iomem *addr)
2778{
2779 struct arm_smmu_device *smmu;
Shiraz Hashima28a4792018-01-13 00:39:52 +05302780 unsigned long cb;
2781 int i;
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +05302782
2783 smmu = arm_smmu_get_by_addr(addr);
Shiraz Hashima28a4792018-01-13 00:39:52 +05302784
2785	/* Skip the write if the SMMU is not available yet */
2786 if (!smmu)
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +05302787 return true;
Shiraz Hashima28a4792018-01-13 00:39:52 +05302788
2789 /* Do not write to global space */
2790 if (((unsigned long)addr & (smmu->size - 1)) < (smmu->size >> 1))
2791 return true;
2792
2793 /* Finally skip writing to secure CB */
2794 cb = ((unsigned long)addr & ((smmu->size >> 1) - 1)) >> PAGE_SHIFT;
2795 for (i = 0; i < smmu->num_mapping_groups; i++) {
2796 if ((smmu->s2crs[i].cbndx == cb) &&
2797 (smmu->s2crs[i].write_protected))
2798 return true;
2799 }
2800
2801 return false;
Charan Teja Reddy6f5cbe7d2017-12-28 19:14:15 +05302802}
2803#endif
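/*
 * Illustrative sketch (not part of the driver): arm_smmu_skip_write() above
 * decodes a register address as follows - the lower half of the SMMU's
 * address space is the global register space, while the upper half holds the
 * context banks at one page each. The numbers below assume a hypothetical
 * SMMU with a 64K register space and 4K pages.
 */
#if 0
	unsigned long offset = 0x9000;			/* example offset */
	bool global = offset < (SZ_64K >> 1);		/* false */
	unsigned long cb = (offset & ((SZ_64K >> 1) - 1)) >> PAGE_SHIFT; /* CB 1 */
#endif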
2804
Patrick Daly8e3371a2017-02-13 22:14:53 -08002805static struct arm_smmu_device *arm_smmu_get_by_list(struct device_node *np)
2806{
2807 struct arm_smmu_device *smmu;
2808 unsigned long flags;
2809
2810 spin_lock_irqsave(&arm_smmu_devices_lock, flags);
2811 list_for_each_entry(smmu, &arm_smmu_devices, list) {
2812 if (smmu->dev->of_node == np) {
2813 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2814 return smmu;
2815 }
2816 }
2817 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2818 return NULL;
2819}
2820
Robin Murphy7e96c742016-09-14 15:26:46 +01002821static int arm_smmu_match_node(struct device *dev, void *data)
2822{
2823 return dev->of_node == data;
2824}
2825
2826static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
2827{
2828 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
2829 np, arm_smmu_match_node);
2830 put_device(dev);
Patrick Daly8e3371a2017-02-13 22:14:53 -08002831 return dev ? dev_get_drvdata(dev) : arm_smmu_get_by_list(np);
Robin Murphy7e96c742016-09-14 15:26:46 +01002832}
2833
Will Deacon03edb222015-01-19 14:27:33 +00002834static int arm_smmu_add_device(struct device *dev)
2835{
Robin Murphy06e393e2016-09-12 17:13:55 +01002836 struct arm_smmu_device *smmu;
Robin Murphyd5b41782016-09-14 15:21:39 +01002837 struct arm_smmu_master_cfg *cfg;
Robin Murphy7e96c742016-09-14 15:26:46 +01002838 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyd5b41782016-09-14 15:21:39 +01002839 int i, ret;
2840
Robin Murphy7e96c742016-09-14 15:26:46 +01002841 if (using_legacy_binding) {
2842 ret = arm_smmu_register_legacy_master(dev, &smmu);
2843 fwspec = dev->iommu_fwspec;
2844 if (ret)
2845 goto out_free;
Robin Murphy22e6f6c2016-11-02 17:31:32 +00002846 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002847 smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
2848 if (!smmu)
2849 return -ENODEV;
2850 } else {
2851 return -ENODEV;
2852 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002853
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002854 ret = arm_smmu_power_on(smmu->pwr);
2855 if (ret)
2856 goto out_free;
2857
Robin Murphyd5b41782016-09-14 15:21:39 +01002858 ret = -EINVAL;
Robin Murphy06e393e2016-09-12 17:13:55 +01002859 for (i = 0; i < fwspec->num_ids; i++) {
2860 u16 sid = fwspec->ids[i];
Robin Murphy7e96c742016-09-14 15:26:46 +01002861 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyd5b41782016-09-14 15:21:39 +01002862
Robin Murphy06e393e2016-09-12 17:13:55 +01002863 if (sid & ~smmu->streamid_mask) {
Robin Murphyd5b41782016-09-14 15:21:39 +01002864 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002865 sid, smmu->streamid_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002866 goto out_pwr_off;
Robin Murphyd5b41782016-09-14 15:21:39 +01002867 }
Robin Murphy7e96c742016-09-14 15:26:46 +01002868 if (mask & ~smmu->smr_mask_mask) {
2869 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
2870 sid, smmu->smr_mask_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002871 goto out_pwr_off;
Robin Murphy7e96c742016-09-14 15:26:46 +01002872 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002873 }
Will Deacon03edb222015-01-19 14:27:33 +00002874
Robin Murphy06e393e2016-09-12 17:13:55 +01002875 ret = -ENOMEM;
2876 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
2877 GFP_KERNEL);
2878 if (!cfg)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002879 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002880
2881 cfg->smmu = smmu;
2882 fwspec->iommu_priv = cfg;
2883 while (i--)
2884 cfg->smendx[i] = INVALID_SMENDX;
2885
Robin Murphy6668f692016-09-12 17:13:54 +01002886 ret = arm_smmu_master_alloc_smes(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002887 if (ret)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002888 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002889
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002890 arm_smmu_power_off(smmu->pwr);
Robin Murphy06e393e2016-09-12 17:13:55 +01002891 return 0;
Robin Murphyd5b41782016-09-14 15:21:39 +01002892
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002893out_pwr_off:
2894 arm_smmu_power_off(smmu->pwr);
Robin Murphyd5b41782016-09-14 15:21:39 +01002895out_free:
Robin Murphy06e393e2016-09-12 17:13:55 +01002896 if (fwspec)
2897 kfree(fwspec->iommu_priv);
2898 iommu_fwspec_free(dev);
Robin Murphyd5b41782016-09-14 15:21:39 +01002899 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00002900}
2901
Will Deacon45ae7cf2013-06-24 18:31:25 +01002902static void arm_smmu_remove_device(struct device *dev)
2903{
Robin Murphy06e393e2016-09-12 17:13:55 +01002904 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002905 struct arm_smmu_device *smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002906
Robin Murphy06e393e2016-09-12 17:13:55 +01002907 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyd5b41782016-09-14 15:21:39 +01002908 return;
Robin Murphya754fd12016-09-12 17:13:50 +01002909
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002910 smmu = fwspec_smmu(fwspec);
2911 if (arm_smmu_power_on(smmu->pwr)) {
2912 WARN_ON(1);
2913 return;
2914 }
2915
Robin Murphy06e393e2016-09-12 17:13:55 +01002916 arm_smmu_master_free_smes(fwspec);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002917 iommu_group_remove_device(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002918 kfree(fwspec->iommu_priv);
2919 iommu_fwspec_free(dev);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002920 arm_smmu_power_off(smmu->pwr);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002921}
2922
Joerg Roedelaf659932015-10-21 23:51:41 +02002923static struct iommu_group *arm_smmu_device_group(struct device *dev)
2924{
Robin Murphy06e393e2016-09-12 17:13:55 +01002925 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2926 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy6668f692016-09-12 17:13:54 +01002927 struct iommu_group *group = NULL;
2928 int i, idx;
2929
Robin Murphy06e393e2016-09-12 17:13:55 +01002930 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002931 if (group && smmu->s2crs[idx].group &&
2932 group != smmu->s2crs[idx].group)
2933 return ERR_PTR(-EINVAL);
2934
2935 group = smmu->s2crs[idx].group;
2936 }
2937
Patrick Daly03330cc2017-08-11 14:56:38 -07002938 if (!group) {
2939 if (dev_is_pci(dev))
2940 group = pci_device_group(dev);
2941 else
2942 group = generic_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02002943
Patrick Daly03330cc2017-08-11 14:56:38 -07002944 if (IS_ERR(group))
2945 return NULL;
2946 }
2947
2948 if (arm_smmu_arch_device_group(dev, group)) {
2949 iommu_group_put(group);
2950 return ERR_PTR(-EINVAL);
2951 }
Joerg Roedelaf659932015-10-21 23:51:41 +02002952
Joerg Roedelaf659932015-10-21 23:51:41 +02002953 return group;
2954}
2955
Will Deaconc752ce42014-06-25 22:46:31 +01002956static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
2957 enum iommu_attr attr, void *data)
2958{
Joerg Roedel1d672632015-03-26 13:43:10 +01002959 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002960 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01002961
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002962 mutex_lock(&smmu_domain->init_mutex);
Will Deaconc752ce42014-06-25 22:46:31 +01002963 switch (attr) {
2964 case DOMAIN_ATTR_NESTING:
2965 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002966 ret = 0;
2967 break;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08002968 case DOMAIN_ATTR_PT_BASE_ADDR:
2969 *((phys_addr_t *)data) =
2970 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002971 ret = 0;
2972 break;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002973 case DOMAIN_ATTR_CONTEXT_BANK:
2974 /* context bank index isn't valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002975 if (smmu_domain->smmu == NULL) {
2976 ret = -ENODEV;
2977 break;
2978 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002979 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
2980 ret = 0;
2981 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002982 case DOMAIN_ATTR_TTBR0: {
2983 u64 val;
2984 struct arm_smmu_device *smmu = smmu_domain->smmu;
2985 /* not valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002986 if (smmu == NULL) {
2987 ret = -ENODEV;
2988 break;
2989 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002990 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2991 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
2992 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
2993 << (TTBRn_ASID_SHIFT);
2994 *((u64 *)data) = val;
2995 ret = 0;
2996 break;
2997 }
2998 case DOMAIN_ATTR_CONTEXTIDR:
2999 /* not valid until attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003000 if (smmu_domain->smmu == NULL) {
3001 ret = -ENODEV;
3002 break;
3003 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06003004 *((u32 *)data) = smmu_domain->cfg.procid;
3005 ret = 0;
3006 break;
3007 case DOMAIN_ATTR_PROCID:
3008 *((u32 *)data) = smmu_domain->cfg.procid;
3009 ret = 0;
3010 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07003011 case DOMAIN_ATTR_DYNAMIC:
3012 *((int *)data) = !!(smmu_domain->attributes
3013 & (1 << DOMAIN_ATTR_DYNAMIC));
3014 ret = 0;
3015 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07003016 case DOMAIN_ATTR_NON_FATAL_FAULTS:
3017 *((int *)data) = !!(smmu_domain->attributes
3018 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
3019 ret = 0;
3020 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07003021 case DOMAIN_ATTR_S1_BYPASS:
3022 *((int *)data) = !!(smmu_domain->attributes
3023 & (1 << DOMAIN_ATTR_S1_BYPASS));
3024 ret = 0;
3025 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07003026 case DOMAIN_ATTR_SECURE_VMID:
3027 *((int *)data) = smmu_domain->secure_vmid;
3028 ret = 0;
3029 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08003030 case DOMAIN_ATTR_PGTBL_INFO: {
3031 struct iommu_pgtbl_info *info = data;
3032
3033 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
3034 ret = -ENODEV;
3035 break;
3036 }
3037 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
3038 ret = 0;
3039 break;
3040 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003041 case DOMAIN_ATTR_FAST:
3042 *((int *)data) = !!(smmu_domain->attributes
3043 & (1 << DOMAIN_ATTR_FAST));
3044 ret = 0;
3045 break;
Patrick Daly1e279922017-09-06 15:57:45 -07003046 case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
3047 *((int *)data) = !!(smmu_domain->attributes
3048 & (1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR));
3049 ret = 0;
3050 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08003051 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
3052 *((int *)data) = !!(smmu_domain->attributes &
3053 (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
3054 ret = 0;
3055 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003056 case DOMAIN_ATTR_EARLY_MAP:
3057 *((int *)data) = !!(smmu_domain->attributes
3058 & (1 << DOMAIN_ATTR_EARLY_MAP));
3059 ret = 0;
3060 break;
Mitchel Humpherys05314f32016-06-07 16:04:40 -07003061 case DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003062 if (!smmu_domain->smmu) {
3063 ret = -ENODEV;
3064 break;
3065 }
Liam Mark53cf2342016-12-20 11:36:07 -08003066 *((int *)data) = is_iommu_pt_coherent(smmu_domain);
3067 ret = 0;
3068 break;
3069 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
3070 *((int *)data) = !!(smmu_domain->attributes
3071 & (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
Mitchel Humpherys05314f32016-06-07 16:04:40 -07003072 ret = 0;
3073 break;
Charan Teja Reddyc682e472017-04-20 19:11:20 +05303074 case DOMAIN_ATTR_CB_STALL_DISABLE:
3075 *((int *)data) = !!(smmu_domain->attributes
3076 & (1 << DOMAIN_ATTR_CB_STALL_DISABLE));
3077 ret = 0;
3078 break;
Patrick Daly83174c12017-10-26 12:31:15 -07003079 case DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN:
Patrick Daly23301482017-10-12 16:18:25 -07003080 *((int *)data) = smmu_domain->qsmmuv500_errata2_min_align;
3081 ret = 0;
3082 break;
Will Deaconc752ce42014-06-25 22:46:31 +01003083 default:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003084 ret = -ENODEV;
3085 break;
Will Deaconc752ce42014-06-25 22:46:31 +01003086 }
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06003087 mutex_unlock(&smmu_domain->init_mutex);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06003088 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01003089}
3090
3091static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
3092 enum iommu_attr attr, void *data)
3093{
Will Deacon518f7132014-11-14 17:17:54 +00003094 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01003095 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01003096
Will Deacon518f7132014-11-14 17:17:54 +00003097 mutex_lock(&smmu_domain->init_mutex);
3098
Will Deaconc752ce42014-06-25 22:46:31 +01003099 switch (attr) {
3100 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00003101 if (smmu_domain->smmu) {
3102 ret = -EPERM;
3103 goto out_unlock;
3104 }
3105
Will Deaconc752ce42014-06-25 22:46:31 +01003106 if (*(int *)data)
3107 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
3108 else
3109 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
3110
Will Deacon518f7132014-11-14 17:17:54 +00003111 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06003112 case DOMAIN_ATTR_PROCID:
3113 if (smmu_domain->smmu != NULL) {
3114 dev_err(smmu_domain->smmu->dev,
3115 "cannot change procid attribute while attached\n");
3116 ret = -EBUSY;
3117 break;
3118 }
3119 smmu_domain->cfg.procid = *((u32 *)data);
3120 ret = 0;
3121 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07003122 case DOMAIN_ATTR_DYNAMIC: {
3123 int dynamic = *((int *)data);
3124
3125 if (smmu_domain->smmu != NULL) {
3126 dev_err(smmu_domain->smmu->dev,
3127 "cannot change dynamic attribute while attached\n");
3128 ret = -EBUSY;
3129 break;
3130 }
3131
3132 if (dynamic)
3133 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
3134 else
3135 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
3136 ret = 0;
3137 break;
3138 }
3139 case DOMAIN_ATTR_CONTEXT_BANK:
3140 /* context bank can't be set while attached */
3141 if (smmu_domain->smmu != NULL) {
3142 ret = -EBUSY;
3143 break;
3144 }
3145 /* ... and it can only be set for dynamic contexts. */
3146 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
3147 ret = -EINVAL;
3148 break;
3149 }
3150
3151 /* this will be validated during attach */
3152 smmu_domain->cfg.cbndx = *((unsigned int *)data);
3153 ret = 0;
3154 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07003155 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
3156 u32 non_fatal_faults = *((int *)data);
3157
3158 if (non_fatal_faults)
3159 smmu_domain->attributes |=
3160 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
3161 else
3162 smmu_domain->attributes &=
3163 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
3164 ret = 0;
3165 break;
3166 }
Patrick Dalye62d3362016-03-15 18:58:28 -07003167 case DOMAIN_ATTR_S1_BYPASS: {
3168 int bypass = *((int *)data);
3169
3170 /* bypass can't be changed while attached */
3171 if (smmu_domain->smmu != NULL) {
3172 ret = -EBUSY;
3173 break;
3174 }
3175 if (bypass)
3176 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
3177 else
3178 smmu_domain->attributes &=
3179 ~(1 << DOMAIN_ATTR_S1_BYPASS);
3180
3181 ret = 0;
3182 break;
3183 }
Patrick Daly8befb662016-08-17 20:03:28 -07003184 case DOMAIN_ATTR_ATOMIC:
3185 {
3186 int atomic_ctx = *((int *)data);
3187
3188 /* can't be changed while attached */
3189 if (smmu_domain->smmu != NULL) {
3190 ret = -EBUSY;
3191 break;
3192 }
3193 if (atomic_ctx)
3194 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
3195 else
3196 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
3197 break;
3198 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07003199 case DOMAIN_ATTR_SECURE_VMID:
3200 if (smmu_domain->secure_vmid != VMID_INVAL) {
3201 ret = -ENODEV;
3202 WARN(1, "secure vmid already set!");
3203 break;
3204 }
3205 smmu_domain->secure_vmid = *((int *)data);
3206 break;
Patrick Daly1e279922017-09-06 15:57:45 -07003207 case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
3208 if (*((int *)data))
3209 smmu_domain->attributes |=
3210 1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR;
3211 ret = 0;
3212 break;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003213 /*
3214 * fast_smmu_unmap_page() and fast_smmu_alloc_iova() both
3215 * expect that the bus/clock/regulator are already on. Thus also
3216	 * force DOMAIN_ATTR_ATOMIC to be set.
3217 */
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003218 case DOMAIN_ATTR_FAST:
Patrick Daly0df84ac2017-10-11 17:32:41 -07003219 {
3220 int fast = *((int *)data);
3221
3222 if (fast) {
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003223 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003224 smmu_domain->attributes |= 1 << DOMAIN_ATTR_ATOMIC;
3225 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003226 ret = 0;
3227 break;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003228 }
Patrick Dalyce6786f2016-11-09 14:19:23 -08003229 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
3230 /* can't be changed while attached */
3231 if (smmu_domain->smmu != NULL) {
3232 ret = -EBUSY;
3233 break;
3234 }
3235 if (*((int *)data))
3236 smmu_domain->attributes |=
3237 1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
3238 ret = 0;
3239 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003240 case DOMAIN_ATTR_EARLY_MAP: {
3241 int early_map = *((int *)data);
3242
3243 ret = 0;
3244 if (early_map) {
3245 smmu_domain->attributes |=
3246 1 << DOMAIN_ATTR_EARLY_MAP;
3247 } else {
3248 if (smmu_domain->smmu)
3249 ret = arm_smmu_enable_s1_translations(
3250 smmu_domain);
3251
3252 if (!ret)
3253 smmu_domain->attributes &=
3254 ~(1 << DOMAIN_ATTR_EARLY_MAP);
3255 }
3256 break;
3257 }
Liam Mark53cf2342016-12-20 11:36:07 -08003258 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT: {
3259 int force_coherent = *((int *)data);
3260
3261 if (smmu_domain->smmu != NULL) {
3262 dev_err(smmu_domain->smmu->dev,
3263 "cannot change force coherent attribute while attached\n");
3264 ret = -EBUSY;
3265 break;
3266 }
3267
3268 if (force_coherent)
3269 smmu_domain->attributes |=
3270 1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
3271 else
3272 smmu_domain->attributes &=
3273 ~(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT);
3274
3275 ret = 0;
3276 break;
3277 }
3278
Charan Teja Reddyc682e472017-04-20 19:11:20 +05303279 case DOMAIN_ATTR_CB_STALL_DISABLE:
3280 if (*((int *)data))
3281 smmu_domain->attributes |=
3282 1 << DOMAIN_ATTR_CB_STALL_DISABLE;
3283 ret = 0;
3284 break;
Will Deaconc752ce42014-06-25 22:46:31 +01003285 default:
Will Deacon518f7132014-11-14 17:17:54 +00003286 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01003287 }
Will Deacon518f7132014-11-14 17:17:54 +00003288
3289out_unlock:
3290 mutex_unlock(&smmu_domain->init_mutex);
3291 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01003292}
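/*
 * Illustrative sketch (not part of the driver): most of the attributes
 * handled above can only be set on a detached domain, so clients configure
 * them between domain allocation and attach. The VMID below is a placeholder
 * value and the domain/device variables are assumed to exist.
 */
#if 0
	int fast = 1;
	int vmid = VMID_CP_PIXEL;	/* hypothetical secure VMID */

	if (iommu_domain_set_attr(domain, DOMAIN_ATTR_FAST, &fast) ||
	    iommu_domain_set_attr(domain, DOMAIN_ATTR_SECURE_VMID, &vmid))
		return -EINVAL;

	ret = iommu_attach_device(domain, dev);
#endif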
3293
Robin Murphy7e96c742016-09-14 15:26:46 +01003294static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
3295{
3296 u32 fwid = 0;
3297
3298 if (args->args_count > 0)
3299 fwid |= (u16)args->args[0];
3300
3301 if (args->args_count > 1)
3302 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
3303
3304 return iommu_fwspec_add_ids(dev, &fwid, 1);
3305}
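/*
 * Illustrative sketch (not part of the driver): of_xlate() above packs the
 * two "iommus" cells into a single fwspec ID, with the SMR mask in the upper
 * bits. The stream ID and mask values below are hypothetical.
 *
 *	widget@a000000 {
 *		iommus = <&apps_smmu 0x840 0x3f>;
 *	};
 */
#if 0
	u32 fwid = 0x840 | (0x3f << SMR_MASK_SHIFT);
	u16 sid  = (u16)fwid;			/* 0x840 */
	u16 mask = fwid >> SMR_MASK_SHIFT;	/* 0x3f  */
#endif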
3306
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003307static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain)
3308{
3309 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3310 struct arm_smmu_device *smmu = smmu_domain->smmu;
3311 void __iomem *cb_base;
3312 u32 reg;
3313 int ret;
3314
3315 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3316 ret = arm_smmu_power_on(smmu->pwr);
3317 if (ret)
3318 return ret;
3319
3320 reg = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3321 reg |= SCTLR_M;
3322
3323 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
3324 arm_smmu_power_off(smmu->pwr);
3325 return ret;
3326}
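/*
 * Illustrative sketch (not part of the driver): DOMAIN_ATTR_EARLY_MAP lets a
 * client attach and pre-populate mappings before translation is turned on;
 * clearing the attribute afterwards reaches arm_smmu_enable_s1_translations()
 * above, which sets SCTLR.M. This assumes the context-bank init path leaves
 * SCTLR.M clear while EARLY_MAP is set (that code is outside this excerpt).
 */
#if 0
	int on = 1, off = 0;

	iommu_domain_set_attr(domain, DOMAIN_ATTR_EARLY_MAP, &on);
	ret = iommu_attach_device(domain, dev);	/* translation still disabled */
	/* ... pre-populate mappings with iommu_map() ... */
	iommu_domain_set_attr(domain, DOMAIN_ATTR_EARLY_MAP, &off);
#endif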
3327
Liam Mark3ba41cf2016-12-09 14:39:04 -08003328static bool arm_smmu_is_iova_coherent(struct iommu_domain *domain,
3329 dma_addr_t iova)
3330{
3331 bool ret;
3332 unsigned long flags;
3333 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3334 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
3335
3336 if (!ops)
3337 return false;
3338
3339 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
3340 ret = ops->is_iova_coherent(ops, iova);
3341 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
3342 return ret;
3343}
3344
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003345static void arm_smmu_trigger_fault(struct iommu_domain *domain,
3346 unsigned long flags)
3347{
3348 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3349 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3350 struct arm_smmu_device *smmu;
3351 void __iomem *cb_base;
3352
3353 if (!smmu_domain->smmu) {
3354 pr_err("Can't trigger faults on non-attached domains\n");
3355 return;
3356 }
3357
3358 smmu = smmu_domain->smmu;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003359 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003360 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003361
3362 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3363 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
3364 flags, cfg->cbndx);
3365 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07003366 /* give the interrupt time to fire... */
3367 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003368
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003369 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003370}
3371
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003372static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
3373{
Patrick Dalyda765c62017-09-11 16:31:07 -07003374 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3375 const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;
3376
3377 tlb->tlb_flush_all(smmu_domain);
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003378}
3379
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003380static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
3381{
3382 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3383
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003384 return arm_smmu_power_on(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003385}
3386
3387static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
3388{
3389 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3390
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003391 arm_smmu_power_off(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003392}
3393
Will Deacon518f7132014-11-14 17:17:54 +00003394static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01003395 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01003396 .domain_alloc = arm_smmu_domain_alloc,
3397 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01003398 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07003399 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01003400 .map = arm_smmu_map,
3401 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07003402 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01003403 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07003404 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01003405 .add_device = arm_smmu_add_device,
3406 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02003407 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01003408 .domain_get_attr = arm_smmu_domain_get_attr,
3409 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy7e96c742016-09-14 15:26:46 +01003410 .of_xlate = arm_smmu_of_xlate,
Will Deacon518f7132014-11-14 17:17:54 +00003411 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003412 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003413 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003414 .enable_config_clocks = arm_smmu_enable_config_clocks,
3415 .disable_config_clocks = arm_smmu_disable_config_clocks,
Liam Mark3ba41cf2016-12-09 14:39:04 -08003416 .is_iova_coherent = arm_smmu_is_iova_coherent,
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07003417 .iova_to_pte = arm_smmu_iova_to_pte,
Will Deacon45ae7cf2013-06-24 18:31:25 +01003418};
3419
Patrick Dalyad441dd2016-09-15 15:50:46 -07003420#define IMPL_DEF1_MICRO_MMU_CTRL 0
3421#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
3422#define MICRO_MMU_CTRL_IDLE (1 << 3)
3423
3424/* Definitions for implementation-defined registers */
3425#define ACTLR_QCOM_OSH_SHIFT 28
3426#define ACTLR_QCOM_OSH 1
3427
3428#define ACTLR_QCOM_ISH_SHIFT 29
3429#define ACTLR_QCOM_ISH 1
3430
3431#define ACTLR_QCOM_NSH_SHIFT 30
3432#define ACTLR_QCOM_NSH 1
3433
3434static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003435{
3436 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003437 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003438
3439 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
3440 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
3441 0, 30000)) {
3442 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
3443 return -EBUSY;
3444 }
3445
3446 return 0;
3447}
3448
Patrick Dalyad441dd2016-09-15 15:50:46 -07003449static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003450{
3451 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3452 u32 reg;
3453
3454 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3455 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
Charan Teja Reddy35144b02017-09-05 16:20:46 +05303456
3457 if (arm_smmu_is_static_cb(smmu)) {
3458 phys_addr_t impl_def1_base_phys = impl_def1_base - smmu->base +
3459 smmu->phys_addr;
3460
3461 if (scm_io_write(impl_def1_base_phys +
3462 IMPL_DEF1_MICRO_MMU_CTRL, reg)) {
3463 dev_err(smmu->dev,
3464				"scm_io_write failed. SMMU might not be halted");
3465 return -EINVAL;
3466 }
3467 } else {
3468 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3469 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003470
Patrick Dalyad441dd2016-09-15 15:50:46 -07003471 return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003472}
3473
Patrick Dalyad441dd2016-09-15 15:50:46 -07003474static int qsmmuv2_halt(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003475{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003476 return __qsmmuv2_halt(smmu, true);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003477}
3478
Patrick Dalyad441dd2016-09-15 15:50:46 -07003479static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003480{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003481 return __qsmmuv2_halt(smmu, false);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003482}
3483
Patrick Dalyad441dd2016-09-15 15:50:46 -07003484static void qsmmuv2_resume(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003485{
3486 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3487 u32 reg;
3488
3489 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3490 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
Charan Teja Reddy35144b02017-09-05 16:20:46 +05303491
3492 if (arm_smmu_is_static_cb(smmu)) {
3493 phys_addr_t impl_def1_base_phys = impl_def1_base - smmu->base +
3494 smmu->phys_addr;
3495
3496 if (scm_io_write(impl_def1_base_phys +
3497 IMPL_DEF1_MICRO_MMU_CTRL, reg))
3498 dev_err(smmu->dev,
3499				"scm_io_write failed. SMMU might not be resumed");
3500 } else {
3501 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3502 }
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003503}
3504
Patrick Dalyad441dd2016-09-15 15:50:46 -07003505static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003506{
3507 int i;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003508 u32 val;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003509 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003510 void __iomem *cb_base;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003511
Patrick Dalyad441dd2016-09-15 15:50:46 -07003512 /*
3513 * SCTLR.M must be disabled here per ARM SMMUv2 spec
3514 * to prevent table walks with an inconsistent state.
3515 */
3516 for (i = 0; i < smmu->num_context_banks; ++i) {
3517 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3518 val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
3519 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
3520 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
3521 writel_relaxed(val, cb_base + ARM_SMMU_CB_ACTLR);
3522 }
3523
3524 /* Program implementation defined registers */
3525 qsmmuv2_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003526 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
3527 writel_relaxed(regs[i].value,
3528 ARM_SMMU_GR0(smmu) + regs[i].offset);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003529 qsmmuv2_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003530}
3531
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003532static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
3533 dma_addr_t iova)
Patrick Dalyad441dd2016-09-15 15:50:46 -07003534{
3535 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3536 struct arm_smmu_device *smmu = smmu_domain->smmu;
3537 int ret;
3538 phys_addr_t phys = 0;
3539 unsigned long flags;
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003540 u32 sctlr, sctlr_orig, fsr;
3541 void __iomem *cb_base;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003542
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003543 ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003544 if (ret)
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003545 return ret;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003546
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003547 spin_lock_irqsave(&smmu->atos_lock, flags);
3548 cb_base = ARM_SMMU_CB_BASE(smmu) +
3549 ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003550
3551 qsmmuv2_halt_nowait(smmu);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003552 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003553 qsmmuv2_wait_for_halt(smmu);
3554
3555 /* clear FSR to allow ATOS to log any faults */
3556 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
3557 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
3558
3559 /* disable stall mode momentarily */
3560 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3561 sctlr = sctlr_orig & ~SCTLR_CFCFG;
3562 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
3563
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003564 phys = __arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003565
3566 /* restore SCTLR */
3567 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
3568
3569 qsmmuv2_resume(smmu);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003570 spin_unlock_irqrestore(&smmu->atos_lock, flags);
3571
3572 arm_smmu_power_off(smmu_domain->smmu->pwr);
3573 return phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003574}
3575
3576struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
3577 .device_reset = qsmmuv2_device_reset,
3578 .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
Patrick Dalyad441dd2016-09-15 15:50:46 -07003579};
3580
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003581static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003582{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003583 int i;
3584 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003585 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003586 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003587
Peng Fan3ca37122016-05-03 21:50:30 +08003588 /*
3589	 * Before clearing ARM_MMU500_ACTLR_CPRE, the CACHE_LOCK bit of
3590	 * ACR must be cleared first; CACHE_LOCK is only present in
3591	 * MMU-500 r2p0 onwards.
3592 */
3593 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
3594 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
3595 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
3596 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
3597 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
3598 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
3599 }
3600
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003601 /* Make sure all context banks are disabled and clear CB_FSR */
3602 for (i = 0; i < smmu->num_context_banks; ++i) {
3603 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3604 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
3605 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003606 /*
3607 * Disable MMU-500's not-particularly-beneficial next-page
3608 * prefetcher for the sake of errata #841119 and #826419.
3609 */
3610 if (smmu->model == ARM_MMU500) {
3611 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
3612 reg &= ~ARM_MMU500_ACTLR_CPRE;
3613 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
3614 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003615 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003616}
3617
3618static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
3619{
3620 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy468f4942016-09-12 17:13:49 +01003621 int i;
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003622 u32 reg;
3623
3624 /* clear global FSR */
3625 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3626 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3627
Robin Murphy468f4942016-09-12 17:13:49 +01003628 /*
3629 * Reset stream mapping groups: Initial values mark all SMRn as
3630 * invalid and all S2CRn as bypass unless overridden.
3631 */
Patrick Daly59b6d202017-06-12 13:12:15 -07003632 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
3633 for (i = 0; i < smmu->num_mapping_groups; ++i)
3634 arm_smmu_write_sme(smmu, i);
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003635
Patrick Daly59b6d202017-06-12 13:12:15 -07003636 arm_smmu_context_bank_reset(smmu);
3637 }
Will Deacon1463fe42013-07-31 19:21:27 +01003638
Will Deacon45ae7cf2013-06-24 18:31:25 +01003639 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003640 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
3641 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
3642
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003643 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003644
Will Deacon45ae7cf2013-06-24 18:31:25 +01003645 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003646 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003647
3648 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003649 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003650
Robin Murphy25a1c962016-02-10 14:25:33 +00003651 /* Enable client access, handling unmatched streams as appropriate */
3652 reg &= ~sCR0_CLIENTPD;
3653 if (disable_bypass)
3654 reg |= sCR0_USFCFG;
3655 else
3656 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003657
3658 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003659 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003660
3661 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003662 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003663
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003664 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3665 reg |= sCR0_VMID16EN;
3666
Patrick Daly7f377fe2017-10-06 17:37:10 -07003667	/* Force bypass transactions to be Non-Shareable and not IO-coherent */
3668 reg &= ~(sCR0_SHCFG_MASK << sCR0_SHCFG_SHIFT);
Prakash Gupta673a79f2017-11-16 18:07:00 +05303669 reg |= sCR0_SHCFG_NSH << sCR0_SHCFG_SHIFT;
Patrick Daly7f377fe2017-10-06 17:37:10 -07003670
Will Deacon45ae7cf2013-06-24 18:31:25 +01003671 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003672 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003673 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Dalyd7476202016-09-08 18:23:28 -07003674
3675 /* Manage any implementation defined features */
3676 arm_smmu_arch_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003677}
3678
3679static int arm_smmu_id_size_to_bits(int size)
3680{
3681 switch (size) {
3682 case 0:
3683 return 32;
3684 case 1:
3685 return 36;
3686 case 2:
3687 return 40;
3688 case 3:
3689 return 42;
3690 case 4:
3691 return 44;
3692 case 5:
3693 default:
3694 return 48;
3695 }
3696}
3697
Patrick Dalyda688822017-05-17 20:12:48 -07003698
3699/*
3700 * Some context banks need to be transferred from bootloader to HLOS in a way
3701 * that allows ongoing traffic. The current expectation is that these context
3702 * banks operate in bypass mode.
3703 * Additionally, there must be exactly one device in devicetree with stream-ids
3704 * overlapping those used by the bootloader.
3705 */
3706static int arm_smmu_alloc_cb(struct iommu_domain *domain,
3707 struct arm_smmu_device *smmu,
3708 struct device *dev)
3709{
3710 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Dalye72526b2017-07-18 16:21:44 -07003711 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Dalyda688822017-05-17 20:12:48 -07003712 u32 i, idx;
3713 int cb = -EINVAL;
3714 bool dynamic;
3715
Patrick Dalye72526b2017-07-18 16:21:44 -07003716 /*
3717 * Dynamic domains have already set cbndx through domain attribute.
3718 * Verify that they picked a valid value.
3719 */
Patrick Dalyda688822017-05-17 20:12:48 -07003720 dynamic = is_dynamic_domain(domain);
Patrick Dalye72526b2017-07-18 16:21:44 -07003721 if (dynamic) {
3722 cb = smmu_domain->cfg.cbndx;
3723 if (cb < smmu->num_context_banks)
3724 return cb;
3725 else
3726 return -EINVAL;
3727 }
Patrick Dalyda688822017-05-17 20:12:48 -07003728
3729 mutex_lock(&smmu->stream_map_mutex);
3730 for_each_cfg_sme(fwspec, i, idx) {
3731 if (smmu->s2crs[idx].cb_handoff)
3732 cb = smmu->s2crs[idx].cbndx;
3733 }
3734
Shiraz Hashima28a4792018-01-13 00:39:52 +05303735 if (cb >= 0 && arm_smmu_is_static_cb(smmu)) {
Charan Teja Reddy35144b02017-09-05 16:20:46 +05303736 smmu_domain->slave_side_secure = true;
3737
Shiraz Hashima28a4792018-01-13 00:39:52 +05303738 if (arm_smmu_is_slave_side_secure(smmu_domain))
3739 for_each_cfg_sme(fwspec, i, idx)
3740 smmu->s2crs[idx].write_protected = true;
3741 }
3742
Charan Teja Reddyf0758df2017-09-04 18:52:07 +05303743 if (cb < 0 && !arm_smmu_is_static_cb(smmu)) {
Patrick Dalyda688822017-05-17 20:12:48 -07003744 mutex_unlock(&smmu->stream_map_mutex);
3745 return __arm_smmu_alloc_bitmap(smmu->context_map,
3746 smmu->num_s2_context_banks,
3747 smmu->num_context_banks);
3748 }
3749
3750 for (i = 0; i < smmu->num_mapping_groups; i++) {
Patrick Daly2eb31362017-06-14 18:29:36 -07003751 if (smmu->s2crs[i].cb_handoff && smmu->s2crs[i].cbndx == cb) {
Charan Teja Reddy35144b02017-09-05 16:20:46 +05303752 if (!arm_smmu_is_static_cb(smmu))
3753 smmu->s2crs[i].cb_handoff = false;
Patrick Dalyda688822017-05-17 20:12:48 -07003754 smmu->s2crs[i].count -= 1;
3755 }
3756 }
3757 mutex_unlock(&smmu->stream_map_mutex);
3758
3759 return cb;
3760}
3761
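/*
 * Record the SMR/S2CR state programmed by the bootloader so that entries of
 * type S2CR_TYPE_TRANS, and the context banks they point at, are preserved
 * across the driver's reset of the SMMU; all other entries are ignored.
 */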
3762static int arm_smmu_handoff_cbs(struct arm_smmu_device *smmu)
3763{
3764 u32 i, raw_smr, raw_s2cr;
3765 struct arm_smmu_smr smr;
3766 struct arm_smmu_s2cr s2cr;
3767
3768 for (i = 0; i < smmu->num_mapping_groups; i++) {
3769 raw_smr = readl_relaxed(ARM_SMMU_GR0(smmu) +
3770 ARM_SMMU_GR0_SMR(i));
3771 if (!(raw_smr & SMR_VALID))
3772 continue;
3773
3774 smr.mask = (raw_smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
3775 smr.id = (u16)raw_smr;
3776 smr.valid = true;
3777
3778 raw_s2cr = readl_relaxed(ARM_SMMU_GR0(smmu) +
3779 ARM_SMMU_GR0_S2CR(i));
Patrick Daly4ea4bdf2017-08-29 19:24:49 -07003780 memset(&s2cr, 0, sizeof(s2cr));
Patrick Dalyda688822017-05-17 20:12:48 -07003781 s2cr.group = NULL;
3782 s2cr.count = 1;
3783 s2cr.type = (raw_s2cr >> S2CR_TYPE_SHIFT) & S2CR_TYPE_MASK;
3784 s2cr.privcfg = (raw_s2cr >> S2CR_PRIVCFG_SHIFT) &
3785 S2CR_PRIVCFG_MASK;
3786 s2cr.cbndx = (u8)raw_s2cr;
3787 s2cr.cb_handoff = true;
3788
3789 if (s2cr.type != S2CR_TYPE_TRANS)
3790 continue;
3791
3792 smmu->smrs[i] = smr;
3793 smmu->s2crs[i] = s2cr;
3794 bitmap_set(smmu->context_map, s2cr.cbndx, 1);
3795 dev_dbg(smmu->dev, "Handoff smr: %x s2cr: %x cb: %d\n",
3796 raw_smr, raw_s2cr, s2cr.cbndx);
3797 }
3798
3799 return 0;
3800}
3801
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003802static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
3803{
3804 struct device *dev = smmu->dev;
3805 int i, ntuples, ret;
3806 u32 *tuples;
3807 struct arm_smmu_impl_def_reg *regs, *regit;
3808
3809 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
3810 return 0;
3811
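	/*
	 * The property is parsed as flat <offset value> pairs: implementation-
	 * defined register offsets and the values to program into them
	 * (presumably applied at attach time, as the property name suggests).
	 * A hypothetical example, with illustrative offsets/values only:
	 *
	 *	attach-impl-defs = <0x6000 0x270>,
	 *			   <0x6800 0x45f>;
	 */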
3812 ntuples /= sizeof(u32);
3813 if (ntuples % 2) {
3814 dev_err(dev,
3815 "Invalid number of attach-impl-defs registers: %d\n",
3816 ntuples);
3817 return -EINVAL;
3818 }
3819
3820 regs = devm_kmalloc(
3821 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
3822 GFP_KERNEL);
3823 if (!regs)
3824 return -ENOMEM;
3825
3826 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
3827 if (!tuples)
3828 return -ENOMEM;
3829
3830 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
3831 tuples, ntuples);
3832 if (ret)
3833 return ret;
3834
3835 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
3836 regit->offset = tuples[i];
3837 regit->value = tuples[i + 1];
3838 }
3839
3840 devm_kfree(dev, tuples);
3841
3842 smmu->impl_def_attach_registers = regs;
3843 smmu->num_impl_def_attach_registers = ntuples / 2;
3844
3845 return 0;
3846}
3847
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003848
3849static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003850{
3851 const char *cname;
3852 struct property *prop;
3853 int i;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003854 struct device *dev = pwr->dev;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003855
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003856 pwr->num_clocks =
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003857 of_property_count_strings(dev->of_node, "clock-names");
3858
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003859 if (pwr->num_clocks < 1) {
3860 pwr->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003861 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07003862 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003863
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003864 pwr->clocks = devm_kzalloc(
3865 dev, sizeof(*pwr->clocks) * pwr->num_clocks,
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003866 GFP_KERNEL);
3867
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003868 if (!pwr->clocks)
3869 return -ENOMEM;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003870
3871 i = 0;
3872 of_property_for_each_string(dev->of_node, "clock-names",
3873 prop, cname) {
3874 struct clk *c = devm_clk_get(dev, cname);
3875
3876 if (IS_ERR(c)) {
3877 dev_err(dev, "Couldn't get clock: %s\n",
3878 cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07003879 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003880 }
3881
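		/*
		 * If no rate has been configured yet, default the clock to
		 * the supported rate closest to 1 kHz (typically its
		 * minimum) so it comes up at a sane rate.
		 */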
3882 if (clk_get_rate(c) == 0) {
3883 long rate = clk_round_rate(c, 1000);
3884
3885 clk_set_rate(c, rate);
3886 }
3887
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003888 pwr->clocks[i] = c;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003889
3890 ++i;
3891 }
3892 return 0;
3893}
3894
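/*
 * Keep the SMMU consistent across GDSC power transitions: halt it just
 * before its regulator is disabled, and restore the secure configuration
 * and resume it once the regulator comes back up. This relies on the
 * regulator framework delivering REGULATOR_EVENT_PRE_DISABLE and
 * REGULATOR_EVENT_ENABLE for the GDSC this notifier is registered against.
 */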
Charan Teja Reddyf8464882017-12-05 20:29:05 +05303895static int regulator_notifier(struct notifier_block *nb,
3896 unsigned long event, void *data)
3897{
3898 int ret = 0;
3899 struct arm_smmu_device *smmu = container_of(nb, struct arm_smmu_device,
3900 regulator_nb);
3901
3902 if (event != REGULATOR_EVENT_PRE_DISABLE &&
3903 event != REGULATOR_EVENT_ENABLE)
3904 return NOTIFY_OK;
3905
3906 ret = arm_smmu_prepare_clocks(smmu->pwr);
3907 if (ret)
3908 goto out;
3909
3910 ret = arm_smmu_power_on_atomic(smmu->pwr);
3911 if (ret)
3912 goto unprepare_clock;
3913
3914 if (event == REGULATOR_EVENT_PRE_DISABLE)
3915 qsmmuv2_halt(smmu);
3916 else if (event == REGULATOR_EVENT_ENABLE) {
Charan Teja Reddyec6f7822018-01-10 17:32:52 +05303917 if (arm_smmu_restore_sec_cfg(smmu, 0))
Charan Teja Reddyf8464882017-12-05 20:29:05 +05303918 goto power_off;
3919 qsmmuv2_resume(smmu);
3920 }
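	/* Both paths fall through to drop the atomic power vote taken above. */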
3921power_off:
3922 arm_smmu_power_off_atomic(smmu->pwr);
3923unprepare_clock:
3924 arm_smmu_unprepare_clocks(smmu->pwr);
3925out:
3926 return NOTIFY_OK;
3927}
3928
3929static int register_regulator_notifier(struct arm_smmu_device *smmu)
3930{
3931 struct device *dev = smmu->dev;
3932 struct regulator_bulk_data *consumers;
3933 int ret = 0, num_consumers;
3934 struct arm_smmu_power_resources *pwr = smmu->pwr;
3935
3936 if (!(smmu->options & ARM_SMMU_OPT_HALT))
3937 goto out;
3938
3939 num_consumers = pwr->num_gdscs;
3940 consumers = pwr->gdscs;
3941
3942 if (!num_consumers) {
3943 dev_info(dev, "no regulator info exists for %s\n",
3944 dev_name(dev));
3945 goto out;
3946 }
3947
3948 smmu->regulator_nb.notifier_call = regulator_notifier;
3949 /* Registering the notifier against one GDSC is sufficient, as
3950 * we enable and disable the regulators as a group.
3951 */
3952 ret = regulator_register_notifier(consumers[0].consumer,
3953 &smmu->regulator_nb);
3954 if (ret)
3955 dev_err(dev, "Regulator notifier request failed\n");
3956out:
3957 return ret;
3958}
3959
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003960static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003961{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003962 const char *cname;
3963 struct property *prop;
3964 int i, ret = 0;
3965 struct device *dev = pwr->dev;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003966
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003967 pwr->num_gdscs =
3968 of_property_count_strings(dev->of_node, "qcom,regulator-names");
3969
3970 if (pwr->num_gdscs < 1) {
3971 pwr->num_gdscs = 0;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003972 return 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003973 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003974
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003975 pwr->gdscs = devm_kzalloc(
3976 dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
3977
3978 if (!pwr->gdscs)
3979 return -ENOMEM;
3980
Prakash Guptafad87ca2017-05-16 12:13:02 +05303981 if (!of_property_read_u32(dev->of_node,
3982 "qcom,deferred-regulator-disable-delay",
3983 &(pwr->regulator_defer)))
3984 dev_info(dev, "regulator defer delay %d\n",
3985 pwr->regulator_defer);
3986
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003987 i = 0;
3988 of_property_for_each_string(dev->of_node, "qcom,regulator-names",
3989 prop, cname)
Patrick Daly86396be2017-04-17 18:08:45 -07003990 pwr->gdscs[i++].supply = cname;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003991
3992 ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
3993 return ret;
3994}
3995
3996static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
3997{
3998 struct device *dev = pwr->dev;
3999
4000 /* We don't want the bus APIs to print an error message */
4001 if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
4002 dev_dbg(dev, "No bus scaling info\n");
4003 return 0;
4004 }
4005
4006 pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
4007 if (!pwr->bus_dt_data) {
4008 dev_err(dev, "Unable to read bus-scaling from devicetree\n");
4009 return -EINVAL;
4010 }
4011
4012 pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
4013 if (!pwr->bus_client) {
4014 dev_err(dev, "Bus client registration failed\n");
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004015 return -EINVAL;
4016 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004017
4018 return 0;
4019}
4020
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004021/*
4022 * Cleanup done by devm. Any non-devm resources must clean up themselves.
4023 */
4024static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
4025 struct platform_device *pdev)
Patrick Daly2764f952016-09-06 19:22:44 -07004026{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004027 struct arm_smmu_power_resources *pwr;
4028 int ret;
Patrick Daly2764f952016-09-06 19:22:44 -07004029
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004030 pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
4031 if (!pwr)
4032 return ERR_PTR(-ENOMEM);
Patrick Daly2764f952016-09-06 19:22:44 -07004033
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004034 pwr->dev = &pdev->dev;
4035 pwr->pdev = pdev;
4036 mutex_init(&pwr->power_lock);
4037 spin_lock_init(&pwr->clock_refs_lock);
Patrick Daly2764f952016-09-06 19:22:44 -07004038
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004039 ret = arm_smmu_init_clocks(pwr);
4040 if (ret)
4041 return ERR_PTR(ret);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004042
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004043 ret = arm_smmu_init_regulators(pwr);
4044 if (ret)
4045 return ERR_PTR(ret);
4046
4047 ret = arm_smmu_init_bus_scaling(pwr);
4048 if (ret)
4049 return ERR_PTR(ret);
4050
4051 return pwr;
Patrick Daly2764f952016-09-06 19:22:44 -07004052}
4053
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004054/*
Patrick Dalyabeee952017-04-13 18:14:59 -07004055 * Bus APIs are not devm-safe.
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004056 */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004057static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004058{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004059 msm_bus_scale_unregister_client(pwr->bus_client);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004060}
4061
Will Deacon45ae7cf2013-06-24 18:31:25 +01004062static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
4063{
4064 unsigned long size;
4065 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
4066 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01004067 bool cttw_dt, cttw_reg;
Robin Murphya754fd12016-09-12 17:13:50 +01004068 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004069
Charan Teja Reddyec6f7822018-01-10 17:32:52 +05304070 if (arm_smmu_restore_sec_cfg(smmu, 0))
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304071 return -ENODEV;
4072
Mitchel Humpherysba822582015-10-20 11:37:41 -07004073 dev_dbg(smmu->dev, "probing hardware configuration...\n");
4074 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01004075 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004076
4077 /* ID0 */
4078 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01004079
4080 /* Restrict available stages based on module parameter */
4081 if (force_stage == 1)
4082 id &= ~(ID0_S2TS | ID0_NTS);
4083 else if (force_stage == 2)
4084 id &= ~(ID0_S1TS | ID0_NTS);
4085
Will Deacon45ae7cf2013-06-24 18:31:25 +01004086 if (id & ID0_S1TS) {
4087 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004088 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004089 }
4090
4091 if (id & ID0_S2TS) {
4092 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004093 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004094 }
4095
4096 if (id & ID0_NTS) {
4097 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004098 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004099 }
4100
4101 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01004102 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004103 dev_err(smmu->dev, "\tno translation support!\n");
4104 return -ENODEV;
4105 }
4106
Robin Murphyb7862e32016-04-13 18:13:03 +01004107 if ((id & ID0_S1TS) &&
4108 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00004109 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004110 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00004111 }
4112
Robin Murphybae2c2d2015-07-29 19:46:05 +01004113 /*
4114 * In order for DMA API calls to work properly, we must defer to what
4115 * the DT says about coherency, regardless of what the hardware claims.
4116 * Fortunately, this also opens up a workaround for systems where the
4117 * ID register value has ended up configured incorrectly.
4118 */
4119 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
4120 cttw_reg = !!(id & ID0_CTTW);
4121 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01004122 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01004123 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004124 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01004125 cttw_dt ? "" : "non-");
4126 if (cttw_dt != cttw_reg)
4127 dev_notice(smmu->dev,
4128 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004129
Robin Murphy53867802016-09-12 17:13:48 +01004130 /* Max. number of entries we have for stream matching/indexing */
4131 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
4132 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004133 if (id & ID0_SMS) {
Robin Murphy53867802016-09-12 17:13:48 +01004134 u32 smr;
Patrick Daly937de532016-12-12 18:44:09 -08004135 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004136
4137 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy53867802016-09-12 17:13:48 +01004138 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
4139 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004140 dev_err(smmu->dev,
4141 "stream-matching supported, but no SMRs present!\n");
4142 return -ENODEV;
4143 }
4144
Robin Murphy53867802016-09-12 17:13:48 +01004145 /*
4146 * SMR.ID bits may not be preserved if the corresponding MASK
4147 * bits are set, so check each one separately. We can reject
4148 * masters later if they try to claim IDs outside these masks.
4149 */
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304150 if (!arm_smmu_is_static_cb(smmu)) {
4151 for (i = 0; i < size; i++) {
4152 smr = readl_relaxed(
4153 gr0_base + ARM_SMMU_GR0_SMR(i));
4154 if (!(smr & SMR_VALID))
4155 break;
4156 }
4157 if (i == size) {
4158 dev_err(smmu->dev,
4159 "Unable to compute streamid_masks\n");
4160 return -ENODEV;
4161 }
4162
4163 smr = smmu->streamid_mask << SMR_ID_SHIFT;
4164 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
Patrick Daly937de532016-12-12 18:44:09 -08004165 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304166 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
Patrick Daly937de532016-12-12 18:44:09 -08004167
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304168 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
4169 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
4170 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
4171 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
4172 } else {
4173 smmu->smr_mask_mask = SMR_MASK_MASK;
4174 smmu->streamid_mask = SID_MASK;
4175 }
Dhaval Patel031d7462015-05-09 14:47:29 -07004176
Robin Murphy468f4942016-09-12 17:13:49 +01004177 /* Zero-initialised to mark as invalid */
4178 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
4179 GFP_KERNEL);
4180 if (!smmu->smrs)
4181 return -ENOMEM;
4182
Robin Murphy53867802016-09-12 17:13:48 +01004183 dev_notice(smmu->dev,
4184 "\tstream matching with %lu register groups, mask 0x%x",
4185 size, smmu->smr_mask_mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004186 }
Robin Murphya754fd12016-09-12 17:13:50 +01004187 /* s2cr->type == 0 means translation, so initialise explicitly */
4188 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
4189 GFP_KERNEL);
4190 if (!smmu->s2crs)
4191 return -ENOMEM;
4192 for (i = 0; i < size; i++)
4193 smmu->s2crs[i] = s2cr_init_val;
4194
Robin Murphy53867802016-09-12 17:13:48 +01004195 smmu->num_mapping_groups = size;
Robin Murphy6668f692016-09-12 17:13:54 +01004196 mutex_init(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004197
Robin Murphy7602b872016-04-28 17:12:09 +01004198 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
4199 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
4200 if (!(id & ID0_PTFS_NO_AARCH32S))
4201 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
4202 }
4203
Will Deacon45ae7cf2013-06-24 18:31:25 +01004204 /* ID1 */
4205 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01004206 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004207
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01004208 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00004209 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01004210 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01004211 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07004212 dev_warn(smmu->dev,
4213 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
4214 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004215
Will Deacon518f7132014-11-14 17:17:54 +00004216 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004217 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
4218 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
4219 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
4220 return -ENODEV;
4221 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07004222 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01004223 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01004224 /*
4225 * Cavium CN88xx erratum #27704.
4226 * Ensure ASID and VMID allocation is unique across all SMMUs in
4227 * the system.
4228 */
4229 if (smmu->model == CAVIUM_SMMUV2) {
4230 smmu->cavium_id_base =
4231 atomic_add_return(smmu->num_context_banks,
4232 &cavium_smmu_context_count);
4233 smmu->cavium_id_base -= smmu->num_context_banks;
4234 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004235
4236 /* ID2 */
4237 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
4238 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00004239 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004240
Will Deacon518f7132014-11-14 17:17:54 +00004241 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01004242 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00004243 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004244
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08004245 if (id & ID2_VMID16)
4246 smmu->features |= ARM_SMMU_FEAT_VMID16;
4247
Robin Murphyf1d84542015-03-04 16:41:05 +00004248 /*
4249 * What the page table walker can address actually depends on which
4250 * descriptor format is in use, but since a) we don't know that yet,
4251 * and b) it can vary per context bank, this will have to do...
4252 */
4253 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
4254 dev_warn(smmu->dev,
4255 "failed to set DMA mask for table walker\n");
4256
Robin Murphyb7862e32016-04-13 18:13:03 +01004257 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00004258 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01004259 if (smmu->version == ARM_SMMU_V1_64K)
4260 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004261 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004262 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00004263 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00004264 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01004265 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00004266 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01004267 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00004268 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01004269 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004270 }
4271
Robin Murphy7602b872016-04-28 17:12:09 +01004272 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01004273 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01004274 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01004275 if (smmu->features &
4276 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01004277 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01004278 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01004279 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01004280 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01004281 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01004282
Robin Murphyd5466352016-05-09 17:20:09 +01004283 if (arm_smmu_ops.pgsize_bitmap == -1UL)
4284 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
4285 else
4286 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004287 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01004288 smmu->pgsize_bitmap);
4289
Will Deacon518f7132014-11-14 17:17:54 +00004290
Will Deacon28d60072014-09-01 16:24:48 +01004291 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004292 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
4293 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01004294
4295 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004296 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
4297 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01004298
Will Deacon45ae7cf2013-06-24 18:31:25 +01004299 return 0;
4300}
4301
Robin Murphy67b65a32016-04-13 18:12:57 +01004302struct arm_smmu_match_data {
4303 enum arm_smmu_arch_version version;
4304 enum arm_smmu_implementation model;
Patrick Dalyd7476202016-09-08 18:23:28 -07004305 struct arm_smmu_arch_ops *arch_ops;
Robin Murphy67b65a32016-04-13 18:12:57 +01004306};
4307
Patrick Dalyd7476202016-09-08 18:23:28 -07004308#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops) \
4309static struct arm_smmu_match_data name = { \
4310.version = ver, \
4311.model = imp, \
4312.arch_ops = ops, \
4313} \
Robin Murphy67b65a32016-04-13 18:12:57 +01004314
Patrick Daly1f8a2882016-09-12 17:32:05 -07004315struct arm_smmu_arch_ops qsmmuv500_arch_ops;
4316
Patrick Dalyd7476202016-09-08 18:23:28 -07004317ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
4318ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
4319ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
4320ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
4321ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
Patrick Dalyad441dd2016-09-15 15:50:46 -07004322ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004323ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
4324 &qsmmuv500_arch_ops);
Robin Murphy67b65a32016-04-13 18:12:57 +01004325
Joerg Roedel09b52692014-10-02 12:24:45 +02004326static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01004327 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
4328 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
4329 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01004330 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01004331 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01004332 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07004333 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Patrick Daly1f8a2882016-09-12 17:32:05 -07004334 { .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
Robin Murphy09360402014-08-28 17:51:59 +01004335 { },
4336};
4337MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
4338
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304339#ifdef CONFIG_MSM_TZ_SMMU
4340int register_iommu_sec_ptbl(void)
4341{
4342 struct device_node *np;
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004343
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304344 for_each_matching_node(np, arm_smmu_of_match)
4345 if (of_find_property(np, "qcom,tz-device-id", NULL) &&
4346 of_device_is_available(np))
4347 break;
4348 if (!np)
4349 return -ENODEV;
4350
4351 of_node_put(np);
4352
4353 return msm_iommu_sec_pgtbl_init();
4354}
4355#endif
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004356static int arm_smmu_of_iommu_configure_fixup(struct device *dev, void *data)
4357{
4358 if (!dev->iommu_fwspec)
4359 of_iommu_configure(dev, dev->of_node);
4360 return 0;
4361}
4362
Patrick Daly000a2f22017-02-13 22:18:12 -08004363static int arm_smmu_add_device_fixup(struct device *dev, void *data)
4364{
4365 struct iommu_ops *ops = data;
4366
4367 ops->add_device(dev);
4368 return 0;
4369}
4370
Patrick Daly1f8a2882016-09-12 17:32:05 -07004371static int qsmmuv500_tbu_register(struct device *dev, void *data);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004372static int arm_smmu_device_dt_probe(struct platform_device *pdev)
4373{
Robin Murphy67b65a32016-04-13 18:12:57 +01004374 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004375 struct resource *res;
4376 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004377 struct device *dev = &pdev->dev;
Robin Murphyd5b41782016-09-14 15:21:39 +01004378 int num_irqs, i, err;
Robin Murphy7e96c742016-09-14 15:26:46 +01004379 bool legacy_binding;
4380
4381 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
4382 if (legacy_binding && !using_generic_binding) {
4383 if (!using_legacy_binding)
4384 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
4385 using_legacy_binding = true;
4386 } else if (!legacy_binding && !using_legacy_binding) {
4387 using_generic_binding = true;
4388 } else {
4389 dev_err(dev, "not probing due to mismatched DT properties\n");
4390 return -ENODEV;
4391 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004392
4393 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
4394 if (!smmu) {
4395 dev_err(dev, "failed to allocate arm_smmu_device\n");
4396 return -ENOMEM;
4397 }
4398 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08004399 spin_lock_init(&smmu->atos_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07004400 idr_init(&smmu->asid_idr);
4401 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004402
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004403 data = of_device_get_match_data(dev);
Robin Murphy67b65a32016-04-13 18:12:57 +01004404 smmu->version = data->version;
4405 smmu->model = data->model;
Patrick Dalyd7476202016-09-08 18:23:28 -07004406 smmu->arch_ops = data->arch_ops;
Robin Murphy09360402014-08-28 17:51:59 +01004407
Will Deacon45ae7cf2013-06-24 18:31:25 +01004408 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304409 if (res)
4410 smmu->phys_addr = res->start;
Julia Lawall8a7f4312013-08-19 12:20:37 +01004411 smmu->base = devm_ioremap_resource(dev, res);
4412 if (IS_ERR(smmu->base))
4413 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004414 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004415
4416 if (of_property_read_u32(dev->of_node, "#global-interrupts",
4417 &smmu->num_global_irqs)) {
4418 dev_err(dev, "missing #global-interrupts property\n");
4419 return -ENODEV;
4420 }
4421
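	/*
	 * The interrupt list carries the global fault interrupts first;
	 * every interrupt beyond #global-interrupts is counted as a
	 * context-bank interrupt.
	 */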
4422 num_irqs = 0;
4423 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
4424 num_irqs++;
4425 if (num_irqs > smmu->num_global_irqs)
4426 smmu->num_context_irqs++;
4427 }
4428
Andreas Herrmann44a08de2013-10-01 13:39:07 +01004429 if (!smmu->num_context_irqs) {
4430 dev_err(dev, "found %d interrupts but expected at least %d\n",
4431 num_irqs, smmu->num_global_irqs + 1);
4432 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004433 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004434
4435 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
4436 GFP_KERNEL);
4437 if (!smmu->irqs) {
4438 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
4439 return -ENOMEM;
4440 }
4441
4442 for (i = 0; i < num_irqs; ++i) {
4443 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07004444
Will Deacon45ae7cf2013-06-24 18:31:25 +01004445 if (irq < 0) {
4446 dev_err(dev, "failed to get irq index %d\n", i);
4447 return -ENODEV;
4448 }
4449 smmu->irqs[i] = irq;
4450 }
4451
Dhaval Patel031d7462015-05-09 14:47:29 -07004452 parse_driver_options(smmu);
4453
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004454 smmu->pwr = arm_smmu_init_power_resources(pdev);
4455 if (IS_ERR(smmu->pwr))
4456 return PTR_ERR(smmu->pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004457
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004458 err = arm_smmu_power_on(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004459 if (err)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004460 goto out_exit_power_resources;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004461
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304462 smmu->sec_id = msm_dev_to_device_id(dev);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004463 err = arm_smmu_device_cfg_probe(smmu);
4464 if (err)
4465 goto out_power_off;
4466
Patrick Dalyda688822017-05-17 20:12:48 -07004467 err = arm_smmu_handoff_cbs(smmu);
4468 if (err)
4469 goto out_power_off;
4470
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004471 err = arm_smmu_parse_impl_def_registers(smmu);
4472 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004473 goto out_power_off;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004474
Robin Murphyb7862e32016-04-13 18:13:03 +01004475 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01004476 smmu->num_context_banks != smmu->num_context_irqs) {
4477 dev_err(dev,
Mitchel Humpherys73230ce2014-12-11 17:18:02 -08004478 "found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
4479 smmu->num_context_irqs, smmu->num_context_banks,
4480 smmu->num_context_banks);
4481 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004482 }
4483
Will Deacon45ae7cf2013-06-24 18:31:25 +01004484 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08004485 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
4486 NULL, arm_smmu_global_fault,
4487 IRQF_ONESHOT | IRQF_SHARED,
4488 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004489 if (err) {
4490 dev_err(dev, "failed to request global IRQ %d (%u)\n",
4491 i, smmu->irqs[i]);
Robin Murphyd5b41782016-09-14 15:21:39 +01004492 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004493 }
4494 }
4495
Patrick Dalyd7476202016-09-08 18:23:28 -07004496 err = arm_smmu_arch_init(smmu);
4497 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004498 goto out_power_off;
Patrick Dalyd7476202016-09-08 18:23:28 -07004499
Robin Murphy06e393e2016-09-12 17:13:55 +01004500 of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004501 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01004502 arm_smmu_device_reset(smmu);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004503 arm_smmu_power_off(smmu->pwr);
Patrick Dalyd7476202016-09-08 18:23:28 -07004504
Patrick Daly8e3371a2017-02-13 22:14:53 -08004505 INIT_LIST_HEAD(&smmu->list);
4506 spin_lock(&arm_smmu_devices_lock);
4507 list_add(&smmu->list, &arm_smmu_devices);
4508 spin_unlock(&arm_smmu_devices_lock);
4509
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004510 /* bus_set_iommu depends on this. */
4511 bus_for_each_dev(&platform_bus_type, NULL, NULL,
4512 arm_smmu_of_iommu_configure_fixup);
4513
Robin Murphy7e96c742016-09-14 15:26:46 +01004514 /* Oh, for a proper bus abstraction */
4515 if (!iommu_present(&platform_bus_type))
4516 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
Patrick Daly000a2f22017-02-13 22:18:12 -08004517 else
4518 bus_for_each_dev(&platform_bus_type, NULL, &arm_smmu_ops,
4519 arm_smmu_add_device_fixup);
Charan Teja Reddyf8464882017-12-05 20:29:05 +05304520
4521 err = register_regulator_notifier(smmu);
4522 if (err)
4523 goto out_power_off;
4524
Robin Murphy7e96c742016-09-14 15:26:46 +01004525#ifdef CONFIG_ARM_AMBA
4526 if (!iommu_present(&amba_bustype))
4527 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
4528#endif
4529#ifdef CONFIG_PCI
4530 if (!iommu_present(&pci_bus_type)) {
4531 pci_request_acs();
4532 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
4533 }
4534#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01004535 return 0;
4536
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004537out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004538 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004539
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004540out_exit_power_resources:
4541 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004542
Will Deacon45ae7cf2013-06-24 18:31:25 +01004543 return err;
4544}
4545
4546static int arm_smmu_device_remove(struct platform_device *pdev)
4547{
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004548 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004549
4550 if (!smmu)
4551 return -ENODEV;
4552
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004553 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07004554 return -EINVAL;
4555
Will Deaconecfadb62013-07-31 19:21:28 +01004556 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004557 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004558
Patrick Dalyc190d932016-08-30 17:23:28 -07004559 idr_destroy(&smmu->asid_idr);
4560
Will Deacon45ae7cf2013-06-24 18:31:25 +01004561 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07004562 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004563 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004564
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004565 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004566
Will Deacon45ae7cf2013-06-24 18:31:25 +01004567 return 0;
4568}
4569
Will Deacon45ae7cf2013-06-24 18:31:25 +01004570static struct platform_driver arm_smmu_driver = {
4571 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004572 .name = "arm-smmu",
4573 .of_match_table = of_match_ptr(arm_smmu_of_match),
4574 },
4575 .probe = arm_smmu_device_dt_probe,
4576 .remove = arm_smmu_device_remove,
4577};
4578
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004579static struct platform_driver qsmmuv500_tbu_driver;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004580static int __init arm_smmu_init(void)
4581{
Robin Murphy7e96c742016-09-14 15:26:46 +01004582 static bool registered;
4583 int ret = 0;
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004584 ktime_t cur;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004585
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004586 if (registered)
4587 return 0;
4588
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004589 cur = ktime_get();
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004590 ret = platform_driver_register(&qsmmuv500_tbu_driver);
4591 if (ret)
4592 return ret;
4593
4594 ret = platform_driver_register(&arm_smmu_driver);
Charan Teja Reddy35144b02017-09-05 16:20:46 +05304595#ifdef CONFIG_MSM_TZ_SMMU
4596 ret = register_iommu_sec_ptbl();
4597#endif
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004598 registered = !ret;
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004599 trace_smmu_init(ktime_us_delta(ktime_get(), cur));
4600
Robin Murphy7e96c742016-09-14 15:26:46 +01004601 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004602}
4603
4604static void __exit arm_smmu_exit(void)
4605{
4606 return platform_driver_unregister(&arm_smmu_driver);
4607}
4608
Andreas Herrmannb1950b22013-10-01 13:39:05 +01004609subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004610module_exit(arm_smmu_exit);
4611
Robin Murphy7e96c742016-09-14 15:26:46 +01004612static int __init arm_smmu_of_init(struct device_node *np)
4613{
4614 int ret = arm_smmu_init();
4615
4616 if (ret)
4617 return ret;
4618
4619 if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
4620 return -ENODEV;
4621
4622 return 0;
4623}
4624IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
4625IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
4626IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
4627IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
4628IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
4629IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
Robin Murphy7e96c742016-09-14 15:26:46 +01004630
Patrick Dalya0fddb62017-03-27 19:26:59 -07004631#define TCU_HW_VERSION_HLOS1 (0x18)
4632
Patrick Daly1f8a2882016-09-12 17:32:05 -07004633#define DEBUG_SID_HALT_REG 0x0
4634#define DEBUG_SID_HALT_VAL (0x1 << 16)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004635#define DEBUG_SID_HALT_SID_MASK 0x3ff
4636
4637#define DEBUG_VA_ADDR_REG 0x8
4638
4639#define DEBUG_TXN_TRIGG_REG 0x18
4640#define DEBUG_TXN_AXPROT_SHIFT 6
4641#define DEBUG_TXN_AXCACHE_SHIFT 2
4642#define DEBUG_TRX_WRITE (0x1 << 1)
4643#define DEBUG_TXN_READ (0x0 << 1)
4644#define DEBUG_TXN_TRIGGER 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004645
4646#define DEBUG_SR_HALT_ACK_REG 0x20
4647#define DEBUG_SR_HALT_ACK_VAL (0x1 << 1)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004648#define DEBUG_SR_ECATS_RUNNING_VAL (0x1 << 0)
4649
4650#define DEBUG_PAR_REG 0x28
4651#define DEBUG_PAR_PA_MASK ((0x1ULL << 36) - 1)
4652#define DEBUG_PAR_PA_SHIFT 12
4653#define DEBUG_PAR_FAULT_VAL 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004654
Patrick Daly8c1202b2017-05-10 15:42:30 -07004655#define TBU_DBG_TIMEOUT_US 100
Patrick Daly1f8a2882016-09-12 17:32:05 -07004656
Patrick Daly23301482017-10-12 16:18:25 -07004657#define QSMMUV500_ACTLR_DEEP_PREFETCH_MASK 0x3
4658#define QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT 0x8
4659
Patrick Daly03330cc2017-08-11 14:56:38 -07004660
4661struct actlr_setting {
4662 struct arm_smmu_smr smr;
4663 u32 actlr;
4664};
4665
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004666struct qsmmuv500_archdata {
4667 struct list_head tbus;
Patrick Dalya0fddb62017-03-27 19:26:59 -07004668 void __iomem *tcu_base;
4669 u32 version;
Patrick Dalyda765c62017-09-11 16:31:07 -07004670
4671 struct actlr_setting *actlrs;
4672 u32 actlr_tbl_size;
4673
4674 struct arm_smmu_smr *errata1_clients;
4675 u32 num_errata1_clients;
4676 remote_spinlock_t errata1_lock;
4677 ktime_t last_tlbi_ktime;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004678};
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004679#define get_qsmmuv500_archdata(smmu) \
4680 ((struct qsmmuv500_archdata *)(smmu->archdata))
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004681
Patrick Daly1f8a2882016-09-12 17:32:05 -07004682struct qsmmuv500_tbu_device {
4683 struct list_head list;
4684 struct device *dev;
4685 struct arm_smmu_device *smmu;
4686 void __iomem *base;
4687 void __iomem *status_reg;
4688
4689 struct arm_smmu_power_resources *pwr;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004690 u32 sid_start;
4691 u32 num_sids;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004692
4693 /* Protects halt count */
4694 spinlock_t halt_lock;
4695 u32 halt_count;
4696};
4697
Patrick Daly03330cc2017-08-11 14:56:38 -07004698struct qsmmuv500_group_iommudata {
4699 bool has_actlr;
4700 u32 actlr;
4701};
4702#define to_qsmmuv500_group_iommudata(group) \
4703 ((struct qsmmuv500_group_iommudata *) \
4704 (iommu_group_get_iommudata(group)))
4705
4706
4707static bool arm_smmu_fwspec_match_smr(struct iommu_fwspec *fwspec,
Patrick Dalyda765c62017-09-11 16:31:07 -07004708 struct arm_smmu_smr *smr)
4709{
4710 struct arm_smmu_smr *smr2;
Patrick Daly03330cc2017-08-11 14:56:38 -07004711 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Patrick Dalyda765c62017-09-11 16:31:07 -07004712 int i, idx;
4713
Patrick Daly03330cc2017-08-11 14:56:38 -07004714 for_each_cfg_sme(fwspec, i, idx) {
4715 smr2 = &smmu->smrs[idx];
Patrick Dalyda765c62017-09-11 16:31:07 -07004716 /* Continue if table entry does not match */
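		/*
		 * ID bits covered by either mask are "don't care": two
		 * entries match when their IDs agree on every bit that
		 * neither mask ignores. For example (hypothetical values),
		 * id 0x840 with mask 0x7f matches id 0x861, since the
		 * differing bits all fall inside the mask.
		 */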
4717 if ((smr->id ^ smr2->id) & ~(smr->mask | smr2->mask))
4718 continue;
4719 return true;
4720 }
4721 return false;
4722}
4723
4724#define ERRATA1_REMOTE_SPINLOCK "S:6"
4725#define ERRATA1_TLBI_INTERVAL_US 10
4726static bool
4727qsmmuv500_errata1_required(struct arm_smmu_domain *smmu_domain,
4728 struct qsmmuv500_archdata *data)
4729{
4730 bool ret = false;
4731 int j;
4732 struct arm_smmu_smr *smr;
Patrick Daly03330cc2017-08-11 14:56:38 -07004733 struct iommu_fwspec *fwspec;
Patrick Dalyda765c62017-09-11 16:31:07 -07004734
4735 if (smmu_domain->qsmmuv500_errata1_init)
4736 return smmu_domain->qsmmuv500_errata1_client;
4737
Patrick Daly03330cc2017-08-11 14:56:38 -07004738 fwspec = smmu_domain->dev->iommu_fwspec;
Patrick Dalyda765c62017-09-11 16:31:07 -07004739 for (j = 0; j < data->num_errata1_clients; j++) {
4740 smr = &data->errata1_clients[j];
Patrick Daly03330cc2017-08-11 14:56:38 -07004741 if (arm_smmu_fwspec_match_smr(fwspec, smr)) {
Patrick Dalyda765c62017-09-11 16:31:07 -07004742 ret = true;
4743 break;
4744 }
4745 }
4746
4747 smmu_domain->qsmmuv500_errata1_init = true;
4748 smmu_domain->qsmmuv500_errata1_client = ret;
4749 return ret;
4750}
4751
Patrick Daly86960052017-12-04 18:53:13 -08004752#define SCM_CONFIG_ERRATA1_CLIENT_ALL 0x2
4753#define SCM_CONFIG_ERRATA1 0x3
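/*
 * Issue a TLBIALL and sync; if the sync does not complete quickly, work
 * around the erratum: ask TZ to disable the errata configuration, throttle
 * the NoC until TLBSYNC completes, then undo both. A sync that still hangs
 * leaves the hardware in a bad state, hence the BUG() calls.
 */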
Patrick Dalyda765c62017-09-11 16:31:07 -07004754static void __qsmmuv500_errata1_tlbiall(struct arm_smmu_domain *smmu_domain)
4755{
4756 struct arm_smmu_device *smmu = smmu_domain->smmu;
4757 struct device *dev = smmu_domain->dev;
4758 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
4759 void __iomem *base;
Patrick Daly86960052017-12-04 18:53:13 -08004760 int ret;
Patrick Dalyda765c62017-09-11 16:31:07 -07004761 ktime_t cur;
4762 u32 val;
Patrick Daly86960052017-12-04 18:53:13 -08004763 struct scm_desc desc = {
4764 .args[0] = SCM_CONFIG_ERRATA1_CLIENT_ALL,
4765 .args[1] = false,
4766 .arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
4767 };
Patrick Dalyda765c62017-09-11 16:31:07 -07004768
4769 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
4770 writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
4771 writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
Patrick Daly86960052017-12-04 18:53:13 -08004772 if (!readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
4773 !(val & TLBSTATUS_SACTIVE), 0, 100))
4774 return;
4775
4776 ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_SMMU_PROGRAM,
4777 SCM_CONFIG_ERRATA1),
4778 &desc);
4779 if (ret) {
4780 dev_err(smmu->dev, "Calling into TZ to disable ERRATA1 failed - IOMMU hardware in bad state\n");
4781 BUG();
4782 return;
4783 }
4784
4785 cur = ktime_get();
4786 trace_tlbi_throttle_start(dev, 0);
4787 msm_bus_noc_throttle_wa(true);
4788
Patrick Dalyda765c62017-09-11 16:31:07 -07004789 if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
Patrick Daly86960052017-12-04 18:53:13 -08004790 !(val & TLBSTATUS_SACTIVE), 0, 10000)) {
4791 dev_err(smmu->dev, "ERRATA1 TLBSYNC timeout - IOMMU hardware in bad state\n");
4792 trace_tlbsync_timeout(dev, 0);
4793 BUG();
4794 }
Patrick Dalyda765c62017-09-11 16:31:07 -07004795
Patrick Daly86960052017-12-04 18:53:13 -08004796 msm_bus_noc_throttle_wa(false);
4797 trace_tlbi_throttle_end(dev, ktime_us_delta(ktime_get(), cur));
Patrick Dalyda765c62017-09-11 16:31:07 -07004798
Patrick Daly86960052017-12-04 18:53:13 -08004799 desc.args[1] = true;
4800 ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_SMMU_PROGRAM,
4801 SCM_CONFIG_ERRATA1),
4802 &desc);
4803 if (ret) {
4804 dev_err(smmu->dev, "Calling into TZ to reenable ERRATA1 failed - IOMMU hardware in bad state\n");
4805 BUG();
Patrick Dalyda765c62017-09-11 16:31:07 -07004806 }
4807}
4808
4809/* Must be called with clocks/regulators enabled */
4810static void qsmmuv500_errata1_tlb_inv_context(void *cookie)
4811{
4812 struct arm_smmu_domain *smmu_domain = cookie;
4813 struct device *dev = smmu_domain->dev;
4814 struct qsmmuv500_archdata *data =
4815 get_qsmmuv500_archdata(smmu_domain->smmu);
4816 ktime_t cur;
Patrick Daly1faa3112017-10-31 16:40:40 -07004817 unsigned long flags;
Patrick Dalyda765c62017-09-11 16:31:07 -07004818 bool errata;
4819
4820 cur = ktime_get();
Prakash Gupta25f90512017-11-20 14:56:54 +05304821 trace_tlbi_start(dev, 0);
Patrick Dalyda765c62017-09-11 16:31:07 -07004822
4823 errata = qsmmuv500_errata1_required(smmu_domain, data);
Patrick Daly1faa3112017-10-31 16:40:40 -07004824 remote_spin_lock_irqsave(&data->errata1_lock, flags);
Patrick Dalyda765c62017-09-11 16:31:07 -07004825 if (errata) {
4826 s64 delta;
4827
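		/*
		 * Space back-to-back invalidations from errata-affected
		 * clients at least ERRATA1_TLBI_INTERVAL_US apart.
		 */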
4828 delta = ktime_us_delta(ktime_get(), data->last_tlbi_ktime);
4829 if (delta < ERRATA1_TLBI_INTERVAL_US)
4830 udelay(ERRATA1_TLBI_INTERVAL_US - delta);
4831
4832 __qsmmuv500_errata1_tlbiall(smmu_domain);
4833
4834 data->last_tlbi_ktime = ktime_get();
4835 } else {
4836 __qsmmuv500_errata1_tlbiall(smmu_domain);
4837 }
Patrick Daly1faa3112017-10-31 16:40:40 -07004838 remote_spin_unlock_irqrestore(&data->errata1_lock, flags);
Patrick Dalyda765c62017-09-11 16:31:07 -07004839
Prakash Gupta25f90512017-11-20 14:56:54 +05304840 trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
Patrick Dalyda765c62017-09-11 16:31:07 -07004841}
4842
4843static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops = {
4844 .tlb_flush_all = qsmmuv500_errata1_tlb_inv_context,
4845 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
4846 .free_pages_exact = arm_smmu_free_pages_exact,
4847};
4848
Patrick Daly8c1202b2017-05-10 15:42:30 -07004849static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu,
4850 struct arm_smmu_domain *smmu_domain)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004851{
4852 unsigned long flags;
Patrick Daly8c1202b2017-05-10 15:42:30 -07004853 u32 halt, fsr, sctlr_orig, sctlr, status;
4854 void __iomem *base, *cb_base;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004855
4856 spin_lock_irqsave(&tbu->halt_lock, flags);
4857 if (tbu->halt_count) {
4858 tbu->halt_count++;
4859 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4860 return 0;
4861 }
4862
Patrick Daly8c1202b2017-05-10 15:42:30 -07004863 cb_base = ARM_SMMU_CB_BASE(smmu_domain->smmu) +
4864 ARM_SMMU_CB(smmu_domain->smmu, smmu_domain->cfg.cbndx);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004865 base = tbu->base;
Patrick Daly8c1202b2017-05-10 15:42:30 -07004866 halt = readl_relaxed(base + DEBUG_SID_HALT_REG);
4867 halt |= DEBUG_SID_HALT_VAL;
4868 writel_relaxed(halt, base + DEBUG_SID_HALT_REG);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004869
Patrick Daly8c1202b2017-05-10 15:42:30 -07004870 if (!readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
4871 (status & DEBUG_SR_HALT_ACK_VAL),
4872 0, TBU_DBG_TIMEOUT_US))
4873 goto out;
4874
4875 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4876 if (!(fsr & FSR_FAULT)) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004877 dev_err(tbu->dev, "Couldn't halt TBU!\n");
4878 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4879 return -ETIMEDOUT;
4880 }
4881
Patrick Daly8c1202b2017-05-10 15:42:30 -07004882 /*
4883 * We are in a fault; Our request to halt the bus will not complete
4884 * until transactions in front of us (such as the fault itself) have
4885 * completed. Disable iommu faults and terminate any existing
4886 * transactions.
4887 */
4888 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
4889 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
4890 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
4891
4892 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
4893 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4894
4895 if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
4896 (status & DEBUG_SR_HALT_ACK_VAL),
4897 0, TBU_DBG_TIMEOUT_US)) {
4898 dev_err(tbu->dev, "Couldn't halt TBU from fault context!\n");
4899 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
4900 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4901 return -ETIMEDOUT;
4902 }
4903
4904 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
4905out:
Patrick Daly1f8a2882016-09-12 17:32:05 -07004906 tbu->halt_count = 1;
4907 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4908 return 0;
4909}
4910
4911static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
4912{
4913 unsigned long flags;
4914 u32 val;
4915 void __iomem *base;
4916
4917 spin_lock_irqsave(&tbu->halt_lock, flags);
4918 if (!tbu->halt_count) {
4919 WARN(1, "%s: bad tbu->halt_count", dev_name(tbu->dev));
4920 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4921 return;
4922
4923 } else if (tbu->halt_count > 1) {
4924 tbu->halt_count--;
4925 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4926 return;
4927 }
4928
4929 base = tbu->base;
4930 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
4931 val &= ~DEBUG_SID_HALT_VAL;
4932 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
4933
4934 tbu->halt_count = 0;
4935 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4936}
4937
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004938static struct qsmmuv500_tbu_device *qsmmuv500_find_tbu(
4939 struct arm_smmu_device *smmu, u32 sid)
4940{
4941 struct qsmmuv500_tbu_device *tbu = NULL;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004942 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004943
4944 list_for_each_entry(tbu, &data->tbus, list) {
4945 if (tbu->sid_start <= sid &&
4946 sid < tbu->sid_start + tbu->num_sids)
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07004947 return tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004948 }
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07004949 return NULL;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004950}
4951
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004952static int qsmmuv500_ecats_lock(struct arm_smmu_domain *smmu_domain,
4953 struct qsmmuv500_tbu_device *tbu,
4954 unsigned long *flags)
4955{
4956 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004957 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004958 u32 val;
4959
4960 spin_lock_irqsave(&smmu->atos_lock, *flags);
4961 /* The status register is not accessible on version 1.0 */
4962 if (data->version == 0x01000000)
4963 return 0;
4964
4965 if (readl_poll_timeout_atomic(tbu->status_reg,
4966 val, (val == 0x1), 0,
4967 TBU_DBG_TIMEOUT_US)) {
4968 dev_err(tbu->dev, "ECATS hw busy!\n");
4969 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4970 return -ETIMEDOUT;
4971 }
4972
4973 return 0;
4974}
4975
4976static void qsmmuv500_ecats_unlock(struct arm_smmu_domain *smmu_domain,
4977 struct qsmmuv500_tbu_device *tbu,
4978 unsigned long *flags)
4979{
4980 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004981 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004982
4983 /* The status register is not accessible on version 1.0 */
4984 if (data->version != 0x01000000)
4985 writel_relaxed(0, tbu->status_reg);
4986 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4987}
4988
4989/*
4990 * Zero means failure.
4991 */
4992static phys_addr_t qsmmuv500_iova_to_phys(
4993 struct iommu_domain *domain, dma_addr_t iova, u32 sid)
4994{
4995 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
4996 struct arm_smmu_device *smmu = smmu_domain->smmu;
4997 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
4998 struct qsmmuv500_tbu_device *tbu;
4999 int ret;
5000 phys_addr_t phys = 0;
5001 u64 val, fsr;
5002 unsigned long flags;
5003 void __iomem *cb_base;
5004 u32 sctlr_orig, sctlr;
5005 int needs_redo = 0;
Patrick Daly8c1202b2017-05-10 15:42:30 -07005006 ktime_t timeout;
5007
5008 /* only 36 bit iova is supported */
5009 if (iova >= (1ULL << 36)) {
5010 dev_err_ratelimited(smmu->dev, "ECATS: address too large: %pad\n",
5011 &iova);
5012 return 0;
5013 }
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005014
5015 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
5016 tbu = qsmmuv500_find_tbu(smmu, sid);
5017 if (!tbu)
5018 return 0;
5019
5020 ret = arm_smmu_power_on(tbu->pwr);
5021 if (ret)
5022 return 0;
5023
Patrick Daly8c1202b2017-05-10 15:42:30 -07005024 ret = qsmmuv500_tbu_halt(tbu, smmu_domain);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005025 if (ret)
5026 goto out_power_off;
5027
Patrick Daly8c1202b2017-05-10 15:42:30 -07005028 /*
5029 * ECATS can trigger the fault interrupt, so disable it temporarily
5030 * and check for an interrupt manually.
5031 */
5032 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
5033 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
5034 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
5035
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005036 /* Only one concurrent atos operation */
5037 ret = qsmmuv500_ecats_lock(smmu_domain, tbu, &flags);
5038 if (ret)
5039 goto out_resume;
5040
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005041redo:
5042 /* Set address and stream-id */
5043 val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
5044 val |= sid & DEBUG_SID_HALT_SID_MASK;
5045 writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
5046 writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
5047
5048 /*
5049 * Write-back Read and Write-Allocate
5050 * Privileged, non-secure, data transaction
5051 * Read operation.
5052 */
5053 val = 0xF << DEBUG_TXN_AXCACHE_SHIFT;
5054 val |= 0x3 << DEBUG_TXN_AXPROT_SHIFT;
5055 val |= DEBUG_TXN_TRIGGER;
5056 writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);
5057
5058 ret = 0;
Patrick Daly8c1202b2017-05-10 15:42:30 -07005059	/* Based on readx_poll_timeout_atomic() */
5060 timeout = ktime_add_us(ktime_get(), TBU_DBG_TIMEOUT_US);
5061 for (;;) {
5062 val = readl_relaxed(tbu->base + DEBUG_SR_HALT_ACK_REG);
5063 if (!(val & DEBUG_SR_ECATS_RUNNING_VAL))
5064 break;
5065 val = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
5066 if (val & FSR_FAULT)
5067 break;
5068 if (ktime_compare(ktime_get(), timeout) > 0) {
5069 dev_err(tbu->dev, "ECATS translation timed out!\n");
5070 ret = -ETIMEDOUT;
5071 break;
5072 }
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005073 }
5074
5075 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
5076 if (fsr & FSR_FAULT) {
5077 dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx\n",
Patrick Daly8c1202b2017-05-10 15:42:30 -07005078 fsr);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005079 ret = -EINVAL;
5080
5081		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
5082 /*
5083 * Clear pending interrupts
5084 * Barrier required to ensure that the FSR is cleared
5085 * before resuming SMMU operation
5086 */
5087 wmb();
5088 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
5089 }
5090
5091 val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
5092 if (val & DEBUG_PAR_FAULT_VAL) {
5093 dev_err(tbu->dev, "ECATS translation failed! PAR = %llx\n",
5094 val);
5095 ret = -EINVAL;
5096 }
5097
5098 phys = (val >> DEBUG_PAR_PA_SHIFT) & DEBUG_PAR_PA_MASK;
5099 if (ret < 0)
5100 phys = 0;
5101
5102 /* Reset hardware */
5103 writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
5104 writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
5105
5106 /*
5107	 * After a failed translation, the next successful translation will
5108	 * incorrectly be reported as a failure, hence the retries below.
5109 */
5110 if (!phys && needs_redo++ < 2)
5111 goto redo;
5112
5113 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
5114 qsmmuv500_ecats_unlock(smmu_domain, tbu, &flags);
5115
5116out_resume:
5117 qsmmuv500_tbu_resume(tbu);
5118
5119out_power_off:
5120 arm_smmu_power_off(tbu->pwr);
5121
5122 return phys;
5123}
5124
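/*
 * arch_ops ->iova_to_phys_hard hook: use the first stream ID attached to
 * the domain's device for the ECATS lookup.
 */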
5125static phys_addr_t qsmmuv500_iova_to_phys_hard(
5126 struct iommu_domain *domain, dma_addr_t iova)
5127{
5128 u16 sid;
5129 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
5130 struct iommu_fwspec *fwspec;
5131
5132 /* Select a sid */
5133 fwspec = smmu_domain->dev->iommu_fwspec;
5134 sid = (u16)fwspec->ids[0];
5135
5136 return qsmmuv500_iova_to_phys(domain, iova, sid);
5137}
5138
Patrick Daly03330cc2017-08-11 14:56:38 -07005139static void qsmmuv500_release_group_iommudata(void *data)
5140{
5141 kfree(data);
5142}
5143
5144/* All devices in a group with an ACTLR setting must share the same value */
5145static int qsmmuv500_device_group(struct device *dev,
5146 struct iommu_group *group)
5147{
5148 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
5149 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
5150 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
5151 struct qsmmuv500_group_iommudata *iommudata;
5152 u32 actlr, i;
5153 struct arm_smmu_smr *smr;
5154
5155 iommudata = to_qsmmuv500_group_iommudata(group);
5156 if (!iommudata) {
5157 iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
5158 if (!iommudata)
5159 return -ENOMEM;
5160
5161 iommu_group_set_iommudata(group, iommudata,
5162 qsmmuv500_release_group_iommudata);
5163 }
5164
5165 for (i = 0; i < data->actlr_tbl_size; i++) {
5166 smr = &data->actlrs[i].smr;
5167 actlr = data->actlrs[i].actlr;
5168
5169 if (!arm_smmu_fwspec_match_smr(fwspec, smr))
5170 continue;
5171
5172 if (!iommudata->has_actlr) {
5173 iommudata->actlr = actlr;
5174 iommudata->has_actlr = true;
5175 } else if (iommudata->actlr != actlr) {
5176 return -EINVAL;
5177 }
5178 }
5179
5180 return 0;
5181}
5182
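/*
 * arch_ops ->init_context_bank hook: program the group's ACTLR value into
 * the context bank, record whether deep prefetch is enabled (which brings
 * the errata2 minimum-alignment requirement with it), and flush the TLB so
 * no stale state survives the change.
 */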
5183static void qsmmuv500_init_cb(struct arm_smmu_domain *smmu_domain,
5184 struct device *dev)
5185{
5186 struct arm_smmu_device *smmu = smmu_domain->smmu;
5187 struct qsmmuv500_group_iommudata *iommudata =
5188 to_qsmmuv500_group_iommudata(dev->iommu_group);
5189 void __iomem *cb_base;
5190 const struct iommu_gather_ops *tlb;
5191
5192 if (!iommudata->has_actlr)
5193 return;
5194
5195 tlb = smmu_domain->pgtbl_cfg.tlb;
5196 cb_base = ARM_SMMU_CB_BASE(smmu) +
5197 ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
5198
5199 writel_relaxed(iommudata->actlr, cb_base + ARM_SMMU_CB_ACTLR);
5200
5201 /*
Patrick Daly23301482017-10-12 16:18:25 -07005202 * Prefetch only works properly if the start and end of all
5203	 * buffers in the page table are aligned to 16 KB.
5204 */
Patrick Daly27bd9292017-11-22 13:59:59 -08005205 if ((iommudata->actlr >> QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT) &
Patrick Daly23301482017-10-12 16:18:25 -07005206 QSMMUV500_ACTLR_DEEP_PREFETCH_MASK)
5207 smmu_domain->qsmmuv500_errata2_min_align = true;
5208
5209 /*
Patrick Daly03330cc2017-08-11 14:56:38 -07005210 * Flush the context bank after modifying ACTLR to ensure there
5211 * are no cache entries with stale state
5212 */
5213 tlb->tlb_flush_all(smmu_domain);
5214}
5215
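/*
 * Called for every child device of the SMMU platform device: each TBU must
 * already be bound to its driver, otherwise arch init fails and the SMMU
 * probe is deferred.
 */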
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005216static int qsmmuv500_tbu_register(struct device *dev, void *cookie)
Patrick Daly1f8a2882016-09-12 17:32:05 -07005217{
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005218 struct arm_smmu_device *smmu = cookie;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005219 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005220 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07005221
5222 if (!dev->driver) {
5223 dev_err(dev, "TBU failed probe, QSMMUV500 cannot continue!\n");
5224 return -EINVAL;
5225 }
5226
5227 tbu = dev_get_drvdata(dev);
5228
5229 INIT_LIST_HEAD(&tbu->list);
5230 tbu->smmu = smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07005231 list_add(&tbu->list, &data->tbus);
Patrick Daly1f8a2882016-09-12 17:32:05 -07005232 return 0;
5233}
5234
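/*
 * Parse the optional "qcom,mmu500-errata-1" property: a list of <sid mask>
 * pairs identifying the stream-mapping entries affected by the erratum.
 * A hypothetical entry (placeholder values, for illustration only):
 *
 *	qcom,mmu500-errata-1 = <0x800 0x3c0>;
 */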
Patrick Dalyda765c62017-09-11 16:31:07 -07005235static int qsmmuv500_parse_errata1(struct arm_smmu_device *smmu)
5236{
5237 int len, i;
5238 struct device *dev = smmu->dev;
5239 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
5240 struct arm_smmu_smr *smrs;
5241 const __be32 *cell;
5242
5243 cell = of_get_property(dev->of_node, "qcom,mmu500-errata-1", NULL);
5244 if (!cell)
5245 return 0;
5246
5247 remote_spin_lock_init(&data->errata1_lock, ERRATA1_REMOTE_SPINLOCK);
5248 len = of_property_count_elems_of_size(
5249 dev->of_node, "qcom,mmu500-errata-1", sizeof(u32) * 2);
5250 if (len < 0)
5251 return 0;
5252
5253 smrs = devm_kzalloc(dev, sizeof(*smrs) * len, GFP_KERNEL);
5254 if (!smrs)
5255 return -ENOMEM;
5256
5257 for (i = 0; i < len; i++) {
5258 smrs[i].id = of_read_number(cell++, 1);
5259 smrs[i].mask = of_read_number(cell++, 1);
5260 }
5261
5262 data->errata1_clients = smrs;
5263 data->num_errata1_clients = len;
5264 return 0;
5265}
5266
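/*
 * Parse the optional "qcom,actlr" property: a list of <sid mask actlr>
 * triplets giving the ACTLR value to program for context banks used by the
 * matching stream IDs. A hypothetical entry (placeholder values, for
 * illustration only):
 *
 *	qcom,actlr = <0x880 0x8 0x3>;
 */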
Patrick Daly03330cc2017-08-11 14:56:38 -07005267static int qsmmuv500_read_actlr_tbl(struct arm_smmu_device *smmu)
5268{
5269 int len, i;
5270 struct device *dev = smmu->dev;
5271 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
5272 struct actlr_setting *actlrs;
5273 const __be32 *cell;
5274
5275 cell = of_get_property(dev->of_node, "qcom,actlr", NULL);
5276 if (!cell)
5277 return 0;
5278
5279 len = of_property_count_elems_of_size(dev->of_node, "qcom,actlr",
5280 sizeof(u32) * 3);
5281 if (len < 0)
5282 return 0;
5283
5284 actlrs = devm_kzalloc(dev, sizeof(*actlrs) * len, GFP_KERNEL);
5285 if (!actlrs)
5286 return -ENOMEM;
5287
5288 for (i = 0; i < len; i++) {
5289 actlrs[i].smr.id = of_read_number(cell++, 1);
5290 actlrs[i].smr.mask = of_read_number(cell++, 1);
5291 actlrs[i].actlr = of_read_number(cell++, 1);
5292 }
5293
5294 data->actlrs = actlrs;
5295 data->actlr_tbl_size = len;
5296 return 0;
5297}
5298
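/*
 * One-time QSMMUV500 setup: map the TCU register space and read the
 * hardware version, then (unless the context banks are statically
 * configured) parse the errata and ACTLR tables, clear the sACR cache-lock
 * bit, and populate and register the child TBU devices.
 */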
Patrick Daly1f8a2882016-09-12 17:32:05 -07005299static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
5300{
Patrick Dalya0fddb62017-03-27 19:26:59 -07005301 struct resource *res;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005302 struct device *dev = smmu->dev;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005303 struct qsmmuv500_archdata *data;
Patrick Dalya0fddb62017-03-27 19:26:59 -07005304 struct platform_device *pdev;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005305 int ret;
Patrick Daly03330cc2017-08-11 14:56:38 -07005306 u32 val;
5307 void __iomem *reg;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005308
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005309 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
5310 if (!data)
Patrick Daly1f8a2882016-09-12 17:32:05 -07005311 return -ENOMEM;
5312
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005313 INIT_LIST_HEAD(&data->tbus);
Patrick Dalya0fddb62017-03-27 19:26:59 -07005314
5315 pdev = container_of(dev, struct platform_device, dev);
5316 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcu-base");
5317 data->tcu_base = devm_ioremap_resource(dev, res);
5318 if (IS_ERR(data->tcu_base))
5319 return PTR_ERR(data->tcu_base);
5320
5321 data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005322 smmu->archdata = data;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005323
Charan Teja Reddy424ed342018-01-18 12:25:06 +05305324 if (arm_smmu_is_static_cb(smmu))
5325 return 0;
5326
Patrick Dalyda765c62017-09-11 16:31:07 -07005327 ret = qsmmuv500_parse_errata1(smmu);
5328 if (ret)
5329 return ret;
5330
Patrick Daly03330cc2017-08-11 14:56:38 -07005331 ret = qsmmuv500_read_actlr_tbl(smmu);
5332 if (ret)
5333 return ret;
5334
5335 reg = ARM_SMMU_GR0(smmu);
5336 val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
5337 val &= ~ARM_MMU500_ACR_CACHE_LOCK;
5338 writel_relaxed(val, reg + ARM_SMMU_GR0_sACR);
5339 val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
5340 /*
5341	 * Modifying the non-secure copy of the sACR register is only
5342 * allowed if permission is given in the secure sACR register.
5343 * Attempt to detect if we were able to update the value.
5344 */
5345 WARN_ON(val & ARM_MMU500_ACR_CACHE_LOCK);
5346
Patrick Daly1f8a2882016-09-12 17:32:05 -07005347 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
5348 if (ret)
5349 return ret;
5350
5351 /* Attempt to register child devices */
5352 ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
5353 if (ret)
Patrick Daly6ce54262017-04-12 21:24:06 -07005354 return -EPROBE_DEFER;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005355
5356 return 0;
5357}
5358
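/* Hooks the QSMMUV500-specific behavior into the core arm-smmu driver */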
5359struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
5360 .init = qsmmuv500_arch_init,
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005361 .iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
Patrick Daly03330cc2017-08-11 14:56:38 -07005362 .init_context_bank = qsmmuv500_init_cb,
5363 .device_group = qsmmuv500_device_group,
Patrick Daly1f8a2882016-09-12 17:32:05 -07005364};
5365
5366static const struct of_device_id qsmmuv500_tbu_of_match[] = {
5367 {.compatible = "qcom,qsmmuv500-tbu"},
5368 {}
5369};
5370
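/*
 * TBU probe: map the "base" and "status-reg" regions, read the mandatory
 * "qcom,stream-id-range" property (<first-sid count>) and set up the TBU's
 * power resources. A hypothetical range (placeholder values, for
 * illustration only):
 *
 *	qcom,stream-id-range = <0x800 0x400>;
 */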
5371static int qsmmuv500_tbu_probe(struct platform_device *pdev)
5372{
5373 struct resource *res;
5374 struct device *dev = &pdev->dev;
5375 struct qsmmuv500_tbu_device *tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005376 const __be32 *cell;
5377 int len;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005378
5379 tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
5380 if (!tbu)
5381 return -ENOMEM;
5382
5383 INIT_LIST_HEAD(&tbu->list);
5384 tbu->dev = dev;
5385 spin_lock_init(&tbu->halt_lock);
5386
5387 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
5388 tbu->base = devm_ioremap_resource(dev, res);
5389 if (IS_ERR(tbu->base))
5390 return PTR_ERR(tbu->base);
5391
5392 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "status-reg");
5393 tbu->status_reg = devm_ioremap_resource(dev, res);
5394 if (IS_ERR(tbu->status_reg))
5395 return PTR_ERR(tbu->status_reg);
5396
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005397 cell = of_get_property(dev->of_node, "qcom,stream-id-range", &len);
5398 if (!cell || len < 8)
5399 return -EINVAL;
5400
5401 tbu->sid_start = of_read_number(cell, 1);
5402 tbu->num_sids = of_read_number(cell + 1, 1);
5403
Patrick Daly1f8a2882016-09-12 17:32:05 -07005404 tbu->pwr = arm_smmu_init_power_resources(pdev);
5405 if (IS_ERR(tbu->pwr))
5406 return PTR_ERR(tbu->pwr);
5407
5408 dev_set_drvdata(dev, tbu);
5409 return 0;
5410}
5411
5412static struct platform_driver qsmmuv500_tbu_driver = {
5413 .driver = {
5414 .name = "qsmmuv500-tbu",
5415 .of_match_table = of_match_ptr(qsmmuv500_tbu_of_match),
5416 },
5417 .probe = qsmmuv500_tbu_probe,
5418};
5419
Will Deacon45ae7cf2013-06-24 18:31:25 +01005420MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
5421MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
5422MODULE_LICENSE("GPL v2");