Will Deacon45ae7cf2013-06-24 18:31:25 +01001/*
2 * IOMMU API for ARM architected SMMU implementations.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
16 *
17 * Copyright (C) 2013 ARM Limited
18 *
19 * Author: Will Deacon <will.deacon@arm.com>
20 *
21 * This driver currently supports:
22 * - SMMUv1 and v2 implementations
23 * - Stream-matching and stream-indexing
24 * - v7/v8 long-descriptor format
25 * - Non-secure access to the SMMU
Will Deacon45ae7cf2013-06-24 18:31:25 +010026 * - Context fault reporting
27 */
28
29#define pr_fmt(fmt) "arm-smmu: " fmt
30
Robin Murphy468f4942016-09-12 17:13:49 +010031#include <linux/atomic.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010032#include <linux/delay.h>
Robin Murphy9adb9592016-01-26 18:06:36 +000033#include <linux/dma-iommu.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010034#include <linux/dma-mapping.h>
35#include <linux/err.h>
36#include <linux/interrupt.h>
37#include <linux/io.h>
Robin Murphyf9a05f02016-04-13 18:13:01 +010038#include <linux/io-64-nonatomic-hi-lo.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010039#include <linux/iommu.h>
Mitchel Humpherys859a7322014-10-29 21:13:40 +000040#include <linux/iopoll.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010041#include <linux/module.h>
42#include <linux/of.h>
Robin Murphybae2c2d2015-07-29 19:46:05 +010043#include <linux/of_address.h>
Robin Murphyfe52d4f2016-09-12 17:13:52 +010044#include <linux/of_device.h>
Robin Murphy06e393e2016-09-12 17:13:55 +010045#include <linux/of_iommu.h>
Will Deacona9a1b0b2014-05-01 18:05:08 +010046#include <linux/pci.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010047#include <linux/platform_device.h>
48#include <linux/slab.h>
49#include <linux/spinlock.h>
Patrick Dalyc11d1082016-09-01 15:52:44 -070050#include <soc/qcom/secure_buffer.h>
Patrick Daly1f8a2882016-09-12 17:32:05 -070051#include <linux/of_platform.h>
Patrick Daly2764f952016-09-06 19:22:44 -070052#include <linux/msm-bus.h>
53#include <dt-bindings/msm/msm-bus-ids.h>
Patrick Dalyda765c62017-09-11 16:31:07 -070054#include <linux/remote_spinlock.h>
55#include <linux/ktime.h>
56#include <trace/events/iommu.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010057
58#include <linux/amba/bus.h>
59
Will Deacon518f7132014-11-14 17:17:54 +000060#include "io-pgtable.h"
Will Deacon45ae7cf2013-06-24 18:31:25 +010061
Will Deacon45ae7cf2013-06-24 18:31:25 +010062/* Maximum number of context banks per SMMU */
63#define ARM_SMMU_MAX_CBS 128
64
Will Deacon45ae7cf2013-06-24 18:31:25 +010065/* SMMU global address space */
66#define ARM_SMMU_GR0(smmu) ((smmu)->base)
Will Deaconc757e852014-07-30 11:33:25 +010067#define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))
Will Deacon45ae7cf2013-06-24 18:31:25 +010068
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +000069/*
70 * SMMU global address space with conditional offset to access secure
71 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
72 * nsGFSYNR0: 0x450)
73 */
74#define ARM_SMMU_GR0_NS(smmu) \
75 ((smmu)->base + \
76 ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
77 ? 0x400 : 0))
78
Robin Murphyf9a05f02016-04-13 18:13:01 +010079/*
80 * Some 64-bit registers only make sense to write atomically, but in such
81 * cases all the data relevant to AArch32 formats lies within the lower word,
82 * therefore this actually makes more sense than it might first appear.
83 */
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010084#ifdef CONFIG_64BIT
Robin Murphyf9a05f02016-04-13 18:13:01 +010085#define smmu_write_atomic_lq writeq_relaxed
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010086#else
Robin Murphyf9a05f02016-04-13 18:13:01 +010087#define smmu_write_atomic_lq writel_relaxed
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010088#endif
89
Will Deacon45ae7cf2013-06-24 18:31:25 +010090/* Configuration registers */
91#define ARM_SMMU_GR0_sCR0 0x0
92#define sCR0_CLIENTPD (1 << 0)
93#define sCR0_GFRE (1 << 1)
94#define sCR0_GFIE (1 << 2)
95#define sCR0_GCFGFRE (1 << 4)
96#define sCR0_GCFGFIE (1 << 5)
97#define sCR0_USFCFG (1 << 10)
98#define sCR0_VMIDPNE (1 << 11)
99#define sCR0_PTM (1 << 12)
100#define sCR0_FB (1 << 13)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800101#define sCR0_VMID16EN (1 << 31)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100102#define sCR0_BSU_SHIFT 14
103#define sCR0_BSU_MASK 0x3
Patrick Daly7f377fe2017-10-06 17:37:10 -0700104#define sCR0_SHCFG_SHIFT 22
105#define sCR0_SHCFG_MASK 0x3
106#define sCR0_SHCFG_NSH 3
Will Deacon45ae7cf2013-06-24 18:31:25 +0100107
Peng Fan3ca37122016-05-03 21:50:30 +0800108/* Auxiliary Configuration register */
109#define ARM_SMMU_GR0_sACR 0x10
110
Will Deacon45ae7cf2013-06-24 18:31:25 +0100111/* Identification registers */
112#define ARM_SMMU_GR0_ID0 0x20
113#define ARM_SMMU_GR0_ID1 0x24
114#define ARM_SMMU_GR0_ID2 0x28
115#define ARM_SMMU_GR0_ID3 0x2c
116#define ARM_SMMU_GR0_ID4 0x30
117#define ARM_SMMU_GR0_ID5 0x34
118#define ARM_SMMU_GR0_ID6 0x38
119#define ARM_SMMU_GR0_ID7 0x3c
120#define ARM_SMMU_GR0_sGFSR 0x48
121#define ARM_SMMU_GR0_sGFSYNR0 0x50
122#define ARM_SMMU_GR0_sGFSYNR1 0x54
123#define ARM_SMMU_GR0_sGFSYNR2 0x58
Will Deacon45ae7cf2013-06-24 18:31:25 +0100124
125#define ID0_S1TS (1 << 30)
126#define ID0_S2TS (1 << 29)
127#define ID0_NTS (1 << 28)
128#define ID0_SMS (1 << 27)
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000129#define ID0_ATOSNS (1 << 26)
Robin Murphy7602b872016-04-28 17:12:09 +0100130#define ID0_PTFS_NO_AARCH32 (1 << 25)
131#define ID0_PTFS_NO_AARCH32S (1 << 24)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100132#define ID0_CTTW (1 << 14)
133#define ID0_NUMIRPT_SHIFT 16
134#define ID0_NUMIRPT_MASK 0xff
Olav Haugan3c8766d2014-08-22 17:12:32 -0700135#define ID0_NUMSIDB_SHIFT 9
136#define ID0_NUMSIDB_MASK 0xf
Will Deacon45ae7cf2013-06-24 18:31:25 +0100137#define ID0_NUMSMRG_SHIFT 0
138#define ID0_NUMSMRG_MASK 0xff
139
140#define ID1_PAGESIZE (1 << 31)
141#define ID1_NUMPAGENDXB_SHIFT 28
142#define ID1_NUMPAGENDXB_MASK 7
143#define ID1_NUMS2CB_SHIFT 16
144#define ID1_NUMS2CB_MASK 0xff
145#define ID1_NUMCB_SHIFT 0
146#define ID1_NUMCB_MASK 0xff
147
148#define ID2_OAS_SHIFT 4
149#define ID2_OAS_MASK 0xf
150#define ID2_IAS_SHIFT 0
151#define ID2_IAS_MASK 0xf
152#define ID2_UBS_SHIFT 8
153#define ID2_UBS_MASK 0xf
154#define ID2_PTFS_4K (1 << 12)
155#define ID2_PTFS_16K (1 << 13)
156#define ID2_PTFS_64K (1 << 14)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800157#define ID2_VMID16 (1 << 15)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100158
Peng Fan3ca37122016-05-03 21:50:30 +0800159#define ID7_MAJOR_SHIFT 4
160#define ID7_MAJOR_MASK 0xf
Will Deacon45ae7cf2013-06-24 18:31:25 +0100161
Will Deacon45ae7cf2013-06-24 18:31:25 +0100162/* Global TLB invalidation */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100163#define ARM_SMMU_GR0_TLBIVMID 0x64
164#define ARM_SMMU_GR0_TLBIALLNSNH 0x68
165#define ARM_SMMU_GR0_TLBIALLH 0x6c
166#define ARM_SMMU_GR0_sTLBGSYNC 0x70
167#define ARM_SMMU_GR0_sTLBGSTATUS 0x74
168#define sTLBGSTATUS_GSACTIVE (1 << 0)
Mitchel Humpherys849aa502015-11-09 11:50:58 -0800169#define TLB_LOOP_TIMEOUT 500000 /* 500ms */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100170
171/* Stream mapping registers */
172#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
173#define SMR_VALID (1 << 31)
174#define SMR_MASK_SHIFT 16
Patrick Dalyda688822017-05-17 20:12:48 -0700175#define SMR_MASK_MASK 0x7FFF
Will Deacon45ae7cf2013-06-24 18:31:25 +0100176#define SMR_ID_SHIFT 0
Will Deacon45ae7cf2013-06-24 18:31:25 +0100177
178#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
179#define S2CR_CBNDX_SHIFT 0
180#define S2CR_CBNDX_MASK 0xff
181#define S2CR_TYPE_SHIFT 16
182#define S2CR_TYPE_MASK 0x3
Patrick Daly7f377fe2017-10-06 17:37:10 -0700183#define S2CR_SHCFG_SHIFT 8
184#define S2CR_SHCFG_MASK 0x3
185#define S2CR_SHCFG_NSH 0x3
Robin Murphya754fd12016-09-12 17:13:50 +0100186enum arm_smmu_s2cr_type {
187 S2CR_TYPE_TRANS,
188 S2CR_TYPE_BYPASS,
189 S2CR_TYPE_FAULT,
190};
191
192#define S2CR_PRIVCFG_SHIFT 24
193#define S2CR_PRIVCFG_MASK 0x3
194enum arm_smmu_s2cr_privcfg {
195 S2CR_PRIVCFG_DEFAULT,
196 S2CR_PRIVCFG_DIPAN,
197 S2CR_PRIVCFG_UNPRIV,
198 S2CR_PRIVCFG_PRIV,
199};
Will Deacon45ae7cf2013-06-24 18:31:25 +0100200
201/* Context bank attribute registers */
202#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
203#define CBAR_VMID_SHIFT 0
204#define CBAR_VMID_MASK 0xff
Will Deacon57ca90f2014-02-06 14:59:05 +0000205#define CBAR_S1_BPSHCFG_SHIFT 8
206#define CBAR_S1_BPSHCFG_MASK 3
207#define CBAR_S1_BPSHCFG_NSH 3
Will Deacon45ae7cf2013-06-24 18:31:25 +0100208#define CBAR_S1_MEMATTR_SHIFT 12
209#define CBAR_S1_MEMATTR_MASK 0xf
210#define CBAR_S1_MEMATTR_WB 0xf
211#define CBAR_TYPE_SHIFT 16
212#define CBAR_TYPE_MASK 0x3
213#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
214#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
215#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
216#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
217#define CBAR_IRPTNDX_SHIFT 24
218#define CBAR_IRPTNDX_MASK 0xff
219
Shalaj Jain04059c52015-03-03 13:34:59 -0800220#define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2))
221#define CBFRSYNRA_SID_MASK (0xffff)
222
Will Deacon45ae7cf2013-06-24 18:31:25 +0100223#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
224#define CBA2R_RW64_32BIT (0 << 0)
225#define CBA2R_RW64_64BIT (1 << 0)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800226#define CBA2R_VMID_SHIFT 16
227#define CBA2R_VMID_MASK 0xffff
Will Deacon45ae7cf2013-06-24 18:31:25 +0100228
229/* Translation context bank */
230#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
Will Deaconc757e852014-07-30 11:33:25 +0100231#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))
Will Deacon45ae7cf2013-06-24 18:31:25 +0100232
233#define ARM_SMMU_CB_SCTLR 0x0
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100234#define ARM_SMMU_CB_ACTLR 0x4
Will Deacon45ae7cf2013-06-24 18:31:25 +0100235#define ARM_SMMU_CB_RESUME 0x8
236#define ARM_SMMU_CB_TTBCR2 0x10
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +0100237#define ARM_SMMU_CB_TTBR0 0x20
238#define ARM_SMMU_CB_TTBR1 0x28
Will Deacon45ae7cf2013-06-24 18:31:25 +0100239#define ARM_SMMU_CB_TTBCR 0x30
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600240#define ARM_SMMU_CB_CONTEXTIDR 0x34
Will Deacon45ae7cf2013-06-24 18:31:25 +0100241#define ARM_SMMU_CB_S1_MAIR0 0x38
Will Deacon518f7132014-11-14 17:17:54 +0000242#define ARM_SMMU_CB_S1_MAIR1 0x3c
Robin Murphyf9a05f02016-04-13 18:13:01 +0100243#define ARM_SMMU_CB_PAR 0x50
Will Deacon45ae7cf2013-06-24 18:31:25 +0100244#define ARM_SMMU_CB_FSR 0x58
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -0700245#define ARM_SMMU_CB_FSRRESTORE 0x5c
Robin Murphyf9a05f02016-04-13 18:13:01 +0100246#define ARM_SMMU_CB_FAR 0x60
Will Deacon45ae7cf2013-06-24 18:31:25 +0100247#define ARM_SMMU_CB_FSYNR0 0x68
Will Deacon518f7132014-11-14 17:17:54 +0000248#define ARM_SMMU_CB_S1_TLBIVA 0x600
Will Deacon1463fe42013-07-31 19:21:27 +0100249#define ARM_SMMU_CB_S1_TLBIASID 0x610
Patrick Dalye7069342017-07-11 12:35:55 -0700250#define ARM_SMMU_CB_S1_TLBIALL 0x618
Will Deacon518f7132014-11-14 17:17:54 +0000251#define ARM_SMMU_CB_S1_TLBIVAL 0x620
252#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
253#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
Mitchel Humpherysf3007992015-06-19 15:00:14 -0700254#define ARM_SMMU_CB_TLBSYNC 0x7f0
255#define ARM_SMMU_CB_TLBSTATUS 0x7f4
256#define TLBSTATUS_SACTIVE (1 << 0)
Robin Murphy661d9622015-05-27 17:09:34 +0100257#define ARM_SMMU_CB_ATS1PR 0x800
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000258#define ARM_SMMU_CB_ATSR 0x8f0
Will Deacon45ae7cf2013-06-24 18:31:25 +0100259
Patrick Daly7f377fe2017-10-06 17:37:10 -0700260#define SCTLR_SHCFG_SHIFT 22
261#define SCTLR_SHCFG_MASK 0x3
262#define SCTLR_SHCFG_NSH 0x3
Will Deacon45ae7cf2013-06-24 18:31:25 +0100263#define SCTLR_S1_ASIDPNE (1 << 12)
264#define SCTLR_CFCFG (1 << 7)
Charan Teja Reddyc682e472017-04-20 19:11:20 +0530265#define SCTLR_HUPCF (1 << 8)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100266#define SCTLR_CFIE (1 << 6)
267#define SCTLR_CFRE (1 << 5)
268#define SCTLR_E (1 << 4)
269#define SCTLR_AFE (1 << 2)
270#define SCTLR_TRE (1 << 1)
271#define SCTLR_M (1 << 0)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100272
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100273#define ARM_MMU500_ACTLR_CPRE (1 << 1)
274
Peng Fan3ca37122016-05-03 21:50:30 +0800275#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
276
Mitchel Humpherys952f40a2015-08-19 12:13:28 -0700277#define ARM_SMMU_IMPL_DEF0(smmu) \
278 ((smmu)->base + (2 * (1 << (smmu)->pgshift)))
279#define ARM_SMMU_IMPL_DEF1(smmu) \
280 ((smmu)->base + (6 * (1 << (smmu)->pgshift)))
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000281#define CB_PAR_F (1 << 0)
282
283#define ATSR_ACTIVE (1 << 0)
284
Will Deacon45ae7cf2013-06-24 18:31:25 +0100285#define RESUME_RETRY (0 << 0)
286#define RESUME_TERMINATE (1 << 0)
287
Will Deacon45ae7cf2013-06-24 18:31:25 +0100288#define TTBCR2_SEP_SHIFT 15
Will Deacon5dc56162015-05-08 17:44:22 +0100289#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100290
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +0100291#define TTBRn_ASID_SHIFT 48
Will Deacon45ae7cf2013-06-24 18:31:25 +0100292
293#define FSR_MULTI (1 << 31)
294#define FSR_SS (1 << 30)
295#define FSR_UUT (1 << 8)
296#define FSR_ASF (1 << 7)
297#define FSR_TLBLKF (1 << 6)
298#define FSR_TLBMCF (1 << 5)
299#define FSR_EF (1 << 4)
300#define FSR_PF (1 << 3)
301#define FSR_AFF (1 << 2)
302#define FSR_TF (1 << 1)
303
Mitchel Humpherys29073202014-07-08 09:52:18 -0700304#define FSR_IGN (FSR_AFF | FSR_ASF | \
305 FSR_TLBMCF | FSR_TLBLKF)
306#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
Will Deaconadaba322013-07-31 19:21:26 +0100307 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100308
309#define FSYNR0_WNR (1 << 4)
310
Will Deacon4cf740b2014-07-14 19:47:39 +0100311static int force_stage;
Robin Murphy25a1c962016-02-10 14:25:33 +0000312module_param(force_stage, int, S_IRUGO);
Will Deacon4cf740b2014-07-14 19:47:39 +0100313MODULE_PARM_DESC(force_stage,
314 "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
Patrick Dalya728cfd2016-11-15 17:49:29 -0800315static bool disable_bypass;
Robin Murphy25a1c962016-02-10 14:25:33 +0000316module_param(disable_bypass, bool, S_IRUGO);
317MODULE_PARM_DESC(disable_bypass,
318 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
Will Deacon4cf740b2014-07-14 19:47:39 +0100319
Robin Murphy09360402014-08-28 17:51:59 +0100320enum arm_smmu_arch_version {
Robin Murphyb7862e32016-04-13 18:13:03 +0100321 ARM_SMMU_V1,
322 ARM_SMMU_V1_64K,
Robin Murphy09360402014-08-28 17:51:59 +0100323 ARM_SMMU_V2,
324};
325
Robin Murphy67b65a32016-04-13 18:12:57 +0100326enum arm_smmu_implementation {
327 GENERIC_SMMU,
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100328 ARM_MMU500,
Robin Murphye086d912016-04-13 18:12:58 +0100329 CAVIUM_SMMUV2,
Patrick Dalyf0d4e212016-06-20 15:50:14 -0700330 QCOM_SMMUV2,
Patrick Daly1f8a2882016-09-12 17:32:05 -0700331 QCOM_SMMUV500,
Robin Murphy67b65a32016-04-13 18:12:57 +0100332};
333
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -0700334struct arm_smmu_impl_def_reg {
335 u32 offset;
336 u32 value;
337};
338
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -0700339/*
340 * attach_count
341 * The SMR and S2CR registers are only programmed when the number of
342 * devices attached to the iommu using these registers is > 0. This
343 * is required for the "SID switch" use case for secure display.
344 * Protected by stream_map_mutex.
345 */
Robin Murphya754fd12016-09-12 17:13:50 +0100346struct arm_smmu_s2cr {
Robin Murphy6668f692016-09-12 17:13:54 +0100347 struct iommu_group *group;
348 int count;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -0700349 int attach_count;
Robin Murphya754fd12016-09-12 17:13:50 +0100350 enum arm_smmu_s2cr_type type;
351 enum arm_smmu_s2cr_privcfg privcfg;
352 u8 cbndx;
Patrick Dalyda688822017-05-17 20:12:48 -0700353 bool cb_handoff;
Robin Murphya754fd12016-09-12 17:13:50 +0100354};
355
356#define s2cr_init_val (struct arm_smmu_s2cr){ \
357 .type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \
Patrick Dalyda688822017-05-17 20:12:48 -0700358 .cb_handoff = false, \
Robin Murphya754fd12016-09-12 17:13:50 +0100359}
360
Will Deacon45ae7cf2013-06-24 18:31:25 +0100361struct arm_smmu_smr {
Will Deacon45ae7cf2013-06-24 18:31:25 +0100362 u16 mask;
363 u16 id;
Robin Murphy468f4942016-09-12 17:13:49 +0100364 bool valid;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100365};
366
Will Deacona9a1b0b2014-05-01 18:05:08 +0100367struct arm_smmu_master_cfg {
Robin Murphyd5b41782016-09-14 15:21:39 +0100368 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +0100369 s16 smendx[];
Will Deacon45ae7cf2013-06-24 18:31:25 +0100370};
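/*
 * The per-master arm_smmu_master_cfg is stashed in dev->iommu_fwspec->iommu_priv;
 * the helpers below look it up and iterate its stream-map entry (SME) indices.
 */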
Robin Murphy468f4942016-09-12 17:13:49 +0100371#define INVALID_SMENDX -1
Robin Murphy06e393e2016-09-12 17:13:55 +0100372#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
373#define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu)
Robin Murphye7595e52016-11-07 18:25:09 +0000374#define fwspec_smendx(fw, i) \
375 (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
Robin Murphy06e393e2016-09-12 17:13:55 +0100376#define for_each_cfg_sme(fw, i, idx) \
Robin Murphye7595e52016-11-07 18:25:09 +0000377 for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100378
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700379/*
380 * Describes the resources required to power the SMMU on and off.
381 * Separate reference counts are provided for atomic and non-atomic
382 * (sleepable) operations.
383 */
384struct arm_smmu_power_resources {
385 struct platform_device *pdev;
386 struct device *dev;
387
388 struct clk **clocks;
389 int num_clocks;
390
391 struct regulator_bulk_data *gdscs;
392 int num_gdscs;
393
394 uint32_t bus_client;
395 struct msm_bus_scale_pdata *bus_dt_data;
396
397 /* Protects power_count */
398 struct mutex power_lock;
399 int power_count;
400
401 /* Protects clock_refs_count */
402 spinlock_t clock_refs_lock;
403 int clock_refs_count;
Prakash Guptafad87ca2017-05-16 12:13:02 +0530404 int regulator_defer;
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700405};
406
Patrick Daly03330cc2017-08-11 14:56:38 -0700407struct arm_smmu_arch_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100408struct arm_smmu_device {
409 struct device *dev;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100410
411 void __iomem *base;
412 unsigned long size;
Will Deaconc757e852014-07-30 11:33:25 +0100413 unsigned long pgshift;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100414
415#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
416#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1)
417#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
418#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
419#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000420#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800421#define ARM_SMMU_FEAT_VMID16 (1 << 6)
Robin Murphy7602b872016-04-28 17:12:09 +0100422#define ARM_SMMU_FEAT_FMT_AARCH64_4K (1 << 7)
423#define ARM_SMMU_FEAT_FMT_AARCH64_16K (1 << 8)
424#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
425#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
426#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100427 u32 features;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000428
429#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -0800430#define ARM_SMMU_OPT_FATAL_ASF (1 << 1)
Patrick Daly59b6d202017-06-12 13:12:15 -0700431#define ARM_SMMU_OPT_SKIP_INIT (1 << 2)
Patrick Dalyc190d932016-08-30 17:23:28 -0700432#define ARM_SMMU_OPT_DYNAMIC (1 << 3)
Patrick Daly4423d3e2017-05-04 18:17:51 -0700433#define ARM_SMMU_OPT_3LVL_TABLES (1 << 4)
Patrick Dalye7069342017-07-11 12:35:55 -0700434#define ARM_SMMU_OPT_NO_ASID_RETENTION (1 << 5)
Patrick Daly62ba1922017-08-30 16:47:18 -0700435#define ARM_SMMU_OPT_DISABLE_ATOS (1 << 6)
Patrick Daly83174c12017-10-26 12:31:15 -0700436#define ARM_SMMU_OPT_MMU500_ERRATA1 (1 << 7)
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000437 u32 options;
Robin Murphy09360402014-08-28 17:51:59 +0100438 enum arm_smmu_arch_version version;
Robin Murphy67b65a32016-04-13 18:12:57 +0100439 enum arm_smmu_implementation model;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100440
441 u32 num_context_banks;
442 u32 num_s2_context_banks;
443 DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
444 atomic_t irptndx;
445
446 u32 num_mapping_groups;
Robin Murphy53867802016-09-12 17:13:48 +0100447 u16 streamid_mask;
448 u16 smr_mask_mask;
Robin Murphy468f4942016-09-12 17:13:49 +0100449 struct arm_smmu_smr *smrs;
Robin Murphya754fd12016-09-12 17:13:50 +0100450 struct arm_smmu_s2cr *s2crs;
Robin Murphy6668f692016-09-12 17:13:54 +0100451 struct mutex stream_map_mutex;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100452
Will Deacon518f7132014-11-14 17:17:54 +0000453 unsigned long va_size;
454 unsigned long ipa_size;
455 unsigned long pa_size;
Robin Murphyd5466352016-05-09 17:20:09 +0100456 unsigned long pgsize_bitmap;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100457
458 u32 num_global_irqs;
459 u32 num_context_irqs;
460 unsigned int *irqs;
461
Patrick Daly8e3371a2017-02-13 22:14:53 -0800462 struct list_head list;
463
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800464 u32 cavium_id_base; /* Specific to Cavium */
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -0700465 /* Specific to QCOM */
466 struct arm_smmu_impl_def_reg *impl_def_attach_registers;
467 unsigned int num_impl_def_attach_registers;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -0800468
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700469 struct arm_smmu_power_resources *pwr;
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700470
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -0800471 spinlock_t atos_lock;
Patrick Dalyc190d932016-08-30 17:23:28 -0700472
473 /* protects idr */
474 struct mutex idr_mutex;
475 struct idr asid_idr;
Patrick Dalyd7476202016-09-08 18:23:28 -0700476
477 struct arm_smmu_arch_ops *arch_ops;
478 void *archdata;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100479};
480
Robin Murphy7602b872016-04-28 17:12:09 +0100481enum arm_smmu_context_fmt {
482 ARM_SMMU_CTX_FMT_NONE,
483 ARM_SMMU_CTX_FMT_AARCH64,
484 ARM_SMMU_CTX_FMT_AARCH32_L,
485 ARM_SMMU_CTX_FMT_AARCH32_S,
Will Deacon45ae7cf2013-06-24 18:31:25 +0100486};
487
488struct arm_smmu_cfg {
Will Deacon45ae7cf2013-06-24 18:31:25 +0100489 u8 cbndx;
490 u8 irptndx;
491 u32 cbar;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600492 u32 procid;
493 u16 asid;
Robin Murphy7602b872016-04-28 17:12:09 +0100494 enum arm_smmu_context_fmt fmt;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100495};
Dan Carpenterfaea13b72013-08-21 09:33:30 +0100496#define INVALID_IRPTNDX 0xff
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600497#define INVALID_CBNDX 0xff
498#define INVALID_ASID 0xffff
Patrick Dalyc190d932016-08-30 17:23:28 -0700499/*
500 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
501 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
502 */
503#define MAX_ASID 0xff
Will Deacon45ae7cf2013-06-24 18:31:25 +0100504
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600505#define ARM_SMMU_CB_ASID(smmu, cfg) ((cfg)->asid)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800506#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
Will Deaconecfadb62013-07-31 19:21:28 +0100507
Will Deaconc752ce42014-06-25 22:46:31 +0100508enum arm_smmu_domain_stage {
509 ARM_SMMU_DOMAIN_S1 = 0,
510 ARM_SMMU_DOMAIN_S2,
511 ARM_SMMU_DOMAIN_NESTED,
512};
513
Patrick Dalyc11d1082016-09-01 15:52:44 -0700514struct arm_smmu_pte_info {
515 void *virt_addr;
516 size_t size;
517 struct list_head entry;
518};
519
Will Deacon45ae7cf2013-06-24 18:31:25 +0100520struct arm_smmu_domain {
Will Deacon44680ee2014-06-25 11:29:12 +0100521 struct arm_smmu_device *smmu;
Patrick Dalyea63baa2017-02-13 17:11:33 -0800522 struct device *dev;
Will Deacon518f7132014-11-14 17:17:54 +0000523 struct io_pgtable_ops *pgtbl_ops;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -0700524 struct io_pgtable_cfg pgtbl_cfg;
Will Deacon518f7132014-11-14 17:17:54 +0000525 spinlock_t pgtbl_lock;
Will Deacon44680ee2014-06-25 11:29:12 +0100526 struct arm_smmu_cfg cfg;
Will Deaconc752ce42014-06-25 22:46:31 +0100527 enum arm_smmu_domain_stage stage;
Will Deacon518f7132014-11-14 17:17:54 +0000528 struct mutex init_mutex; /* Protects smmu pointer */
Patrick Dalyc190d932016-08-30 17:23:28 -0700529 u32 attributes;
Patrick Dalyc11d1082016-09-01 15:52:44 -0700530 u32 secure_vmid;
531 struct list_head pte_info_list;
532 struct list_head unassign_list;
Patrick Dalye271f212016-10-04 13:24:49 -0700533 struct mutex assign_lock;
Patrick Dalyb7dfda72016-10-04 14:42:58 -0700534 struct list_head secure_pool_list;
Joerg Roedel1d672632015-03-26 13:43:10 +0100535 struct iommu_domain domain;
Patrick Dalyda765c62017-09-11 16:31:07 -0700536
537 bool qsmmuv500_errata1_init;
538 bool qsmmuv500_errata1_client;
Patrick Daly23301482017-10-12 16:18:25 -0700539 bool qsmmuv500_errata2_min_align;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100540};
541
Patrick Daly8e3371a2017-02-13 22:14:53 -0800542static DEFINE_SPINLOCK(arm_smmu_devices_lock);
543static LIST_HEAD(arm_smmu_devices);
544
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000545struct arm_smmu_option_prop {
546 u32 opt;
547 const char *prop;
548};
549
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800550static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
551
Robin Murphy7e96c742016-09-14 15:26:46 +0100552static bool using_legacy_binding, using_generic_binding;
553
Mitchel Humpherys29073202014-07-08 09:52:18 -0700554static struct arm_smmu_option_prop arm_smmu_options[] = {
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000555 { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -0800556 { ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
Patrick Daly59b6d202017-06-12 13:12:15 -0700557 { ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
Patrick Dalyc190d932016-08-30 17:23:28 -0700558 { ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
Patrick Daly4423d3e2017-05-04 18:17:51 -0700559 { ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
Patrick Dalye7069342017-07-11 12:35:55 -0700560 { ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
Patrick Daly62ba1922017-08-30 16:47:18 -0700561 { ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" },
Patrick Daly83174c12017-10-26 12:31:15 -0700562 { ARM_SMMU_OPT_MMU500_ERRATA1, "qcom,mmu500-errata-1" },
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000563 { 0, NULL},
564};
565
Mitchel Humpherysb8be4132015-02-06 14:25:10 -0800566static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
567 dma_addr_t iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -0700568static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
569 dma_addr_t iova);
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -0600570static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -0800571
Patrick Dalyc11d1082016-09-01 15:52:44 -0700572static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
573static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
Patrick Dalye271f212016-10-04 13:24:49 -0700574static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -0700575static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);
576
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -0700577static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
578 dma_addr_t iova);
579
Patrick Dalyef6c1dc2016-11-16 14:35:23 -0800580static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);
581
Patrick Dalyda688822017-05-17 20:12:48 -0700582static int arm_smmu_alloc_cb(struct iommu_domain *domain,
583 struct arm_smmu_device *smmu,
584 struct device *dev);
Patrick Dalyda765c62017-09-11 16:31:07 -0700585static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops;
Patrick Dalyda688822017-05-17 20:12:48 -0700586
Joerg Roedel1d672632015-03-26 13:43:10 +0100587static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
588{
589 return container_of(dom, struct arm_smmu_domain, domain);
590}
591
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000592static void parse_driver_options(struct arm_smmu_device *smmu)
593{
594 int i = 0;
Mitchel Humpherys29073202014-07-08 09:52:18 -0700595
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000596 do {
597 if (of_property_read_bool(smmu->dev->of_node,
598 arm_smmu_options[i].prop)) {
599 smmu->options |= arm_smmu_options[i].opt;
Mitchel Humpherysba822582015-10-20 11:37:41 -0700600 dev_dbg(smmu->dev, "option %s\n",
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000601 arm_smmu_options[i].prop);
602 }
603 } while (arm_smmu_options[++i].opt);
604}
605
Patrick Dalyc190d932016-08-30 17:23:28 -0700606static bool is_dynamic_domain(struct iommu_domain *domain)
607{
608 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
609
610 return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
611}
612
Liam Mark53cf2342016-12-20 11:36:07 -0800613static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
614{
615 if (smmu_domain->attributes &
616 (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT))
617 return true;
618 else if (smmu_domain->smmu && smmu_domain->smmu->dev)
619 return smmu_domain->smmu->dev->archdata.dma_coherent;
620 else
621 return false;
622}
623
Patrick Dalye271f212016-10-04 13:24:49 -0700624static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
625{
626 return (smmu_domain->secure_vmid != VMID_INVAL);
627}
628
629static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
630{
631 if (arm_smmu_is_domain_secure(smmu_domain))
632 mutex_lock(&smmu_domain->assign_lock);
633}
634
635static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
636{
637 if (arm_smmu_is_domain_secure(smmu_domain))
638 mutex_unlock(&smmu_domain->assign_lock);
639}
640
Patrick Daly03330cc2017-08-11 14:56:38 -0700641/*
642 * init()
643 * Hook for additional device tree parsing at probe time.
644 *
645 * device_reset()
646 * Hook for one-time architecture-specific register settings.
647 *
648 * iova_to_phys_hard()
649 * Provides debug information. May be called from the context fault irq handler.
650 *
651 * init_context_bank()
652 * Hook for architecture-specific settings which require knowledge of the
653 * dynamically allocated context bank number.
654 *
655 * device_group()
656 * Hook for checking whether a device is compatible with a given group.
657 */
658struct arm_smmu_arch_ops {
659 int (*init)(struct arm_smmu_device *smmu);
660 void (*device_reset)(struct arm_smmu_device *smmu);
661 phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
662 dma_addr_t iova);
663 void (*init_context_bank)(struct arm_smmu_domain *smmu_domain,
664 struct device *dev);
665 int (*device_group)(struct device *dev, struct iommu_group *group);
666};
667
668static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
669{
670 if (!smmu->arch_ops)
671 return 0;
672 if (!smmu->arch_ops->init)
673 return 0;
674 return smmu->arch_ops->init(smmu);
675}
676
677static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
678{
679 if (!smmu->arch_ops)
680 return;
681 if (!smmu->arch_ops->device_reset)
682 return;
683 return smmu->arch_ops->device_reset(smmu);
684}
685
686static void arm_smmu_arch_init_context_bank(
687 struct arm_smmu_domain *smmu_domain, struct device *dev)
688{
689 struct arm_smmu_device *smmu = smmu_domain->smmu;
690
691 if (!smmu->arch_ops)
692 return;
693 if (!smmu->arch_ops->init_context_bank)
694 return;
695 return smmu->arch_ops->init_context_bank(smmu_domain, dev);
696}
697
698static int arm_smmu_arch_device_group(struct device *dev,
699 struct iommu_group *group)
700{
701 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
702 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
703
704 if (!smmu->arch_ops)
705 return 0;
706 if (!smmu->arch_ops->device_group)
707 return 0;
708 return smmu->arch_ops->device_group(dev, group);
709}
710
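/*
 * For PCI devices the "mmu-masters" phandle lives on the host bridge, so walk
 * up to the root bus and use its bridge's OF node; otherwise use the device's
 * own node. The caller must of_node_put() the result.
 */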
Will Deacon8f68f8e2014-07-15 11:27:08 +0100711static struct device_node *dev_get_dev_node(struct device *dev)
Will Deacona9a1b0b2014-05-01 18:05:08 +0100712{
713 if (dev_is_pci(dev)) {
714 struct pci_bus *bus = to_pci_dev(dev)->bus;
Mitchel Humpherys29073202014-07-08 09:52:18 -0700715
Will Deacona9a1b0b2014-05-01 18:05:08 +0100716 while (!pci_is_root_bus(bus))
717 bus = bus->parent;
Robin Murphyd5b41782016-09-14 15:21:39 +0100718 return of_node_get(bus->bridge->parent->of_node);
Will Deacona9a1b0b2014-05-01 18:05:08 +0100719 }
720
Robin Murphyd5b41782016-09-14 15:21:39 +0100721 return of_node_get(dev->of_node);
Will Deacona9a1b0b2014-05-01 18:05:08 +0100722}
723
Robin Murphyd5b41782016-09-14 15:21:39 +0100724static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100725{
Robin Murphyd5b41782016-09-14 15:21:39 +0100726 *((__be32 *)data) = cpu_to_be32(alias);
727 return 0; /* Continue walking */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100728}
729
Robin Murphyd5b41782016-09-14 15:21:39 +0100730static int __find_legacy_master_phandle(struct device *dev, void *data)
Will Deacona9a1b0b2014-05-01 18:05:08 +0100731{
Robin Murphyd5b41782016-09-14 15:21:39 +0100732 struct of_phandle_iterator *it = *(void **)data;
733 struct device_node *np = it->node;
734 int err;
Will Deacona9a1b0b2014-05-01 18:05:08 +0100735
Robin Murphyd5b41782016-09-14 15:21:39 +0100736 of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
737 "#stream-id-cells", 0)
738 if (it->node == np) {
739 *(void **)data = dev;
740 return 1;
Olav Haugan3c8766d2014-08-22 17:12:32 -0700741 }
Robin Murphyd5b41782016-09-14 15:21:39 +0100742 it->node = np;
743 return err == -ENOENT ? 0 : err;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100744}
745
Robin Murphyfe52d4f2016-09-12 17:13:52 +0100746static struct platform_driver arm_smmu_driver;
Robin Murphy06e393e2016-09-12 17:13:55 +0100747static struct iommu_ops arm_smmu_ops;
Robin Murphyfe52d4f2016-09-12 17:13:52 +0100748
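/*
 * Handle the legacy "mmu-masters"/"#stream-id-cells" DT binding: find the
 * SMMU that lists this master and convert its stream IDs into an
 * iommu_fwspec (for PCI, Stream ID is assumed equal to Requester ID).
 */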
Robin Murphy06e393e2016-09-12 17:13:55 +0100749static int arm_smmu_register_legacy_master(struct device *dev,
750 struct arm_smmu_device **smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100751{
Robin Murphy06e393e2016-09-12 17:13:55 +0100752 struct device *smmu_dev;
Robin Murphyd5b41782016-09-14 15:21:39 +0100753 struct device_node *np;
754 struct of_phandle_iterator it;
755 void *data = &it;
Robin Murphy06e393e2016-09-12 17:13:55 +0100756 u32 *sids;
Robin Murphyd5b41782016-09-14 15:21:39 +0100757 __be32 pci_sid;
758 int err = 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100759
Stephen Boydfecdeef2017-03-01 16:53:19 -0800760 memset(&it, 0, sizeof(it));
Robin Murphyd5b41782016-09-14 15:21:39 +0100761 np = dev_get_dev_node(dev);
762 if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
763 of_node_put(np);
764 return -ENODEV;
765 }
766
767 it.node = np;
Robin Murphyfe52d4f2016-09-12 17:13:52 +0100768 err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
769 __find_legacy_master_phandle);
Robin Murphy06e393e2016-09-12 17:13:55 +0100770 smmu_dev = data;
Robin Murphyd5b41782016-09-14 15:21:39 +0100771 of_node_put(np);
772 if (err == 0)
773 return -ENODEV;
774 if (err < 0)
775 return err;
Will Deacon44680ee2014-06-25 11:29:12 +0100776
Robin Murphyd5b41782016-09-14 15:21:39 +0100777 if (dev_is_pci(dev)) {
778 /* "mmu-masters" assumes Stream ID == Requester ID */
779 pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
780 &pci_sid);
781 it.cur = &pci_sid;
782 it.cur_count = 1;
783 }
784
Robin Murphy06e393e2016-09-12 17:13:55 +0100785 err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
786 &arm_smmu_ops);
787 if (err)
788 return err;
789
790 sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
791 if (!sids)
Robin Murphyd5b41782016-09-14 15:21:39 +0100792 return -ENOMEM;
793
Robin Murphy06e393e2016-09-12 17:13:55 +0100794 *smmu = dev_get_drvdata(smmu_dev);
795 of_phandle_iterator_args(&it, sids, it.cur_count);
796 err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
797 kfree(sids);
798 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100799}
800
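/* Claim the first clear bit in an allocation bitmap (e.g. context banks). */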
801static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
802{
803 int idx;
804
805 do {
806 idx = find_next_zero_bit(map, end, start);
807 if (idx == end)
808 return -ENOSPC;
809 } while (test_and_set_bit(idx, map));
810
811 return idx;
812}
813
814static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
815{
816 clear_bit(idx, map);
817}
818
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700819static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700820{
821 int i, ret = 0;
822
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700823 for (i = 0; i < pwr->num_clocks; ++i) {
824 ret = clk_prepare(pwr->clocks[i]);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700825 if (ret) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700826 dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700827 while (i--)
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700828 clk_unprepare(pwr->clocks[i]);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700829 break;
830 }
831 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700832 return ret;
833}
834
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700835static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700836{
837 int i;
838
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700839 for (i = pwr->num_clocks; i; --i)
840 clk_unprepare(pwr->clocks[i - 1]);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700841}
842
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700843static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -0700844{
845 int i, ret = 0;
Patrick Daly8befb662016-08-17 20:03:28 -0700846
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700847 for (i = 0; i < pwr->num_clocks; ++i) {
848 ret = clk_enable(pwr->clocks[i]);
Patrick Daly8befb662016-08-17 20:03:28 -0700849 if (ret) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700850 dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
Patrick Daly8befb662016-08-17 20:03:28 -0700851 while (i--)
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700852 clk_disable(pwr->clocks[i]);
Patrick Daly8befb662016-08-17 20:03:28 -0700853 break;
854 }
855 }
856
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700857 return ret;
858}
Patrick Daly8befb662016-08-17 20:03:28 -0700859
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700860static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
861{
862 int i;
863
864 for (i = pwr->num_clocks; i; --i)
865 clk_disable(pwr->clocks[i - 1]);
866}
867
868static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
869{
870 if (!pwr->bus_client)
871 return 0;
872 return msm_bus_scale_client_update_request(pwr->bus_client, 1);
873}
874
875static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
876{
877 if (!pwr->bus_client)
878 return;
879 WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
880}
881
Patrick Dalyb26f97c2017-08-11 15:24:20 -0700882static int arm_smmu_enable_regulators(struct arm_smmu_power_resources *pwr)
883{
884 struct regulator_bulk_data *consumers;
885 int num_consumers, ret;
886 int i;
887
888 num_consumers = pwr->num_gdscs;
889 consumers = pwr->gdscs;
890 for (i = 0; i < num_consumers; i++) {
891 ret = regulator_enable(consumers[i].consumer);
892 if (ret)
893 goto out;
894 }
895 return 0;
896
897out:
898 i -= 1;
899 for (; i >= 0; i--)
900 regulator_disable(consumers[i].consumer);
901 return ret;
902}
903
Prakash Guptafad87ca2017-05-16 12:13:02 +0530904static int arm_smmu_disable_regulators(struct arm_smmu_power_resources *pwr)
905{
906 struct regulator_bulk_data *consumers;
907 int i;
908 int num_consumers, ret, r;
909
910 num_consumers = pwr->num_gdscs;
911 consumers = pwr->gdscs;
912 for (i = num_consumers - 1; i >= 0; --i) {
913 ret = regulator_disable_deferred(consumers[i].consumer,
914 pwr->regulator_defer);
915 if (ret != 0)
916 goto err;
917 }
918
919 return 0;
920
921err:
922 pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret);
923 for (++i; i < num_consumers; ++i) {
924 r = regulator_enable(consumers[i].consumer);
925 if (r != 0)
926 pr_err("Failed to re-enable %s: %d\n",
927 consumers[i].supply, r);
928 }
929
930 return ret;
931}
932
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700933/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
934static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
935{
936 int ret = 0;
937 unsigned long flags;
938
939 spin_lock_irqsave(&pwr->clock_refs_lock, flags);
940 if (pwr->clock_refs_count > 0) {
941 pwr->clock_refs_count++;
942 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
943 return 0;
944 }
945
946 ret = arm_smmu_enable_clocks(pwr);
947 if (!ret)
948 pwr->clock_refs_count = 1;
949
950 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
Patrick Daly8befb662016-08-17 20:03:28 -0700951 return ret;
952}
953
954/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700955static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -0700956{
Patrick Daly8befb662016-08-17 20:03:28 -0700957 unsigned long flags;
958
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700959 spin_lock_irqsave(&pwr->clock_refs_lock, flags);
960 if (pwr->clock_refs_count == 0) {
961 WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
962 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
963 return;
964
965 } else if (pwr->clock_refs_count > 1) {
966 pwr->clock_refs_count--;
967 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
Patrick Daly8befb662016-08-17 20:03:28 -0700968 return;
969 }
970
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700971 arm_smmu_disable_clocks(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700972
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700973 pwr->clock_refs_count = 0;
974 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
Patrick Daly8befb662016-08-17 20:03:28 -0700975}
976
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700977static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700978{
979 int ret;
980
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700981 mutex_lock(&pwr->power_lock);
982 if (pwr->power_count > 0) {
983 pwr->power_count += 1;
984 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700985 return 0;
986 }
987
Patrick Daly8e2aa1a2017-04-13 17:09:43 -0700988 ret = arm_smmu_request_bus(pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -0700989 if (ret)
990 goto out_unlock;
991
Patrick Dalyb26f97c2017-08-11 15:24:20 -0700992 ret = arm_smmu_enable_regulators(pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -0700993 if (ret)
Patrick Daly8e2aa1a2017-04-13 17:09:43 -0700994 goto out_disable_bus;
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700995
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700996 ret = arm_smmu_prepare_clocks(pwr);
Patrick Daly2764f952016-09-06 19:22:44 -0700997 if (ret)
Patrick Daly8e2aa1a2017-04-13 17:09:43 -0700998 goto out_disable_regulators;
Patrick Daly2764f952016-09-06 19:22:44 -0700999
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001000 pwr->power_count = 1;
1001 mutex_unlock(&pwr->power_lock);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07001002 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001003
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07001004out_disable_regulators:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001005 regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
Patrick Daly8e2aa1a2017-04-13 17:09:43 -07001006out_disable_bus:
1007 arm_smmu_unrequest_bus(pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07001008out_unlock:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001009 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001010 return ret;
1011}
1012
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001013static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001014{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001015 mutex_lock(&pwr->power_lock);
1016 if (pwr->power_count == 0) {
1017 WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
1018 mutex_unlock(&pwr->power_lock);
1019 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001020
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001021 } else if (pwr->power_count > 1) {
1022 pwr->power_count--;
1023 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001024 return;
1025 }
1026
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001027 arm_smmu_unprepare_clocks(pwr);
Prakash Guptafad87ca2017-05-16 12:13:02 +05301028 arm_smmu_disable_regulators(pwr);
Patrick Daly8e2aa1a2017-04-13 17:09:43 -07001029 arm_smmu_unrequest_bus(pwr);
Patrick Daly2e3471e2017-04-13 16:24:33 -07001030 pwr->power_count = 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001031 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001032}
1033
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001034static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -07001035{
1036 int ret;
1037
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001038 ret = arm_smmu_power_on_slow(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001039 if (ret)
1040 return ret;
1041
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001042 ret = arm_smmu_power_on_atomic(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001043 if (ret)
1044 goto out_disable;
1045
1046 return 0;
1047
1048out_disable:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001049 arm_smmu_power_off_slow(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001050 return ret;
1051}
1052
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001053static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -07001054{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001055 arm_smmu_power_off_atomic(pwr);
1056 arm_smmu_power_off_slow(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001057}
1058
1059/*
1060 * Must be used instead of arm_smmu_power_on if it may be called from
1061 * atomic context
1062 */
1063static int arm_smmu_domain_power_on(struct iommu_domain *domain,
1064 struct arm_smmu_device *smmu)
1065{
1066 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1067 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
1068
1069 if (atomic_domain)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001070 return arm_smmu_power_on_atomic(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001071
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001072 return arm_smmu_power_on(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001073}
1074
1075/*
1076 * Must be used instead of arm_smmu_power_on if it may be called from
1077 * atomic context
1078 */
1079static void arm_smmu_domain_power_off(struct iommu_domain *domain,
1080 struct arm_smmu_device *smmu)
1081{
1082 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1083 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
1084
1085 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001086 arm_smmu_power_off_atomic(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001087 return;
1088 }
1089
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001090 arm_smmu_power_off(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001091}
1092
Will Deacon45ae7cf2013-06-24 18:31:25 +01001093/* Wait for any pending TLB invalidations to complete */
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001094static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
1095 int cbndx)
1096{
1097 void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
1098 u32 val;
1099
1100 writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
1101 if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
1102 !(val & TLBSTATUS_SACTIVE),
Mitchel Humpherys9b1b8942015-06-25 18:17:15 -07001103 0, TLB_LOOP_TIMEOUT))
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001104 dev_err(smmu->dev, "TLBSYNC timeout!\n");
1105}
1106
Will Deacon518f7132014-11-14 17:17:54 +00001107static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001108{
1109 int count = 0;
1110 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1111
1112 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
1113 while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
1114 & sTLBGSTATUS_GSACTIVE) {
1115 cpu_relax();
1116 if (++count == TLB_LOOP_TIMEOUT) {
1117 dev_err_ratelimited(smmu->dev,
1118 "TLB sync timed out -- SMMU may be deadlocked\n");
1119 return;
1120 }
1121 udelay(1);
1122 }
1123}
1124
Will Deacon518f7132014-11-14 17:17:54 +00001125static void arm_smmu_tlb_sync(void *cookie)
Will Deacon1463fe42013-07-31 19:21:27 +01001126{
Will Deacon518f7132014-11-14 17:17:54 +00001127 struct arm_smmu_domain *smmu_domain = cookie;
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001128 arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
Will Deacon518f7132014-11-14 17:17:54 +00001129}
1130
Patrick Daly8befb662016-08-17 20:03:28 -07001131/* Must be called with clocks/regulators enabled */
Will Deacon518f7132014-11-14 17:17:54 +00001132static void arm_smmu_tlb_inv_context(void *cookie)
1133{
1134 struct arm_smmu_domain *smmu_domain = cookie;
Will Deacon44680ee2014-06-25 11:29:12 +01001135 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1136 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon1463fe42013-07-31 19:21:27 +01001137 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
Will Deacon518f7132014-11-14 17:17:54 +00001138 void __iomem *base;
Patrick Dalye7069342017-07-11 12:35:55 -07001139 bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
Will Deacon1463fe42013-07-31 19:21:27 +01001140
Patrick Dalye7069342017-07-11 12:35:55 -07001141 if (stage1 && !use_tlbiall) {
Will Deacon1463fe42013-07-31 19:21:27 +01001142 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001143 writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +01001144 base + ARM_SMMU_CB_S1_TLBIASID);
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001145 arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
Patrick Dalye7069342017-07-11 12:35:55 -07001146 } else if (stage1 && use_tlbiall) {
1147 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1148 writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
1149 arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001150 } else {
1151 base = ARM_SMMU_GR0(smmu);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001152 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +01001153 base + ARM_SMMU_GR0_TLBIVMID);
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001154 __arm_smmu_tlb_sync(smmu);
Will Deacon1463fe42013-07-31 19:21:27 +01001155 }
Will Deacon1463fe42013-07-31 19:21:27 +01001156}
1157
Will Deacon518f7132014-11-14 17:17:54 +00001158static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
Robin Murphy06c610e2015-12-07 18:18:53 +00001159 size_t granule, bool leaf, void *cookie)
Will Deacon518f7132014-11-14 17:17:54 +00001160{
1161 struct arm_smmu_domain *smmu_domain = cookie;
1162 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1163 struct arm_smmu_device *smmu = smmu_domain->smmu;
1164 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1165 void __iomem *reg;
Patrick Dalye7069342017-07-11 12:35:55 -07001166 bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
Will Deacon518f7132014-11-14 17:17:54 +00001167
Patrick Dalye7069342017-07-11 12:35:55 -07001168 if (stage1 && !use_tlbiall) {
Will Deacon518f7132014-11-14 17:17:54 +00001169 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1170 reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
1171
Robin Murphy7602b872016-04-28 17:12:09 +01001172 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001173 iova &= ~12UL;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001174 iova |= ARM_SMMU_CB_ASID(smmu, cfg);
Robin Murphy75df1382015-12-07 18:18:52 +00001175 do {
1176 writel_relaxed(iova, reg);
1177 iova += granule;
1178 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001179 } else {
1180 iova >>= 12;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001181 iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
Robin Murphy75df1382015-12-07 18:18:52 +00001182 do {
1183 writeq_relaxed(iova, reg);
1184 iova += granule >> 12;
1185 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001186 }
Patrick Dalye7069342017-07-11 12:35:55 -07001187 } else if (stage1 && use_tlbiall) {
1188 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1189 reg += ARM_SMMU_CB_S1_TLBIALL;
1190 writel_relaxed(0, reg);
Will Deacon518f7132014-11-14 17:17:54 +00001191 } else if (smmu->version == ARM_SMMU_V2) {
1192 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1193 reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
1194 ARM_SMMU_CB_S2_TLBIIPAS2;
Robin Murphy75df1382015-12-07 18:18:52 +00001195 iova >>= 12;
1196 do {
Robin Murphyf9a05f02016-04-13 18:13:01 +01001197 smmu_write_atomic_lq(iova, reg);
Robin Murphy75df1382015-12-07 18:18:52 +00001198 iova += granule >> 12;
1199 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001200 } else {
1201 reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001202 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
Will Deacon518f7132014-11-14 17:17:54 +00001203 }
1204}
1205
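/*
 * Secure domains keep a pool of already-prepared page-table chunks;
 * arm_smmu_alloc_pages_exact() reuses a matching-size chunk from this pool
 * before falling back to a fresh allocation.
 */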
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001206struct arm_smmu_secure_pool_chunk {
1207 void *addr;
1208 size_t size;
1209 struct list_head list;
1210};
1211
1212static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
1213 size_t size)
1214{
1215 struct arm_smmu_secure_pool_chunk *it;
1216
1217 list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
1218 if (it->size == size) {
1219 void *addr = it->addr;
1220
1221 list_del(&it->list);
1222 kfree(it);
1223 return addr;
1224 }
1225 }
1226
1227 return NULL;
1228}
1229
1230static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
1231 void *addr, size_t size)
1232{
1233 struct arm_smmu_secure_pool_chunk *chunk;
1234
1235 chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
1236 if (!chunk)
1237 return -ENOMEM;
1238
1239 chunk->addr = addr;
1240 chunk->size = size;
1241 memset(addr, 0, size);
1242 list_add(&chunk->list, &smmu_domain->secure_pool_list);
1243
1244 return 0;
1245}
1246
1247static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
1248{
1249 struct arm_smmu_secure_pool_chunk *it, *i;
1250
1251 list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
1252 arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
1253 /* pages will be freed later (after being unassigned) */
Prakash Gupta8e827be2017-10-04 12:37:11 +05301254 list_del(&it->list);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001255 kfree(it);
1256 }
1257}
1258
Patrick Dalyc11d1082016-09-01 15:52:44 -07001259static void *arm_smmu_alloc_pages_exact(void *cookie,
1260 size_t size, gfp_t gfp_mask)
1261{
1262 int ret;
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001263 void *page;
1264 struct arm_smmu_domain *smmu_domain = cookie;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001265
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001266 if (!arm_smmu_is_domain_secure(smmu_domain))
1267 return alloc_pages_exact(size, gfp_mask);
1268
1269 page = arm_smmu_secure_pool_remove(smmu_domain, size);
1270 if (page)
1271 return page;
1272
1273 page = alloc_pages_exact(size, gfp_mask);
1274 if (page) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07001275 ret = arm_smmu_prepare_pgtable(page, cookie);
1276 if (ret) {
1277 free_pages_exact(page, size);
1278 return NULL;
1279 }
1280 }
1281
1282 return page;
1283}
1284
1285static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
1286{
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001287 struct arm_smmu_domain *smmu_domain = cookie;
1288
1289 if (!arm_smmu_is_domain_secure(smmu_domain)) {
1290 free_pages_exact(virt, size);
1291 return;
1292 }
1293
1294 if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
1295 arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001296}
1297
Will Deacon518f7132014-11-14 17:17:54 +00001298static struct iommu_gather_ops arm_smmu_gather_ops = {
1299 .tlb_flush_all = arm_smmu_tlb_inv_context,
1300 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
1301 .tlb_sync = arm_smmu_tlb_sync,
Patrick Dalyc11d1082016-09-01 15:52:44 -07001302 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
1303 .free_pages_exact = arm_smmu_free_pages_exact,
Will Deacon518f7132014-11-14 17:17:54 +00001304};
1305
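/*
 * Double-check an unhandled fault with the hardware ATOS engine: translate
 * the faulting IOVA, invalidate the domain's TLB entries, then translate
 * again.  If the two results differ, the fault was most likely caused by a
 * stale TLB entry rather than by a missing mapping.
 */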
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001306static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
1307 dma_addr_t iova, u32 fsr)
1308{
1309 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001310 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyda765c62017-09-11 16:31:07 -07001311 const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001312 phys_addr_t phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07001313 phys_addr_t phys_post_tlbiall;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001314
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001315 phys = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyda765c62017-09-11 16:31:07 -07001316 tlb->tlb_flush_all(smmu_domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001317 phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001318
Patrick Dalyad441dd2016-09-15 15:50:46 -07001319 if (phys != phys_post_tlbiall) {
1320 dev_err(smmu->dev,
1321 "ATOS results differed across TLBIALL...\n"
1322 "Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
1323 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001324
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001325 return (phys == 0 ? phys_post_tlbiall : phys);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001326}
1327
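/*
 * Context-bank fault handler (runs as the threaded half of the IRQ).
 * Flow, as implemented below: vote the SMMU on, read FSR/FSYNR0/FAR for
 * the context bank, let any client fault handler deal with the fault via
 * report_iommu_fault(), and only log the syndrome (plus a software and a
 * hardware ATOS lookup) if it remains unhandled.  Unless the client
 * returned -EBUSY, the FSR is then cleared and any stalled transaction is
 * resumed or terminated (RESUME_TERMINATE in both paths here).
 */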
Will Deacon45ae7cf2013-06-24 18:31:25 +01001328static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
1329{
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001330 int flags, ret, tmp;
Patrick Daly5ba28112016-08-30 19:18:52 -07001331 u32 fsr, fsynr, resume;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001332 unsigned long iova;
1333 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +01001334 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001335 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1336 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001337 void __iomem *cb_base;
Shalaj Jain04059c52015-03-03 13:34:59 -08001338 void __iomem *gr1_base;
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001339 bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001340 phys_addr_t phys_soft;
Shalaj Jain04059c52015-03-03 13:34:59 -08001341 u32 frsynra;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001342 bool non_fatal_fault = !!(smmu_domain->attributes &
Sudarshan Rajagopalanf4464e02017-08-10 14:30:39 -07001343 (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001344
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001345 static DEFINE_RATELIMIT_STATE(_rs,
1346 DEFAULT_RATELIMIT_INTERVAL,
1347 DEFAULT_RATELIMIT_BURST);
1348
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001349 ret = arm_smmu_power_on(smmu->pwr);
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001350 if (ret)
1351 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001352
Shalaj Jain04059c52015-03-03 13:34:59 -08001353 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001354 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001355 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
1356
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001357 if (!(fsr & FSR_FAULT)) {
1358 ret = IRQ_NONE;
1359 goto out_power_off;
1360 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001361
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001362 if (fatal_asf && (fsr & FSR_ASF)) {
1363 dev_err(smmu->dev,
1364 "Took an address size fault. Refusing to recover.\n");
1365 BUG();
1366 }
1367
Will Deacon45ae7cf2013-06-24 18:31:25 +01001368 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
Patrick Daly5ba28112016-08-30 19:18:52 -07001369 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001370 if (fsr & FSR_TF)
1371 flags |= IOMMU_FAULT_TRANSLATION;
1372 if (fsr & FSR_PF)
1373 flags |= IOMMU_FAULT_PERMISSION;
Mitchel Humpherysdd75e332015-08-19 11:02:33 -07001374 if (fsr & FSR_EF)
1375 flags |= IOMMU_FAULT_EXTERNAL;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001376 if (fsr & FSR_SS)
1377 flags |= IOMMU_FAULT_TRANSACTION_STALLED;
Patrick Daly5ba28112016-08-30 19:18:52 -07001378
Robin Murphyf9a05f02016-04-13 18:13:01 +01001379 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001380 phys_soft = arm_smmu_iova_to_phys(domain, iova);
Shalaj Jain04059c52015-03-03 13:34:59 -08001381 frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
1382 frsynra &= CBFRSYNRA_SID_MASK;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001383 tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
1384 if (!tmp || (tmp == -EBUSY)) {
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001385 dev_dbg(smmu->dev,
1386 "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1387 iova, fsr, fsynr, cfg->cbndx);
1388 dev_dbg(smmu->dev,
1389 "soft iova-to-phys=%pa\n", &phys_soft);
Patrick Daly5ba28112016-08-30 19:18:52 -07001390 ret = IRQ_HANDLED;
Shrenuj Bansald5083c02015-09-18 14:59:09 -07001391 resume = RESUME_TERMINATE;
Patrick Daly5ba28112016-08-30 19:18:52 -07001392 } else {
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001393 phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
1394 fsr);
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001395 if (__ratelimit(&_rs)) {
1396 dev_err(smmu->dev,
1397 "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1398 iova, fsr, fsynr, cfg->cbndx);
1399 dev_err(smmu->dev, "FAR = %016lx\n",
1400 (unsigned long)iova);
1401 dev_err(smmu->dev,
1402 "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
1403 fsr,
1404 (fsr & 0x02) ? "TF " : "",
1405 (fsr & 0x04) ? "AFF " : "",
1406 (fsr & 0x08) ? "PF " : "",
1407 (fsr & 0x10) ? "EF " : "",
1408 (fsr & 0x20) ? "TLBMCF " : "",
1409 (fsr & 0x40) ? "TLBLKF " : "",
1410 (fsr & 0x80) ? "MHF " : "",
1411 (fsr & 0x40000000) ? "SS " : "",
1412 (fsr & 0x80000000) ? "MULTI " : "");
1413 dev_err(smmu->dev,
1414 "soft iova-to-phys=%pa\n", &phys_soft);
Mitchel Humpherysd03b65d2015-11-05 11:50:29 -08001415 if (!phys_soft)
1416 dev_err(smmu->dev,
1417 "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
1418 dev_name(smmu->dev));
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001419 if (phys_atos)
1420 dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
1421 &phys_atos);
1422 else
1423 dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001424 dev_err(smmu->dev, "SID=0x%x\n", frsynra);
1425 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001426 ret = IRQ_NONE;
1427 resume = RESUME_TERMINATE;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001428 if (!non_fatal_fault) {
1429 dev_err(smmu->dev,
1430 "Unhandled arm-smmu context fault!\n");
1431 BUG();
1432 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001433 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001434
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001435 /*
1436 * If the client returns -EBUSY, do not clear FSR and do not RESUME
1437 * if stalled. This is required to keep the IOMMU client stalled on
1438 * the outstanding fault. This gives the client a chance to take any
1439 * debug action and then terminate the stalled transaction.
1440 * So, the sequence in case of stall on fault should be:
1441 * 1) Do not clear FSR or write to RESUME here
1442 * 2) Client takes any debug action
1443 * 3) Client terminates the stalled transaction and resumes the IOMMU
1444 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1445 * not before so that the fault remains outstanding. This ensures
1446 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1447 * need to be terminated.
1448 */
1449 if (tmp != -EBUSY) {
1450 /* Clear the faulting FSR */
1451 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001452
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001453 /*
1454 * Barrier required to ensure that the FSR is cleared
1455 * before resuming SMMU operation
1456 */
1457 wmb();
1458
1459 /* Retry or terminate any stalled transactions */
1460 if (fsr & FSR_SS)
1461 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1462 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001463
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001464out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001465 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001466
Patrick Daly5ba28112016-08-30 19:18:52 -07001467 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001468}
1469
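/*
 * Global fault handler: with the SMMU powered, snapshot sGFSR and the
 * three global syndrome registers, return quietly if no fault is actually
 * pending, otherwise print a rate-limited report and acknowledge the fault
 * by writing sGFSR back.
 */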
1470static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1471{
1472 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1473 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001474 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001475
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001476 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001477 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001478
Will Deacon45ae7cf2013-06-24 18:31:25 +01001479 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1480 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1481 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1482 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1483
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001484 if (!gfsr) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001485 arm_smmu_power_off(smmu->pwr);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001486 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001487 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001488
Will Deacon45ae7cf2013-06-24 18:31:25 +01001489 dev_err_ratelimited(smmu->dev,
1490 "Unexpected global fault, this could be serious\n");
1491 dev_err_ratelimited(smmu->dev,
1492 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1493 gfsr, gfsynr0, gfsynr1, gfsynr2);
1494
1495 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001496 arm_smmu_power_off(smmu->pwr);
Will Deaconadaba322013-07-31 19:21:26 +01001497 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001498}
1499
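/*
 * Program one context bank from the io-pgtable configuration.  Order of
 * writes below: CBA2R (AArch32/AArch64 format and, where supported, the
 * 16-bit VMID), CBAR (type, IRQ index, shareability or 8-bit VMID),
 * TTBR0/TTBR1 (tagged with the ASID for stage 1), TTBCR/TTBCR2, the MAIRs
 * (or PRRR/NMRR for the short-descriptor format), and finally SCTLR to
 * switch the bank on.
 */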
Will Deacon518f7132014-11-14 17:17:54 +00001500static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1501 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001502{
Robin Murphyb94df6f2016-08-11 17:44:06 +01001503 u32 reg, reg2;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001504 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001505 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +01001506 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1507 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001508 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001509
Will Deacon45ae7cf2013-06-24 18:31:25 +01001510 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001511 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1512 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001513
Will Deacon4a1c93c2015-03-04 12:21:03 +00001514 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001515 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1516 reg = CBA2R_RW64_64BIT;
1517 else
1518 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001519 /* 16-bit VMIDs live in CBA2R */
1520 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001521 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001522
Will Deacon4a1c93c2015-03-04 12:21:03 +00001523 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
1524 }
1525
Will Deacon45ae7cf2013-06-24 18:31:25 +01001526 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001527 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001528 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001529 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001530
Will Deacon57ca90f2014-02-06 14:59:05 +00001531 /*
1532 * Use the weakest shareability/memory types, so they are
1533 * overridden by the ttbcr/pte.
1534 */
1535 if (stage1) {
1536 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1537 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001538 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1539 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001540 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001541 }
Will Deacon44680ee2014-06-25 11:29:12 +01001542 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001543
Will Deacon518f7132014-11-14 17:17:54 +00001544 /* TTBRs */
1545 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001546 u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001547
Robin Murphyb94df6f2016-08-11 17:44:06 +01001548 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1549 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
1550 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
1551 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
1552 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
1553 writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
1554 } else {
1555 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1556 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1557 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
1558 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
1559 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1560 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
1561 }
Will Deacon518f7132014-11-14 17:17:54 +00001562 } else {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001563 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001564 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +00001565 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001566
Will Deacon518f7132014-11-14 17:17:54 +00001567 /* TTBCR */
1568 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001569 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1570 reg = pgtbl_cfg->arm_v7s_cfg.tcr;
1571 reg2 = 0;
1572 } else {
1573 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1574 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
1575 reg2 |= TTBCR2_SEP_UPSTREAM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001576 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001577 if (smmu->version > ARM_SMMU_V1)
1578 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001579 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001580 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001581 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001582 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001583
Will Deacon518f7132014-11-14 17:17:54 +00001584 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001585 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001586 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1587 reg = pgtbl_cfg->arm_v7s_cfg.prrr;
1588 reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
1589 } else {
1590 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1591 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1592 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001593 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001594 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001595 }
1596
Will Deacon45ae7cf2013-06-24 18:31:25 +01001597 /* SCTLR */
Robin Murphyb94df6f2016-08-11 17:44:06 +01001598 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001599
Patrick Daly7f377fe2017-10-06 17:37:10 -07001600 /* Ensure bypass transactions are Non-shareable */
1601 reg |= SCTLR_SHCFG_NSH << SCTLR_SHCFG_SHIFT;
1602
Charan Teja Reddyc682e472017-04-20 19:11:20 +05301603 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_CB_STALL_DISABLE)) {
1604 reg &= ~SCTLR_CFCFG;
1605 reg |= SCTLR_HUPCF;
1606 }
1607
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001608 if ((!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
1609 !(smmu_domain->attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) ||
1610 !stage1)
Patrick Dalye62d3362016-03-15 18:58:28 -07001611 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001612 if (stage1)
1613 reg |= SCTLR_S1_ASIDPNE;
1614#ifdef __BIG_ENDIAN
1615 reg |= SCTLR_E;
1616#endif
Will Deacon25724842013-08-21 13:49:53 +01001617 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001618}
1619
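/*
 * ASID assignment: ordinary domains simply use (context bank index + 1);
 * dynamic domains draw a unique value from the per-SMMU IDR, starting
 * above the context-bank range, so their TLB entries stay distinct from
 * those of statically configured domains.
 */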
Patrick Dalyc190d932016-08-30 17:23:28 -07001620static int arm_smmu_init_asid(struct iommu_domain *domain,
1621 struct arm_smmu_device *smmu)
1622{
1623 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1624 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1625 bool dynamic = is_dynamic_domain(domain);
1626 int ret;
1627
1628 if (!dynamic) {
1629 cfg->asid = cfg->cbndx + 1;
1630 } else {
1631 mutex_lock(&smmu->idr_mutex);
1632 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1633 smmu->num_context_banks + 2,
1634 MAX_ASID + 1, GFP_KERNEL);
1635
1636 mutex_unlock(&smmu->idr_mutex);
1637 if (ret < 0) {
1638 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1639 ret);
1640 return ret;
1641 }
1642 cfg->asid = ret;
1643 }
1644 return 0;
1645}
1646
1647static void arm_smmu_free_asid(struct iommu_domain *domain)
1648{
1649 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1650 struct arm_smmu_device *smmu = smmu_domain->smmu;
1651 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1652 bool dynamic = is_dynamic_domain(domain);
1653
1654 if (cfg->asid == INVALID_ASID || !dynamic)
1655 return;
1656
1657 mutex_lock(&smmu->idr_mutex);
1658 idr_remove(&smmu->asid_idr, cfg->asid);
1659 mutex_unlock(&smmu->idr_mutex);
1660}
1661
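/*
 * Finalise a domain on first attach (under init_mutex).  Roughly, in the
 * order below: reject dynamic domains on SMMUs without
 * ARM_SMMU_OPT_DYNAMIC, choose the translation stage and context format,
 * pick the io-pgtable format (including the "fast" and upstream-hint
 * variants), allocate a context bank, build the page tables, hyp-assign
 * any tables created in the process for secure domains, allocate an ASID
 * and, for non-dynamic domains only, program the context bank and request
 * its fault IRQ.
 */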
Will Deacon45ae7cf2013-06-24 18:31:25 +01001662static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Patrick Dalyea63baa2017-02-13 17:11:33 -08001663 struct arm_smmu_device *smmu,
1664 struct device *dev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001665{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001666 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001667 unsigned long ias, oas;
1668 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001669 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001670 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001671 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001672 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyce6786f2016-11-09 14:19:23 -08001673 unsigned long quirks = 0;
Patrick Dalyc190d932016-08-30 17:23:28 -07001674 bool dynamic;
Patrick Dalyda765c62017-09-11 16:31:07 -07001675 const struct iommu_gather_ops *tlb;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001676
Will Deacon518f7132014-11-14 17:17:54 +00001677 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001678 if (smmu_domain->smmu)
1679 goto out_unlock;
1680
Patrick Dalyc190d932016-08-30 17:23:28 -07001681 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1682 smmu_domain->cfg.asid = INVALID_ASID;
1683
Patrick Dalyc190d932016-08-30 17:23:28 -07001684 dynamic = is_dynamic_domain(domain);
1685 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1686 dev_err(smmu->dev, "dynamic domains not supported\n");
1687 ret = -EPERM;
1688 goto out_unlock;
1689 }
1690
Will Deaconc752ce42014-06-25 22:46:31 +01001691 /*
1692 * Mapping the requested stage onto what we support is surprisingly
1693 * complicated, mainly because the spec allows S1+S2 SMMUs without
1694 * support for nested translation. That means we end up with the
1695 * following table:
1696 *
1697 * Requested   Supported   Actual
1698 *     S1          N          S1
1699 *     S1        S1+S2        S1
1700 *     S1          S2         S2
1701 *     S1          S1         S1
1702 *     N           N          N
1703 *     N         S1+S2        S2
1704 *     N           S2         S2
1705 *     N           S1         S1
1706 *
1707 * Note that you can't actually request stage-2 mappings.
1708 */
1709 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1710 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1711 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1712 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1713
Robin Murphy7602b872016-04-28 17:12:09 +01001714 /*
1715 * Choosing a suitable context format is even more fiddly. Until we
1716 * grow some way for the caller to express a preference, and/or move
1717 * the decision into the io-pgtable code where it arguably belongs,
1718 * just aim for the closest thing to the rest of the system, and hope
1719 * that the hardware isn't esoteric enough that we can't assume AArch64
1720 * support to be a superset of AArch32 support...
1721 */
1722 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1723 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001724 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
1725 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
1726 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
1727 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
1728 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +01001729 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1730 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1731 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1732 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1733 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1734
1735 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1736 ret = -EINVAL;
1737 goto out_unlock;
1738 }
1739
Will Deaconc752ce42014-06-25 22:46:31 +01001740 switch (smmu_domain->stage) {
1741 case ARM_SMMU_DOMAIN_S1:
1742 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1743 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001744 ias = smmu->va_size;
1745 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001746 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001747 fmt = ARM_64_LPAE_S1;
Patrick Daly4423d3e2017-05-04 18:17:51 -07001748 if (smmu->options & ARM_SMMU_OPT_3LVL_TABLES)
1749 ias = min(ias, 39UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001750 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +00001751 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001752 ias = min(ias, 32UL);
1753 oas = min(oas, 40UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001754 } else {
1755 fmt = ARM_V7S;
1756 ias = min(ias, 32UL);
1757 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +01001758 }
Will Deaconc752ce42014-06-25 22:46:31 +01001759 break;
1760 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001761 /*
1762 * We will likely want to change this if/when KVM gets
1763 * involved.
1764 */
Will Deaconc752ce42014-06-25 22:46:31 +01001765 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001766 cfg->cbar = CBAR_TYPE_S2_TRANS;
1767 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001768 ias = smmu->ipa_size;
1769 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001770 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001771 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001772 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001773 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001774 ias = min(ias, 40UL);
1775 oas = min(oas, 40UL);
1776 }
Will Deaconc752ce42014-06-25 22:46:31 +01001777 break;
1778 default:
1779 ret = -EINVAL;
1780 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001781 }
1782
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001783 if (is_fast)
1784 fmt = ARM_V8L_FAST;
1785
Patrick Dalyce6786f2016-11-09 14:19:23 -08001786 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
1787 quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
Liam Mark53cf2342016-12-20 11:36:07 -08001788 if (is_iommu_pt_coherent(smmu_domain))
1789 quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
Patrick Daly49ccf332017-09-27 15:10:29 -07001790 if ((quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT) &&
1791 (smmu->model == QCOM_SMMUV500))
1792 quirks |= IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001793
Patrick Dalyda765c62017-09-11 16:31:07 -07001794 tlb = &arm_smmu_gather_ops;
Patrick Daly83174c12017-10-26 12:31:15 -07001795 if (smmu->options & ARM_SMMU_OPT_MMU500_ERRATA1)
Patrick Dalyda765c62017-09-11 16:31:07 -07001796 tlb = &qsmmuv500_errata1_smmu_gather_ops;
1797
Patrick Dalyda688822017-05-17 20:12:48 -07001798 ret = arm_smmu_alloc_cb(domain, smmu, dev);
1799 if (ret < 0)
1800 goto out_unlock;
1801 cfg->cbndx = ret;
1802
Robin Murphyb7862e32016-04-13 18:13:03 +01001803 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001804 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1805 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001806 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001807 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001808 }
1809
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001810 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
Patrick Dalyce6786f2016-11-09 14:19:23 -08001811 .quirks = quirks,
Robin Murphyd5466352016-05-09 17:20:09 +01001812 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +00001813 .ias = ias,
1814 .oas = oas,
Patrick Dalyda765c62017-09-11 16:31:07 -07001815 .tlb = tlb,
Robin Murphy2df7a252015-07-29 19:46:06 +01001816 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +00001817 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001818
Will Deacon518f7132014-11-14 17:17:54 +00001819 smmu_domain->smmu = smmu;
Patrick Dalyea63baa2017-02-13 17:11:33 -08001820 smmu_domain->dev = dev;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001821 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1822 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001823 if (!pgtbl_ops) {
1824 ret = -ENOMEM;
1825 goto out_clear_smmu;
1826 }
1827
Patrick Dalyc11d1082016-09-01 15:52:44 -07001828 /*
1829 * assign any page table memory that might have been allocated
1830 * during alloc_io_pgtable_ops
1831 */
Patrick Dalye271f212016-10-04 13:24:49 -07001832 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001833 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001834 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001835
Robin Murphyd5466352016-05-09 17:20:09 +01001836 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001837 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Robin Murphyd7a8d042016-09-12 17:13:58 +01001838 domain->geometry.aperture_end = (1UL << ias) - 1;
1839 domain->geometry.force_aperture = true;
Will Deacon518f7132014-11-14 17:17:54 +00001840
Patrick Dalyc190d932016-08-30 17:23:28 -07001841 /* Assign an asid */
1842 ret = arm_smmu_init_asid(domain, smmu);
1843 if (ret)
1844 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001845
Patrick Dalyc190d932016-08-30 17:23:28 -07001846 if (!dynamic) {
1847 /* Initialise the context bank with our page table cfg */
1848 arm_smmu_init_context_bank(smmu_domain,
1849 &smmu_domain->pgtbl_cfg);
1850
Patrick Daly03330cc2017-08-11 14:56:38 -07001851 arm_smmu_arch_init_context_bank(smmu_domain, dev);
1852
Patrick Dalyc190d932016-08-30 17:23:28 -07001853 /*
1854 * Request context fault interrupt. Do this last to avoid the
1855 * handler seeing a half-initialised domain state.
1856 */
1857 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1858 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001859 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1860 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001861 if (ret < 0) {
1862 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1863 cfg->irptndx, irq);
1864 cfg->irptndx = INVALID_IRPTNDX;
1865 goto out_clear_smmu;
1866 }
1867 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001868 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001869 }
Will Deacon518f7132014-11-14 17:17:54 +00001870 mutex_unlock(&smmu_domain->init_mutex);
1871
1872 /* Publish page table ops for map/unmap */
1873 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001874 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001875
Will Deacon518f7132014-11-14 17:17:54 +00001876out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001877 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001878 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001879out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001880 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001881 return ret;
1882}
1883
Patrick Daly77db4f92016-10-14 15:34:10 -07001884static void arm_smmu_domain_reinit(struct arm_smmu_domain *smmu_domain)
1885{
1886 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1887 smmu_domain->cfg.cbndx = INVALID_CBNDX;
1888 smmu_domain->secure_vmid = VMID_INVAL;
1889}
1890
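/*
 * Teardown mirrors arm_smmu_init_domain_context().  Dynamic domains skip
 * the context-bank and IRQ teardown; regular domains disable the bank by
 * clearing SCTLR and free the context IRQ first.  Both paths then free the
 * io-pgtable, drain the secure pool and return hyp-assigned pages to HLOS
 * before the remaining context-bank/ASID resources are released.
 */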
Will Deacon45ae7cf2013-06-24 18:31:25 +01001891static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1892{
Joerg Roedel1d672632015-03-26 13:43:10 +01001893 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001894 struct arm_smmu_device *smmu = smmu_domain->smmu;
1895 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01001896 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001897 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07001898 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001899 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001900
Robin Murphy7e96c742016-09-14 15:26:46 +01001901 if (!smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001902 return;
1903
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001904 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001905 if (ret) {
1906 WARN_ONCE(ret, "Whoops, powering on smmu %p failed. Leaking context bank\n",
1907 smmu);
1908 return;
1909 }
1910
Patrick Dalyc190d932016-08-30 17:23:28 -07001911 dynamic = is_dynamic_domain(domain);
1912 if (dynamic) {
1913 arm_smmu_free_asid(domain);
1914 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001915 arm_smmu_power_off(smmu->pwr);
Patrick Dalye271f212016-10-04 13:24:49 -07001916 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001917 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001918 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001919 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly77db4f92016-10-14 15:34:10 -07001920 arm_smmu_domain_reinit(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001921 return;
1922 }
1923
Will Deacon518f7132014-11-14 17:17:54 +00001924 /*
1925 * Disable the context bank and free its page tables before
1926 * releasing the bank itself.
1927 */
Will Deacon44680ee2014-06-25 11:29:12 +01001928 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001929 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01001930
Will Deacon44680ee2014-06-25 11:29:12 +01001931 if (cfg->irptndx != INVALID_IRPTNDX) {
1932 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08001933 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001934 }
1935
Markus Elfring44830b02015-11-06 18:32:41 +01001936 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07001937 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001938 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001939 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001940 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001941 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001942
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001943 arm_smmu_power_off(smmu->pwr);
Patrick Daly77db4f92016-10-14 15:34:10 -07001944 arm_smmu_domain_reinit(smmu_domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001945}
1946
Joerg Roedel1d672632015-03-26 13:43:10 +01001947static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001948{
1949 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001950
Patrick Daly09801312016-08-29 17:02:52 -07001951 /* Do not support DOMAIN_DMA for now */
1952 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01001953 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001954 /*
1955 * Allocate the domain and initialise some of its data structures.
1956 * We can't really do anything meaningful until we've added a
1957 * master.
1958 */
1959 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1960 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001961 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001962
Robin Murphy7e96c742016-09-14 15:26:46 +01001963 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
1964 iommu_get_dma_cookie(&smmu_domain->domain))) {
Robin Murphy9adb9592016-01-26 18:06:36 +00001965 kfree(smmu_domain);
1966 return NULL;
1967 }
1968
Will Deacon518f7132014-11-14 17:17:54 +00001969 mutex_init(&smmu_domain->init_mutex);
1970 spin_lock_init(&smmu_domain->pgtbl_lock);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001971 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
1972 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07001973 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001974 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Patrick Daly77db4f92016-10-14 15:34:10 -07001975 arm_smmu_domain_reinit(smmu_domain);
Joerg Roedel1d672632015-03-26 13:43:10 +01001976
1977 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001978}
1979
Joerg Roedel1d672632015-03-26 13:43:10 +01001980static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001981{
Joerg Roedel1d672632015-03-26 13:43:10 +01001982 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01001983
1984 /*
1985 * Free the domain resources. We assume that all devices have
1986 * already been detached.
1987 */
Robin Murphy9adb9592016-01-26 18:06:36 +00001988 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001989 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001990 kfree(smmu_domain);
1991}
1992
Robin Murphy468f4942016-09-12 17:13:49 +01001993static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
1994{
1995 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphyd5b41782016-09-14 15:21:39 +01001996 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
Robin Murphy468f4942016-09-12 17:13:49 +01001997
1998 if (smr->valid)
1999 reg |= SMR_VALID;
2000 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
2001}
2002
Robin Murphya754fd12016-09-12 17:13:50 +01002003static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
2004{
2005 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
2006 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
2007 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
Patrick Daly7f377fe2017-10-06 17:37:10 -07002008 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT |
2009 S2CR_SHCFG_NSH << S2CR_SHCFG_SHIFT;
Robin Murphya754fd12016-09-12 17:13:50 +01002010
2011 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
2012}
2013
2014static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
2015{
2016 arm_smmu_write_s2cr(smmu, idx);
2017 if (smmu->smrs)
2018 arm_smmu_write_smr(smmu, idx);
2019}
2020
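/*
 * A worked example of the matching rules in arm_smmu_find_sme() (values
 * purely illustrative): with an existing valid SMR of id=0x400, mask=0xff
 * (streams 0x400-0x4ff), a request for id=0x420, mask=0x0f is entirely
 * contained and reuses that index; a request for id=0x4f0, mask=0xf00
 * overlaps it only partially and is rejected with -EINVAL; a request for
 * id=0x800, mask=0x0 does not overlap at all and is given the first free
 * index instead.
 */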
Robin Murphy6668f692016-09-12 17:13:54 +01002021static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy468f4942016-09-12 17:13:49 +01002022{
2023 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy6668f692016-09-12 17:13:54 +01002024 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002025
Robin Murphy6668f692016-09-12 17:13:54 +01002026 /* Stream indexing is blissfully easy */
2027 if (!smrs)
2028 return id;
Robin Murphy468f4942016-09-12 17:13:49 +01002029
Robin Murphy6668f692016-09-12 17:13:54 +01002030 /* Validating SMRs is... less so */
2031 for (i = 0; i < smmu->num_mapping_groups; ++i) {
2032 if (!smrs[i].valid) {
2033 /*
2034 * Note the first free entry we come across, which
2035 * we'll claim in the end if nothing else matches.
2036 */
2037 if (free_idx < 0)
2038 free_idx = i;
Robin Murphy468f4942016-09-12 17:13:49 +01002039 continue;
2040 }
Robin Murphy6668f692016-09-12 17:13:54 +01002041 /*
2042 * If the new entry is _entirely_ matched by an existing entry,
2043 * then reuse that, with the guarantee that there also cannot
2044 * be any subsequent conflicting entries. In normal use we'd
2045 * expect simply identical entries for this case, but there's
2046 * no harm in accommodating the generalisation.
2047 */
2048 if ((mask & smrs[i].mask) == mask &&
2049 !((id ^ smrs[i].id) & ~smrs[i].mask))
2050 return i;
2051 /*
2052 * If the new entry has any other overlap with an existing one,
2053 * though, then there always exists at least one stream ID
2054 * which would cause a conflict, and we can't allow that risk.
2055 */
2056 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
2057 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002058 }
2059
Robin Murphy6668f692016-09-12 17:13:54 +01002060 return free_idx;
2061}
2062
2063static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
2064{
2065 if (--smmu->s2crs[idx].count)
2066 return false;
2067
2068 smmu->s2crs[idx] = s2cr_init_val;
2069 if (smmu->smrs)
2070 smmu->smrs[idx].valid = false;
2071
2072 return true;
2073}
2074
2075static int arm_smmu_master_alloc_smes(struct device *dev)
2076{
Robin Murphy06e393e2016-09-12 17:13:55 +01002077 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2078 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy6668f692016-09-12 17:13:54 +01002079 struct arm_smmu_device *smmu = cfg->smmu;
2080 struct arm_smmu_smr *smrs = smmu->smrs;
2081 struct iommu_group *group;
2082 int i, idx, ret;
2083
2084 mutex_lock(&smmu->stream_map_mutex);
2085 /* Figure out a viable stream map entry allocation */
Robin Murphy06e393e2016-09-12 17:13:55 +01002086 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002087 u16 sid = fwspec->ids[i];
2088 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
2089
Robin Murphy6668f692016-09-12 17:13:54 +01002090 if (idx != INVALID_SMENDX) {
2091 ret = -EEXIST;
2092 goto out_err;
2093 }
2094
Robin Murphy7e96c742016-09-14 15:26:46 +01002095 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy6668f692016-09-12 17:13:54 +01002096 if (ret < 0)
2097 goto out_err;
2098
2099 idx = ret;
2100 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002101 smrs[idx].id = sid;
2102 smrs[idx].mask = mask;
Robin Murphy6668f692016-09-12 17:13:54 +01002103 smrs[idx].valid = true;
2104 }
2105 smmu->s2crs[idx].count++;
2106 cfg->smendx[i] = (s16)idx;
2107 }
2108
2109 group = iommu_group_get_for_dev(dev);
2110 if (!group)
2111 group = ERR_PTR(-ENOMEM);
2112 if (IS_ERR(group)) {
2113 ret = PTR_ERR(group);
2114 goto out_err;
2115 }
2116 iommu_group_put(group);
Robin Murphy468f4942016-09-12 17:13:49 +01002117
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002118 /* It worked! Don't poke the actual hardware until we've attached */
2119 for_each_cfg_sme(fwspec, i, idx)
Robin Murphy6668f692016-09-12 17:13:54 +01002120 smmu->s2crs[idx].group = group;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002121
Robin Murphy6668f692016-09-12 17:13:54 +01002122 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002123 return 0;
2124
Robin Murphy6668f692016-09-12 17:13:54 +01002125out_err:
Robin Murphy468f4942016-09-12 17:13:49 +01002126 while (i--) {
Robin Murphy6668f692016-09-12 17:13:54 +01002127 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy468f4942016-09-12 17:13:49 +01002128 cfg->smendx[i] = INVALID_SMENDX;
2129 }
Robin Murphy6668f692016-09-12 17:13:54 +01002130 mutex_unlock(&smmu->stream_map_mutex);
2131 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002132}
2133
Robin Murphy06e393e2016-09-12 17:13:55 +01002134static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002135{
Robin Murphy06e393e2016-09-12 17:13:55 +01002136 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
2137 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy1fb519a2016-09-12 17:13:53 +01002138 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01002139
Robin Murphy6668f692016-09-12 17:13:54 +01002140 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002141 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002142 if (arm_smmu_free_sme(smmu, idx))
2143 arm_smmu_write_sme(smmu, idx);
Robin Murphy468f4942016-09-12 17:13:49 +01002144 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002145 }
Robin Murphy6668f692016-09-12 17:13:54 +01002146 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002147}
2148
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002149static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
2150 struct iommu_fwspec *fwspec)
2151{
2152 struct arm_smmu_device *smmu = smmu_domain->smmu;
2153 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2154 int i, idx;
2155 const struct iommu_gather_ops *tlb;
2156
2157 tlb = smmu_domain->pgtbl_cfg.tlb;
2158
2159 mutex_lock(&smmu->stream_map_mutex);
2160 for_each_cfg_sme(fwspec, i, idx) {
2161 WARN_ON(s2cr[idx].attach_count == 0);
2162 s2cr[idx].attach_count -= 1;
2163
2164 if (s2cr[idx].attach_count > 0)
2165 continue;
2166
2167 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
2168 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
2169 }
2170 mutex_unlock(&smmu->stream_map_mutex);
2171
2172 /* Ensure there are no stale mappings for this context bank */
2173 tlb->tlb_flush_all(smmu_domain);
2174}
2175
Will Deacon45ae7cf2013-06-24 18:31:25 +01002176static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphy06e393e2016-09-12 17:13:55 +01002177 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002178{
Will Deacon44680ee2014-06-25 11:29:12 +01002179 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002180 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2181 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
2182 u8 cbndx = smmu_domain->cfg.cbndx;
Robin Murphy6668f692016-09-12 17:13:54 +01002183 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002184
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002185 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002186 for_each_cfg_sme(fwspec, i, idx) {
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002187 if (s2cr[idx].attach_count++ > 0)
Robin Murphy6668f692016-09-12 17:13:54 +01002188 continue;
Robin Murphya754fd12016-09-12 17:13:50 +01002189
2190 s2cr[idx].type = type;
2191 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
2192 s2cr[idx].cbndx = cbndx;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002193 arm_smmu_write_sme(smmu, idx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002194 }
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002195 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002196
2197 return 0;
2198}
2199
Patrick Daly09801312016-08-29 17:02:52 -07002200static void arm_smmu_detach_dev(struct iommu_domain *domain,
2201 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002202{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002203 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07002204 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002205 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly09801312016-08-29 17:02:52 -07002206 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07002207 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07002208
2209 if (dynamic)
2210 return;
2211
Patrick Daly09801312016-08-29 17:02:52 -07002212 if (!smmu) {
2213 dev_err(dev, "Domain not attached; cannot detach!\n");
2214 return;
2215 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002216
Vijayanand Jitta25cd32c2017-11-16 15:14:36 +05302217 if (atomic_domain)
2218 arm_smmu_power_on_atomic(smmu->pwr);
2219 else
2220 arm_smmu_power_on(smmu->pwr);
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002221
Vijayanand Jitta25cd32c2017-11-16 15:14:36 +05302222 arm_smmu_domain_remove_master(smmu_domain, fwspec);
2223 arm_smmu_power_off(smmu->pwr);
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002224}
2225
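/*
 * Hand the page-table pages queued on pte_info_list over to the secure VM:
 * each page is hyp-assigned read/write to HLOS and read-only to the
 * domain's secure VMID, after which the bookkeeping entries are freed.
 * Callers wrap this in arm_smmu_secure_domain_lock()/unlock().
 */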
Patrick Dalye271f212016-10-04 13:24:49 -07002226static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07002227{
Patrick Dalye271f212016-10-04 13:24:49 -07002228 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002229 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2230 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
2231 int source_vmid = VMID_HLOS;
2232 struct arm_smmu_pte_info *pte_info, *temp;
2233
Patrick Dalye271f212016-10-04 13:24:49 -07002234 if (!arm_smmu_is_domain_secure(smmu_domain))
2235 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002236
Patrick Dalye271f212016-10-04 13:24:49 -07002237 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002238 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2239 PAGE_SIZE, &source_vmid, 1,
2240 dest_vmids, dest_perms, 2);
2241 if (WARN_ON(ret))
2242 break;
2243 }
2244
2245 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2246 entry) {
2247 list_del(&pte_info->entry);
2248 kfree(pte_info);
2249 }
Patrick Dalye271f212016-10-04 13:24:49 -07002250 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002251}
2252
2253static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2254{
2255 int ret;
2256 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002257 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002258 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2259 struct arm_smmu_pte_info *pte_info, *temp;
2260
Patrick Dalye271f212016-10-04 13:24:49 -07002261 if (!arm_smmu_is_domain_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002262 return;
2263
2264 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2265 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2266 PAGE_SIZE, source_vmlist, 2,
2267 &dest_vmids, &dest_perms, 1);
2268 if (WARN_ON(ret))
2269 break;
2270 free_pages_exact(pte_info->virt_addr, pte_info->size);
2271 }
2272
2273 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2274 entry) {
2275 list_del(&pte_info->entry);
2276 kfree(pte_info);
2277 }
2278}
2279
2280static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2281{
2282 struct arm_smmu_domain *smmu_domain = cookie;
2283 struct arm_smmu_pte_info *pte_info;
2284
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002285 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002286
2287 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2288 if (!pte_info)
2289 return;
2290
2291 pte_info->virt_addr = addr;
2292 pte_info->size = size;
2293 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2294}
2295
2296static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2297{
2298 struct arm_smmu_domain *smmu_domain = cookie;
2299 struct arm_smmu_pte_info *pte_info;
2300
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002301 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002302
2303 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2304 if (!pte_info)
2305 return -ENOMEM;
2306 pte_info->virt_addr = addr;
2307 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2308 return 0;
2309}
2310
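/*
 * Attach path: power the SMMU on, finalise the domain context on first
 * attach, then program the stream-mapping entries for this master.
 * Dynamic domains stop after context initialisation and never touch the
 * SMRs/S2CRs.  For atomic domains a non-atomic power vote is left in place
 * until detach (see the out_power_off path below).
 */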
Will Deacon45ae7cf2013-06-24 18:31:25 +01002311static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2312{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002313 int ret;
Robin Murphy06e393e2016-09-12 17:13:55 +01002314 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Will Deacon518f7132014-11-14 17:17:54 +00002315 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +01002316 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly8befb662016-08-17 20:03:28 -07002317 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002318
Robin Murphy06e393e2016-09-12 17:13:55 +01002319 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002320 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2321 return -ENXIO;
2322 }
Robin Murphy06e393e2016-09-12 17:13:55 +01002323
Robin Murphy4f79b142016-10-17 12:06:21 +01002324 /*
2325 * FIXME: The arch/arm DMA API code tries to attach devices to its own
2326 * domains between of_xlate() and add_device() - we have no way to cope
2327 * with that, so until ARM gets converted to rely on groups and default
2328 * domains, just say no (but more politely than by dereferencing NULL).
2329 * This should be at least a WARN_ON once that's sorted.
2330 */
2331 if (!fwspec->iommu_priv)
2332 return -ENODEV;
2333
Robin Murphy06e393e2016-09-12 17:13:55 +01002334 smmu = fwspec_smmu(fwspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002335
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002336 /* Enable Clocks and Power */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002337 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002338 if (ret)
2339 return ret;
2340
Will Deacon518f7132014-11-14 17:17:54 +00002341 /* Ensure that the domain is finalised */
Patrick Dalyea63baa2017-02-13 17:11:33 -08002342 ret = arm_smmu_init_domain_context(domain, smmu, dev);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002343 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002344 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002345
Patrick Dalyc190d932016-08-30 17:23:28 -07002346 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002347 if (is_dynamic_domain(domain)) {
2348 ret = 0;
2349 goto out_power_off;
2350 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002351
Will Deacon45ae7cf2013-06-24 18:31:25 +01002352 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002353 * Sanity check the domain. We don't support domains across
2354 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002355 */
Robin Murphy06e393e2016-09-12 17:13:55 +01002356 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002357 dev_err(dev,
2358 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002359 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002360 ret = -EINVAL;
2361 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002362 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002363
2364 /* Looks ok, so add the device to the domain */
Robin Murphy06e393e2016-09-12 17:13:55 +01002365 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002366
2367out_power_off:
Patrick Daly466237d2016-10-14 15:59:14 -07002368 /*
2369 * Keep an additional vote for non-atomic power until domain is
2370 * detached
2371 */
2372 if (!ret && atomic_domain) {
2373 WARN_ON(arm_smmu_power_on(smmu->pwr));
2374 arm_smmu_power_off_atomic(smmu->pwr);
2375 }
2376
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002377 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002378
Will Deacon45ae7cf2013-06-24 18:31:25 +01002379 return ret;
2380}
2381
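/*
 * Map entry point: the real work is done by io-pgtable's ops->map() under
 * the page-table spinlock.  The secure-domain lock is held across the call
 * so that any table memory allocated while mapping is hyp-assigned
 * immediately afterwards by arm_smmu_assign_table().
 */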
Will Deacon45ae7cf2013-06-24 18:31:25 +01002382static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002383 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002384{
Will Deacon518f7132014-11-14 17:17:54 +00002385 int ret;
2386 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002387 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002388 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002389
Will Deacon518f7132014-11-14 17:17:54 +00002390 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002391 return -ENODEV;
2392
Patrick Dalye271f212016-10-04 13:24:49 -07002393 arm_smmu_secure_domain_lock(smmu_domain);
2394
Will Deacon518f7132014-11-14 17:17:54 +00002395 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2396 ret = ops->map(ops, iova, paddr, size, prot);
2397 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002398
2399 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002400 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002401
Will Deacon518f7132014-11-14 17:17:54 +00002402 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002403}
2404
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002405static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
2406 dma_addr_t iova)
2407{
2408 uint64_t ret;
2409 unsigned long flags;
2410 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2411 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2412
2413 if (!ops)
2414 return 0;
2415
2416 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2417 ret = ops->iova_to_pte(ops, iova);
2418 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2419 return ret;
2420}
2421
Will Deacon45ae7cf2013-06-24 18:31:25 +01002422static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2423 size_t size)
2424{
Will Deacon518f7132014-11-14 17:17:54 +00002425 size_t ret;
2426 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002427 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002428	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002429
Will Deacon518f7132014-11-14 17:17:54 +00002430 if (!ops)
2431 return 0;
2432
Patrick Daly8befb662016-08-17 20:03:28 -07002433 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002434 if (ret)
2435 return ret;
2436
Patrick Dalye271f212016-10-04 13:24:49 -07002437 arm_smmu_secure_domain_lock(smmu_domain);
2438
Will Deacon518f7132014-11-14 17:17:54 +00002439 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2440 ret = ops->unmap(ops, iova, size);
2441 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002442
Patrick Daly8befb662016-08-17 20:03:28 -07002443 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002444 /*
2445 * While splitting up block mappings, we might allocate page table
2446	 * memory during unmap, so the VMIDs need to be assigned to the
2447 * memory here as well.
2448 */
2449 arm_smmu_assign_table(smmu_domain);
2450	/* Also unassign any pages that were freed during unmap */
2451 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002452 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002453 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002454}
2455
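/*
 * arm_smmu_map_sg() maps the scatterlist in batches of at most
 * MAX_MAP_SG_BATCH_SIZE bytes, limiting how long pgtbl_lock is held in one
 * go.  If any batch fails, everything mapped so far is unmapped again and
 * 0 is returned.
 */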
Patrick Daly88d321d2017-02-09 18:02:13 -08002456#define MAX_MAP_SG_BATCH_SIZE (SZ_4M)
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002457static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2458 struct scatterlist *sg, unsigned int nents, int prot)
2459{
2460 int ret;
Patrick Daly88d321d2017-02-09 18:02:13 -08002461 size_t size, batch_size, size_to_unmap = 0;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002462 unsigned long flags;
2463 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2464 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Patrick Daly88d321d2017-02-09 18:02:13 -08002465 unsigned int idx_start, idx_end;
2466 struct scatterlist *sg_start, *sg_end;
2467 unsigned long __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002468
2469 if (!ops)
2470 return -ENODEV;
2471
Patrick Daly4b9a7ad2017-09-22 17:31:13 -07002472 arm_smmu_secure_domain_lock(smmu_domain);
2473
Patrick Daly88d321d2017-02-09 18:02:13 -08002474 __saved_iova_start = iova;
2475 idx_start = idx_end = 0;
2476 sg_start = sg_end = sg;
2477 while (idx_end < nents) {
2478 batch_size = sg_end->length;
2479 sg_end = sg_next(sg_end);
2480 idx_end++;
2481 while ((idx_end < nents) &&
2482 (batch_size + sg_end->length < MAX_MAP_SG_BATCH_SIZE)) {
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002483
Patrick Daly88d321d2017-02-09 18:02:13 -08002484 batch_size += sg_end->length;
2485 sg_end = sg_next(sg_end);
2486 idx_end++;
2487 }
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002488
Patrick Daly88d321d2017-02-09 18:02:13 -08002489 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2490 ret = ops->map_sg(ops, iova, sg_start, idx_end - idx_start,
2491 prot, &size);
2492 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2493 /* Returns 0 on error */
2494 if (!ret) {
2495 size_to_unmap = iova + size - __saved_iova_start;
2496 goto out;
2497 }
2498
2499 iova += batch_size;
2500 idx_start = idx_end;
2501 sg_start = sg_end;
2502 }
2503
2504out:
Patrick Dalyc11d1082016-09-01 15:52:44 -07002505 arm_smmu_assign_table(smmu_domain);
2506
Patrick Daly88d321d2017-02-09 18:02:13 -08002507 if (size_to_unmap) {
2508 arm_smmu_unmap(domain, __saved_iova_start, size_to_unmap);
2509 iova = __saved_iova_start;
2510 }
Patrick Daly4b9a7ad2017-09-22 17:31:13 -07002511 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly88d321d2017-02-09 18:02:13 -08002512 return iova - __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002513}
2514
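/*
 * Hardware address translation (ATOS): write the page-aligned IOVA to
 * ATS1PR, poll ATSR for completion and read the result back from PAR.
 * On timeout the software table-walk result is reported in the error
 * message and 0 is returned.
 */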
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002515static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyad441dd2016-09-15 15:50:46 -07002516 dma_addr_t iova)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002517{
Joerg Roedel1d672632015-03-26 13:43:10 +01002518 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002519 struct arm_smmu_device *smmu = smmu_domain->smmu;
2520 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2521	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2522 struct device *dev = smmu->dev;
2523 void __iomem *cb_base;
2524 u32 tmp;
2525 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002526 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002527
2528 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2529
Robin Murphy661d9622015-05-27 17:09:34 +01002530 /* ATS1 registers can only be written atomically */
2531 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002532 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002533 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2534 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002535 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002536
2537 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2538 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002539 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002540 dev_err(dev,
2541 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2542 &iova, &phys);
2543 phys = 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002544 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002545 }
2546
Robin Murphyf9a05f02016-04-13 18:13:01 +01002547 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002548 if (phys & CB_PAR_F) {
2549 dev_err(dev, "translation fault!\n");
2550 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002551 phys = 0;
2552 } else {
2553 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002554 }
Patrick Dalyad441dd2016-09-15 15:50:46 -07002555
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002556 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002557}
2558
Will Deacon45ae7cf2013-06-24 18:31:25 +01002559static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002560 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002561{
Will Deacon518f7132014-11-14 17:17:54 +00002562 phys_addr_t ret;
2563 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002564 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002565	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002566
Will Deacon518f7132014-11-14 17:17:54 +00002567 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002568 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002569
Will Deacon518f7132014-11-14 17:17:54 +00002570 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002571 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002572 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002573
Will Deacon518f7132014-11-14 17:17:54 +00002574 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002575}
2576
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002577/*
2578	 * This function can sleep and cannot be called from atomic context. It will
2579	 * power on the register block if required. This restriction does not apply to the
2580 * original iova_to_phys() op.
2581 */
2582static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2583 dma_addr_t iova)
2584{
2585 phys_addr_t ret = 0;
2586 unsigned long flags;
2587 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly62ba1922017-08-30 16:47:18 -07002588 struct arm_smmu_device *smmu = smmu_domain->smmu;
2589
2590 if (smmu->options & ARM_SMMU_OPT_DISABLE_ATOS)
2591 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002592
Patrick Dalyad441dd2016-09-15 15:50:46 -07002593 if (smmu_domain->smmu->arch_ops &&
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002594 smmu_domain->smmu->arch_ops->iova_to_phys_hard) {
2595 ret = smmu_domain->smmu->arch_ops->iova_to_phys_hard(
Patrick Dalyad441dd2016-09-15 15:50:46 -07002596 domain, iova);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002597 return ret;
2598 }
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002599
2600 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2601 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2602 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyad441dd2016-09-15 15:50:46 -07002603 ret = __arm_smmu_iova_to_phys_hard(domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002604
2605 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2606
2607 return ret;
2608}
2609
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002610static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002611{
Will Deacond0948942014-06-24 17:30:10 +01002612 switch (cap) {
2613 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002614 /*
2615 * Return true here as the SMMU can always send out coherent
2616 * requests.
2617 */
2618 return true;
Will Deacond0948942014-06-24 17:30:10 +01002619 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002620 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002621 case IOMMU_CAP_NOEXEC:
2622 return true;
Will Deacond0948942014-06-24 17:30:10 +01002623 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002624 return false;
Will Deacond0948942014-06-24 17:30:10 +01002625 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002626}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002627
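/*
 * Look up an SMMU instance by device node on the driver's private list.
 * Used as a fallback by arm_smmu_get_by_node() when driver_find_device()
 * does not find the device.
 */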
Patrick Daly8e3371a2017-02-13 22:14:53 -08002628static struct arm_smmu_device *arm_smmu_get_by_list(struct device_node *np)
2629{
2630 struct arm_smmu_device *smmu;
2631 unsigned long flags;
2632
2633 spin_lock_irqsave(&arm_smmu_devices_lock, flags);
2634 list_for_each_entry(smmu, &arm_smmu_devices, list) {
2635 if (smmu->dev->of_node == np) {
2636 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2637 return smmu;
2638 }
2639 }
2640 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2641 return NULL;
2642}
2643
Robin Murphy7e96c742016-09-14 15:26:46 +01002644static int arm_smmu_match_node(struct device *dev, void *data)
2645{
2646 return dev->of_node == data;
2647}
2648
2649static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
2650{
2651 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
2652 np, arm_smmu_match_node);
2653 put_device(dev);
Patrick Daly8e3371a2017-02-13 22:14:53 -08002654 return dev ? dev_get_drvdata(dev) : arm_smmu_get_by_list(np);
Robin Murphy7e96c742016-09-14 15:26:46 +01002655}
2656
Will Deacon03edb222015-01-19 14:27:33 +00002657static int arm_smmu_add_device(struct device *dev)
2658{
Robin Murphy06e393e2016-09-12 17:13:55 +01002659 struct arm_smmu_device *smmu;
Robin Murphyd5b41782016-09-14 15:21:39 +01002660 struct arm_smmu_master_cfg *cfg;
Robin Murphy7e96c742016-09-14 15:26:46 +01002661 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyd5b41782016-09-14 15:21:39 +01002662 int i, ret;
2663
Robin Murphy7e96c742016-09-14 15:26:46 +01002664 if (using_legacy_binding) {
2665 ret = arm_smmu_register_legacy_master(dev, &smmu);
2666 fwspec = dev->iommu_fwspec;
2667 if (ret)
2668 goto out_free;
Robin Murphy22e6f6c2016-11-02 17:31:32 +00002669 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002670 smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
2671 if (!smmu)
2672 return -ENODEV;
2673 } else {
2674 return -ENODEV;
2675 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002676
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002677 ret = arm_smmu_power_on(smmu->pwr);
2678 if (ret)
2679 goto out_free;
2680
Robin Murphyd5b41782016-09-14 15:21:39 +01002681 ret = -EINVAL;
Robin Murphy06e393e2016-09-12 17:13:55 +01002682 for (i = 0; i < fwspec->num_ids; i++) {
2683 u16 sid = fwspec->ids[i];
Robin Murphy7e96c742016-09-14 15:26:46 +01002684 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyd5b41782016-09-14 15:21:39 +01002685
Robin Murphy06e393e2016-09-12 17:13:55 +01002686 if (sid & ~smmu->streamid_mask) {
Robin Murphyd5b41782016-09-14 15:21:39 +01002687 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002688 sid, smmu->streamid_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002689 goto out_pwr_off;
Robin Murphyd5b41782016-09-14 15:21:39 +01002690 }
Robin Murphy7e96c742016-09-14 15:26:46 +01002691 if (mask & ~smmu->smr_mask_mask) {
2692 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
2693 sid, smmu->smr_mask_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002694 goto out_pwr_off;
Robin Murphy7e96c742016-09-14 15:26:46 +01002695 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002696 }
Will Deacon03edb222015-01-19 14:27:33 +00002697
Robin Murphy06e393e2016-09-12 17:13:55 +01002698 ret = -ENOMEM;
2699 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
2700 GFP_KERNEL);
2701 if (!cfg)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002702 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002703
2704 cfg->smmu = smmu;
2705 fwspec->iommu_priv = cfg;
2706 while (i--)
2707 cfg->smendx[i] = INVALID_SMENDX;
2708
Robin Murphy6668f692016-09-12 17:13:54 +01002709 ret = arm_smmu_master_alloc_smes(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002710 if (ret)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002711 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002712
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002713 arm_smmu_power_off(smmu->pwr);
Robin Murphy06e393e2016-09-12 17:13:55 +01002714 return 0;
Robin Murphyd5b41782016-09-14 15:21:39 +01002715
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002716out_pwr_off:
2717 arm_smmu_power_off(smmu->pwr);
Robin Murphyd5b41782016-09-14 15:21:39 +01002718out_free:
Robin Murphy06e393e2016-09-12 17:13:55 +01002719 if (fwspec)
2720 kfree(fwspec->iommu_priv);
2721 iommu_fwspec_free(dev);
Robin Murphyd5b41782016-09-14 15:21:39 +01002722 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00002723}
2724
Will Deacon45ae7cf2013-06-24 18:31:25 +01002725static void arm_smmu_remove_device(struct device *dev)
2726{
Robin Murphy06e393e2016-09-12 17:13:55 +01002727 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002728 struct arm_smmu_device *smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002729
Robin Murphy06e393e2016-09-12 17:13:55 +01002730 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyd5b41782016-09-14 15:21:39 +01002731 return;
Robin Murphya754fd12016-09-12 17:13:50 +01002732
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002733 smmu = fwspec_smmu(fwspec);
2734 if (arm_smmu_power_on(smmu->pwr)) {
2735 WARN_ON(1);
2736 return;
2737 }
2738
Robin Murphy06e393e2016-09-12 17:13:55 +01002739 arm_smmu_master_free_smes(fwspec);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002740 iommu_group_remove_device(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002741 kfree(fwspec->iommu_priv);
2742 iommu_fwspec_free(dev);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002743 arm_smmu_power_off(smmu->pwr);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002744}
2745
Joerg Roedelaf659932015-10-21 23:51:41 +02002746static struct iommu_group *arm_smmu_device_group(struct device *dev)
2747{
Robin Murphy06e393e2016-09-12 17:13:55 +01002748 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2749 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy6668f692016-09-12 17:13:54 +01002750 struct iommu_group *group = NULL;
2751 int i, idx;
2752
Robin Murphy06e393e2016-09-12 17:13:55 +01002753 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002754 if (group && smmu->s2crs[idx].group &&
2755 group != smmu->s2crs[idx].group)
2756 return ERR_PTR(-EINVAL);
2757
2758 group = smmu->s2crs[idx].group;
2759 }
2760
Patrick Daly03330cc2017-08-11 14:56:38 -07002761 if (!group) {
2762 if (dev_is_pci(dev))
2763 group = pci_device_group(dev);
2764 else
2765 group = generic_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02002766
Patrick Daly03330cc2017-08-11 14:56:38 -07002767 if (IS_ERR(group))
2768 return NULL;
2769 }
2770
2771 if (arm_smmu_arch_device_group(dev, group)) {
2772 iommu_group_put(group);
2773 return ERR_PTR(-EINVAL);
2774 }
Joerg Roedelaf659932015-10-21 23:51:41 +02002775
Joerg Roedelaf659932015-10-21 23:51:41 +02002776 return group;
2777}
2778
Will Deaconc752ce42014-06-25 22:46:31 +01002779static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
2780 enum iommu_attr attr, void *data)
2781{
Joerg Roedel1d672632015-03-26 13:43:10 +01002782 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002783 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01002784
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002785 mutex_lock(&smmu_domain->init_mutex);
Will Deaconc752ce42014-06-25 22:46:31 +01002786 switch (attr) {
2787 case DOMAIN_ATTR_NESTING:
2788 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002789 ret = 0;
2790 break;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08002791 case DOMAIN_ATTR_PT_BASE_ADDR:
2792 *((phys_addr_t *)data) =
2793 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002794 ret = 0;
2795 break;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002796 case DOMAIN_ATTR_CONTEXT_BANK:
2797 /* context bank index isn't valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002798 if (smmu_domain->smmu == NULL) {
2799 ret = -ENODEV;
2800 break;
2801 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002802 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
2803 ret = 0;
2804 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002805 case DOMAIN_ATTR_TTBR0: {
2806 u64 val;
2807 struct arm_smmu_device *smmu = smmu_domain->smmu;
2808 /* not valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002809 if (smmu == NULL) {
2810 ret = -ENODEV;
2811 break;
2812 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002813 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2814 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
2815 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
2816 << (TTBRn_ASID_SHIFT);
2817 *((u64 *)data) = val;
2818 ret = 0;
2819 break;
2820 }
2821 case DOMAIN_ATTR_CONTEXTIDR:
2822 /* not valid until attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002823 if (smmu_domain->smmu == NULL) {
2824 ret = -ENODEV;
2825 break;
2826 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002827 *((u32 *)data) = smmu_domain->cfg.procid;
2828 ret = 0;
2829 break;
2830 case DOMAIN_ATTR_PROCID:
2831 *((u32 *)data) = smmu_domain->cfg.procid;
2832 ret = 0;
2833 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002834 case DOMAIN_ATTR_DYNAMIC:
2835 *((int *)data) = !!(smmu_domain->attributes
2836 & (1 << DOMAIN_ATTR_DYNAMIC));
2837 ret = 0;
2838 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002839 case DOMAIN_ATTR_NON_FATAL_FAULTS:
2840 *((int *)data) = !!(smmu_domain->attributes
2841 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
2842 ret = 0;
2843 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07002844 case DOMAIN_ATTR_S1_BYPASS:
2845 *((int *)data) = !!(smmu_domain->attributes
2846 & (1 << DOMAIN_ATTR_S1_BYPASS));
2847 ret = 0;
2848 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002849 case DOMAIN_ATTR_SECURE_VMID:
2850 *((int *)data) = smmu_domain->secure_vmid;
2851 ret = 0;
2852 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08002853 case DOMAIN_ATTR_PGTBL_INFO: {
2854 struct iommu_pgtbl_info *info = data;
2855
2856 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
2857 ret = -ENODEV;
2858 break;
2859 }
2860 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
2861 ret = 0;
2862 break;
2863 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002864 case DOMAIN_ATTR_FAST:
2865 *((int *)data) = !!(smmu_domain->attributes
2866 & (1 << DOMAIN_ATTR_FAST));
2867 ret = 0;
2868 break;
Patrick Daly1e279922017-09-06 15:57:45 -07002869 case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
2870 *((int *)data) = !!(smmu_domain->attributes
2871 & (1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR));
2872 ret = 0;
2873 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002874 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2875 *((int *)data) = !!(smmu_domain->attributes &
2876 (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
2877 ret = 0;
2878 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08002879 case DOMAIN_ATTR_EARLY_MAP:
2880 *((int *)data) = !!(smmu_domain->attributes
2881 & (1 << DOMAIN_ATTR_EARLY_MAP));
2882 ret = 0;
2883 break;
Mitchel Humpherys05314f32016-06-07 16:04:40 -07002884 case DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002885 if (!smmu_domain->smmu) {
2886 ret = -ENODEV;
2887 break;
2888 }
Liam Mark53cf2342016-12-20 11:36:07 -08002889 *((int *)data) = is_iommu_pt_coherent(smmu_domain);
2890 ret = 0;
2891 break;
2892 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
2893 *((int *)data) = !!(smmu_domain->attributes
2894 & (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
Mitchel Humpherys05314f32016-06-07 16:04:40 -07002895 ret = 0;
2896 break;
Charan Teja Reddyc682e472017-04-20 19:11:20 +05302897 case DOMAIN_ATTR_CB_STALL_DISABLE:
2898 *((int *)data) = !!(smmu_domain->attributes
2899 & (1 << DOMAIN_ATTR_CB_STALL_DISABLE));
2900 ret = 0;
2901 break;
Patrick Daly83174c12017-10-26 12:31:15 -07002902 case DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN:
Patrick Daly23301482017-10-12 16:18:25 -07002903 *((int *)data) = smmu_domain->qsmmuv500_errata2_min_align;
2904 ret = 0;
2905 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002906 default:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002907 ret = -ENODEV;
2908 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002909 }
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002910 mutex_unlock(&smmu_domain->init_mutex);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002911 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002912}
2913
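/*
 * Set a domain attribute.  Most attributes may only be changed before the
 * domain is attached.  A purely illustrative client flow (the client device
 * and attribute choices below are hypothetical, not taken from any real
 * driver) might look like:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *	int atomic = 1, bypass = 0;
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_ATOMIC, &atomic);
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass);
 *	iommu_attach_device(domain, dev);
 */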
2914static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
2915 enum iommu_attr attr, void *data)
2916{
Will Deacon518f7132014-11-14 17:17:54 +00002917 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01002918 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01002919
Will Deacon518f7132014-11-14 17:17:54 +00002920 mutex_lock(&smmu_domain->init_mutex);
2921
Will Deaconc752ce42014-06-25 22:46:31 +01002922 switch (attr) {
2923 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00002924 if (smmu_domain->smmu) {
2925 ret = -EPERM;
2926 goto out_unlock;
2927 }
2928
Will Deaconc752ce42014-06-25 22:46:31 +01002929 if (*(int *)data)
2930 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2931 else
2932 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2933
Will Deacon518f7132014-11-14 17:17:54 +00002934 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002935 case DOMAIN_ATTR_PROCID:
2936 if (smmu_domain->smmu != NULL) {
2937 dev_err(smmu_domain->smmu->dev,
2938 "cannot change procid attribute while attached\n");
2939 ret = -EBUSY;
2940 break;
2941 }
2942 smmu_domain->cfg.procid = *((u32 *)data);
2943 ret = 0;
2944 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002945 case DOMAIN_ATTR_DYNAMIC: {
2946 int dynamic = *((int *)data);
2947
2948 if (smmu_domain->smmu != NULL) {
2949 dev_err(smmu_domain->smmu->dev,
2950 "cannot change dynamic attribute while attached\n");
2951 ret = -EBUSY;
2952 break;
2953 }
2954
2955 if (dynamic)
2956 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
2957 else
2958 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
2959 ret = 0;
2960 break;
2961 }
2962 case DOMAIN_ATTR_CONTEXT_BANK:
2963 /* context bank can't be set while attached */
2964 if (smmu_domain->smmu != NULL) {
2965 ret = -EBUSY;
2966 break;
2967 }
2968 /* ... and it can only be set for dynamic contexts. */
2969 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
2970 ret = -EINVAL;
2971 break;
2972 }
2973
2974 /* this will be validated during attach */
2975 smmu_domain->cfg.cbndx = *((unsigned int *)data);
2976 ret = 0;
2977 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002978 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
2979 u32 non_fatal_faults = *((int *)data);
2980
2981 if (non_fatal_faults)
2982 smmu_domain->attributes |=
2983 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
2984 else
2985 smmu_domain->attributes &=
2986 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
2987 ret = 0;
2988 break;
2989 }
Patrick Dalye62d3362016-03-15 18:58:28 -07002990 case DOMAIN_ATTR_S1_BYPASS: {
2991 int bypass = *((int *)data);
2992
2993 /* bypass can't be changed while attached */
2994 if (smmu_domain->smmu != NULL) {
2995 ret = -EBUSY;
2996 break;
2997 }
2998 if (bypass)
2999 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
3000 else
3001 smmu_domain->attributes &=
3002 ~(1 << DOMAIN_ATTR_S1_BYPASS);
3003
3004 ret = 0;
3005 break;
3006 }
Patrick Daly8befb662016-08-17 20:03:28 -07003007 case DOMAIN_ATTR_ATOMIC:
3008 {
3009 int atomic_ctx = *((int *)data);
3010
3011 /* can't be changed while attached */
3012 if (smmu_domain->smmu != NULL) {
3013 ret = -EBUSY;
3014 break;
3015 }
3016 if (atomic_ctx)
3017 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
3018 else
3019 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
3020 break;
3021 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07003022 case DOMAIN_ATTR_SECURE_VMID:
3023 if (smmu_domain->secure_vmid != VMID_INVAL) {
3024 ret = -ENODEV;
3025 WARN(1, "secure vmid already set!");
3026 break;
3027 }
3028 smmu_domain->secure_vmid = *((int *)data);
3029 break;
Patrick Daly1e279922017-09-06 15:57:45 -07003030 case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
3031 if (*((int *)data))
3032 smmu_domain->attributes |=
3033 1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR;
3034 ret = 0;
3035 break;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003036 /*
3037 * fast_smmu_unmap_page() and fast_smmu_alloc_iova() both
3038 * expect that the bus/clock/regulator are already on. Thus also
3039	 * force DOMAIN_ATTR_ATOMIC to be set.
3040 */
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003041 case DOMAIN_ATTR_FAST:
Patrick Daly0df84ac2017-10-11 17:32:41 -07003042 {
3043 int fast = *((int *)data);
3044
3045 if (fast) {
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003046 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003047 smmu_domain->attributes |= 1 << DOMAIN_ATTR_ATOMIC;
3048 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003049 ret = 0;
3050 break;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003051 }
Patrick Dalyce6786f2016-11-09 14:19:23 -08003052 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
3053 /* can't be changed while attached */
3054 if (smmu_domain->smmu != NULL) {
3055 ret = -EBUSY;
3056 break;
3057 }
3058 if (*((int *)data))
3059 smmu_domain->attributes |=
3060 1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
3061 ret = 0;
3062 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003063 case DOMAIN_ATTR_EARLY_MAP: {
3064 int early_map = *((int *)data);
3065
3066 ret = 0;
3067 if (early_map) {
3068 smmu_domain->attributes |=
3069 1 << DOMAIN_ATTR_EARLY_MAP;
3070 } else {
3071 if (smmu_domain->smmu)
3072 ret = arm_smmu_enable_s1_translations(
3073 smmu_domain);
3074
3075 if (!ret)
3076 smmu_domain->attributes &=
3077 ~(1 << DOMAIN_ATTR_EARLY_MAP);
3078 }
3079 break;
3080 }
Liam Mark53cf2342016-12-20 11:36:07 -08003081 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT: {
3082 int force_coherent = *((int *)data);
3083
3084 if (smmu_domain->smmu != NULL) {
3085 dev_err(smmu_domain->smmu->dev,
3086 "cannot change force coherent attribute while attached\n");
3087 ret = -EBUSY;
3088 break;
3089 }
3090
3091 if (force_coherent)
3092 smmu_domain->attributes |=
3093 1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
3094 else
3095 smmu_domain->attributes &=
3096 ~(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT);
3097
3098 ret = 0;
3099 break;
3100 }
3101
Charan Teja Reddyc682e472017-04-20 19:11:20 +05303102 case DOMAIN_ATTR_CB_STALL_DISABLE:
3103 if (*((int *)data))
3104 smmu_domain->attributes |=
3105 1 << DOMAIN_ATTR_CB_STALL_DISABLE;
3106 ret = 0;
3107 break;
Will Deaconc752ce42014-06-25 22:46:31 +01003108 default:
Will Deacon518f7132014-11-14 17:17:54 +00003109 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01003110 }
Will Deacon518f7132014-11-14 17:17:54 +00003111
3112out_unlock:
3113 mutex_unlock(&smmu_domain->init_mutex);
3114 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01003115}
3116
Robin Murphy7e96c742016-09-14 15:26:46 +01003117static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
3118{
3119 u32 fwid = 0;
3120
3121 if (args->args_count > 0)
3122 fwid |= (u16)args->args[0];
3123
3124 if (args->args_count > 1)
3125 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
3126
3127 return iommu_fwspec_add_ids(dev, &fwid, 1);
3128}
3129
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003130static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain)
3131{
3132 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3133 struct arm_smmu_device *smmu = smmu_domain->smmu;
3134 void __iomem *cb_base;
3135 u32 reg;
3136 int ret;
3137
3138 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3139 ret = arm_smmu_power_on(smmu->pwr);
3140 if (ret)
3141 return ret;
3142
3143 reg = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3144 reg |= SCTLR_M;
3145
3146 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
3147 arm_smmu_power_off(smmu->pwr);
3148 return ret;
3149}
3150
Liam Mark3ba41cf2016-12-09 14:39:04 -08003151static bool arm_smmu_is_iova_coherent(struct iommu_domain *domain,
3152 dma_addr_t iova)
3153{
3154 bool ret;
3155 unsigned long flags;
3156 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3157 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
3158
3159 if (!ops)
3160 return false;
3161
3162 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
3163 ret = ops->is_iova_coherent(ops, iova);
3164 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
3165 return ret;
3166}
3167
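/*
 * Debug helper: write the given fault status bits to FSRRESTORE on the
 * domain's context bank to synthesize a context fault, then give the
 * interrupt handler a second to run before dropping the power vote.
 */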
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003168static void arm_smmu_trigger_fault(struct iommu_domain *domain,
3169 unsigned long flags)
3170{
3171 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3172 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3173 struct arm_smmu_device *smmu;
3174 void __iomem *cb_base;
3175
3176 if (!smmu_domain->smmu) {
3177 pr_err("Can't trigger faults on non-attached domains\n");
3178 return;
3179 }
3180
3181 smmu = smmu_domain->smmu;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003182 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003183 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003184
3185 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3186 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
3187 flags, cfg->cbndx);
3188 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07003189 /* give the interrupt time to fire... */
3190 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003191
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003192 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003193}
3194
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003195static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
3196{
Patrick Dalyda765c62017-09-11 16:31:07 -07003197 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3198 const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;
3199
3200 tlb->tlb_flush_all(smmu_domain);
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003201}
3202
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003203static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
3204{
3205 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3206
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003207 return arm_smmu_power_on(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003208}
3209
3210static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
3211{
3212 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3213
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003214 arm_smmu_power_off(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003215}
3216
Will Deacon518f7132014-11-14 17:17:54 +00003217static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01003218 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01003219 .domain_alloc = arm_smmu_domain_alloc,
3220 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01003221 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07003222 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01003223 .map = arm_smmu_map,
3224 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07003225 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01003226 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07003227 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01003228 .add_device = arm_smmu_add_device,
3229 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02003230 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01003231 .domain_get_attr = arm_smmu_domain_get_attr,
3232 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy7e96c742016-09-14 15:26:46 +01003233 .of_xlate = arm_smmu_of_xlate,
Will Deacon518f7132014-11-14 17:17:54 +00003234 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003235 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003236 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003237 .enable_config_clocks = arm_smmu_enable_config_clocks,
3238 .disable_config_clocks = arm_smmu_disable_config_clocks,
Liam Mark3ba41cf2016-12-09 14:39:04 -08003239 .is_iova_coherent = arm_smmu_is_iova_coherent,
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07003240 .iova_to_pte = arm_smmu_iova_to_pte,
Will Deacon45ae7cf2013-06-24 18:31:25 +01003241};
3242
Patrick Dalyad441dd2016-09-15 15:50:46 -07003243#define IMPL_DEF1_MICRO_MMU_CTRL 0
3244#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
3245#define MICRO_MMU_CTRL_IDLE (1 << 3)
3246
3247/* Definitions for implementation-defined registers */
3248#define ACTLR_QCOM_OSH_SHIFT 28
3249#define ACTLR_QCOM_OSH 1
3250
3251#define ACTLR_QCOM_ISH_SHIFT 29
3252#define ACTLR_QCOM_ISH 1
3253
3254#define ACTLR_QCOM_NSH_SHIFT 30
3255#define ACTLR_QCOM_NSH 1
3256
3257static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003258{
3259 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003260 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003261
3262 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
3263 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
3264 0, 30000)) {
3265 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
3266 return -EBUSY;
3267 }
3268
3269 return 0;
3270}
3271
Patrick Dalyad441dd2016-09-15 15:50:46 -07003272static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003273{
3274 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3275 u32 reg;
3276
3277 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3278 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
3279 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3280
Patrick Dalyad441dd2016-09-15 15:50:46 -07003281 return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003282}
3283
Patrick Dalyad441dd2016-09-15 15:50:46 -07003284static int qsmmuv2_halt(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003285{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003286 return __qsmmuv2_halt(smmu, true);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003287}
3288
Patrick Dalyad441dd2016-09-15 15:50:46 -07003289static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003290{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003291 return __qsmmuv2_halt(smmu, false);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003292}
3293
Patrick Dalyad441dd2016-09-15 15:50:46 -07003294static void qsmmuv2_resume(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003295{
3296 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3297 u32 reg;
3298
3299 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3300 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
3301 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3302}
3303
Patrick Dalyad441dd2016-09-15 15:50:46 -07003304static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003305{
3306 int i;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003307 u32 val;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003308 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003309 void __iomem *cb_base;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003310
Patrick Dalyad441dd2016-09-15 15:50:46 -07003311 /*
3312 * SCTLR.M must be disabled here per ARM SMMUv2 spec
3313 * to prevent table walks with an inconsistent state.
3314 */
3315 for (i = 0; i < smmu->num_context_banks; ++i) {
3316 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3317 val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
3318 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
3319 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
3320 writel_relaxed(val, cb_base + ARM_SMMU_CB_ACTLR);
3321 }
3322
3323 /* Program implementation defined registers */
3324 qsmmuv2_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003325 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
3326 writel_relaxed(regs[i].value,
3327 ARM_SMMU_GR0(smmu) + regs[i].offset);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003328 qsmmuv2_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003329}
3330
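/*
 * QSMMUv2 ATOS: halt the SMMU, terminate any stalled transaction, clear
 * FSR and momentarily disable stall mode so that the common
 * __arm_smmu_iova_to_phys_hard() sequence can run safely, then restore
 * SCTLR and resume.
 */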
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003331static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
3332 dma_addr_t iova)
Patrick Dalyad441dd2016-09-15 15:50:46 -07003333{
3334 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3335 struct arm_smmu_device *smmu = smmu_domain->smmu;
3336 int ret;
3337 phys_addr_t phys = 0;
3338 unsigned long flags;
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003339 u32 sctlr, sctlr_orig, fsr;
3340 void __iomem *cb_base;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003341
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003342 ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003343 if (ret)
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003344 return ret;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003345
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003346 spin_lock_irqsave(&smmu->atos_lock, flags);
3347 cb_base = ARM_SMMU_CB_BASE(smmu) +
3348 ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003349
3350 qsmmuv2_halt_nowait(smmu);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003351 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003352 qsmmuv2_wait_for_halt(smmu);
3353
3354 /* clear FSR to allow ATOS to log any faults */
3355 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
3356 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
3357
3358 /* disable stall mode momentarily */
3359 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3360 sctlr = sctlr_orig & ~SCTLR_CFCFG;
3361 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
3362
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003363 phys = __arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003364
3365 /* restore SCTLR */
3366 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
3367
3368 qsmmuv2_resume(smmu);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003369 spin_unlock_irqrestore(&smmu->atos_lock, flags);
3370
3371 arm_smmu_power_off(smmu_domain->smmu->pwr);
3372 return phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003373}
3374
3375struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
3376 .device_reset = qsmmuv2_device_reset,
3377 .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
Patrick Dalyad441dd2016-09-15 15:50:46 -07003378};
3379
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003380static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003381{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003382 int i;
3383 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003384 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003385 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003386
Peng Fan3ca37122016-05-03 21:50:30 +08003387 /*
3388 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
3389 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
3390 * bit is only present in MMU-500r2 onwards.
3391 */
3392 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
3393 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
3394 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
3395 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
3396 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
3397 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
3398 }
3399
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003400 /* Make sure all context banks are disabled and clear CB_FSR */
3401 for (i = 0; i < smmu->num_context_banks; ++i) {
3402 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3403 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
3404 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003405 /*
3406 * Disable MMU-500's not-particularly-beneficial next-page
3407 * prefetcher for the sake of errata #841119 and #826419.
3408 */
3409 if (smmu->model == ARM_MMU500) {
3410 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
3411 reg &= ~ARM_MMU500_ACTLR_CPRE;
3412 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
3413 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003414 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003415}
3416
3417static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
3418{
3419 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy468f4942016-09-12 17:13:49 +01003420 int i;
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003421 u32 reg;
3422
3423 /* clear global FSR */
3424 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3425 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3426
Robin Murphy468f4942016-09-12 17:13:49 +01003427 /*
3428 * Reset stream mapping groups: Initial values mark all SMRn as
3429 * invalid and all S2CRn as bypass unless overridden.
3430 */
Patrick Daly59b6d202017-06-12 13:12:15 -07003431 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
3432 for (i = 0; i < smmu->num_mapping_groups; ++i)
3433 arm_smmu_write_sme(smmu, i);
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003434
Patrick Daly59b6d202017-06-12 13:12:15 -07003435 arm_smmu_context_bank_reset(smmu);
3436 }
Will Deacon1463fe42013-07-31 19:21:27 +01003437
Will Deacon45ae7cf2013-06-24 18:31:25 +01003438 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003439 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
3440 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
3441
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003442 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003443
Will Deacon45ae7cf2013-06-24 18:31:25 +01003444 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003445 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003446
3447 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003448 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003449
Robin Murphy25a1c962016-02-10 14:25:33 +00003450 /* Enable client access, handling unmatched streams as appropriate */
3451 reg &= ~sCR0_CLIENTPD;
3452 if (disable_bypass)
3453 reg |= sCR0_USFCFG;
3454 else
3455 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003456
3457 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003458 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003459
3460 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003461 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003462
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003463 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3464 reg |= sCR0_VMID16EN;
3465
Patrick Daly7f377fe2017-10-06 17:37:10 -07003466	/* Force bypass transactions to be Non-Shareable and not IO-coherent */
3467 reg &= ~(sCR0_SHCFG_MASK << sCR0_SHCFG_SHIFT);
Prakash Gupta673a79f2017-11-16 18:07:00 +05303468 reg |= sCR0_SHCFG_NSH << sCR0_SHCFG_SHIFT;
Patrick Daly7f377fe2017-10-06 17:37:10 -07003469
Will Deacon45ae7cf2013-06-24 18:31:25 +01003470 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003471 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003472 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Dalyd7476202016-09-08 18:23:28 -07003473
3474 /* Manage any implementation defined features */
3475 arm_smmu_arch_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003476}
3477
3478static int arm_smmu_id_size_to_bits(int size)
3479{
3480 switch (size) {
3481 case 0:
3482 return 32;
3483 case 1:
3484 return 36;
3485 case 2:
3486 return 40;
3487 case 3:
3488 return 42;
3489 case 4:
3490 return 44;
3491 case 5:
3492 default:
3493 return 48;
3494 }
3495}
3496
Patrick Dalyda688822017-05-17 20:12:48 -07003497
3498/*
3499 * Some context banks need to be transferred from the bootloader to HLOS in a way
3500 * that allows ongoing traffic. The current expectation is that these context
3501 * banks operate in bypass mode.
3502 * Additionally, there must be exactly one device in devicetree with stream-ids
3503 * overlapping those used by the bootloader.
3504 */
3505static int arm_smmu_alloc_cb(struct iommu_domain *domain,
3506 struct arm_smmu_device *smmu,
3507 struct device *dev)
3508{
3509 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Dalye72526b2017-07-18 16:21:44 -07003510 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Dalyda688822017-05-17 20:12:48 -07003511 u32 i, idx;
3512 int cb = -EINVAL;
3513 bool dynamic;
3514
Patrick Dalye72526b2017-07-18 16:21:44 -07003515 /*
3516 * Dynamic domains have already set cbndx through domain attribute.
3517 * Verify that they picked a valid value.
3518 */
Patrick Dalyda688822017-05-17 20:12:48 -07003519 dynamic = is_dynamic_domain(domain);
Patrick Dalye72526b2017-07-18 16:21:44 -07003520 if (dynamic) {
3521 cb = smmu_domain->cfg.cbndx;
3522 if (cb < smmu->num_context_banks)
3523 return cb;
3524 else
3525 return -EINVAL;
3526 }
Patrick Dalyda688822017-05-17 20:12:48 -07003527
3528 mutex_lock(&smmu->stream_map_mutex);
3529 for_each_cfg_sme(fwspec, i, idx) {
3530 if (smmu->s2crs[idx].cb_handoff)
3531 cb = smmu->s2crs[idx].cbndx;
3532 }
3533
3534 if (cb < 0) {
3535 mutex_unlock(&smmu->stream_map_mutex);
3536 return __arm_smmu_alloc_bitmap(smmu->context_map,
3537 smmu->num_s2_context_banks,
3538 smmu->num_context_banks);
3539 }
3540
3541 for (i = 0; i < smmu->num_mapping_groups; i++) {
Patrick Daly2eb31362017-06-14 18:29:36 -07003542 if (smmu->s2crs[i].cb_handoff && smmu->s2crs[i].cbndx == cb) {
Patrick Dalyda688822017-05-17 20:12:48 -07003543 smmu->s2crs[i].cb_handoff = false;
3544 smmu->s2crs[i].count -= 1;
3545 }
3546 }
3547 mutex_unlock(&smmu->stream_map_mutex);
3548
3549 return cb;
3550}
3551
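/*
 * Snapshot the SMR/S2CR state programmed by the bootloader.  Valid
 * translation-type entries are recorded with cb_handoff set and their
 * context banks reserved in context_map, so that arm_smmu_alloc_cb() can
 * later hand the same context bank back to the owning client.
 */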
3552static int arm_smmu_handoff_cbs(struct arm_smmu_device *smmu)
3553{
3554 u32 i, raw_smr, raw_s2cr;
3555 struct arm_smmu_smr smr;
3556 struct arm_smmu_s2cr s2cr;
3557
3558 for (i = 0; i < smmu->num_mapping_groups; i++) {
3559 raw_smr = readl_relaxed(ARM_SMMU_GR0(smmu) +
3560 ARM_SMMU_GR0_SMR(i));
3561 if (!(raw_smr & SMR_VALID))
3562 continue;
3563
3564 smr.mask = (raw_smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
3565 smr.id = (u16)raw_smr;
3566 smr.valid = true;
3567
3568 raw_s2cr = readl_relaxed(ARM_SMMU_GR0(smmu) +
3569 ARM_SMMU_GR0_S2CR(i));
Patrick Daly4ea4bdf2017-08-29 19:24:49 -07003570 memset(&s2cr, 0, sizeof(s2cr));
Patrick Dalyda688822017-05-17 20:12:48 -07003571 s2cr.group = NULL;
3572 s2cr.count = 1;
3573 s2cr.type = (raw_s2cr >> S2CR_TYPE_SHIFT) & S2CR_TYPE_MASK;
3574 s2cr.privcfg = (raw_s2cr >> S2CR_PRIVCFG_SHIFT) &
3575 S2CR_PRIVCFG_MASK;
3576 s2cr.cbndx = (u8)raw_s2cr;
3577 s2cr.cb_handoff = true;
3578
3579 if (s2cr.type != S2CR_TYPE_TRANS)
3580 continue;
3581
3582 smmu->smrs[i] = smr;
3583 smmu->s2crs[i] = s2cr;
3584 bitmap_set(smmu->context_map, s2cr.cbndx, 1);
3585 dev_dbg(smmu->dev, "Handoff smr: %x s2cr: %x cb: %d\n",
3586 raw_smr, raw_s2cr, s2cr.cbndx);
3587 }
3588
3589 return 0;
3590}
3591
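/*
 * Parse the optional "attach-impl-defs" DT property: a flat list of
 * <offset value> pairs written to implementation-defined registers at
 * reset time.  Purely as an illustration (the offsets and values below are
 * made up, not taken from any real device tree), the property might look
 * like:
 *
 *	attach-impl-defs = <0x6000 0x270>,
 *			   <0x6094 0x15>;
 */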
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003592static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
3593{
3594 struct device *dev = smmu->dev;
3595 int i, ntuples, ret;
3596 u32 *tuples;
3597 struct arm_smmu_impl_def_reg *regs, *regit;
3598
3599 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
3600 return 0;
3601
3602 ntuples /= sizeof(u32);
3603 if (ntuples % 2) {
3604 dev_err(dev,
3605 "Invalid number of attach-impl-defs registers: %d\n",
3606 ntuples);
3607 return -EINVAL;
3608 }
3609
3610 regs = devm_kmalloc(
3611 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
3612 GFP_KERNEL);
3613 if (!regs)
3614 return -ENOMEM;
3615
3616 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
3617 if (!tuples)
3618 return -ENOMEM;
3619
3620 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
3621 tuples, ntuples);
3622 if (ret)
3623 return ret;
3624
3625 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
3626 regit->offset = tuples[i];
3627 regit->value = tuples[i + 1];
3628 }
3629
3630 devm_kfree(dev, tuples);
3631
3632 smmu->impl_def_attach_registers = regs;
3633 smmu->num_impl_def_attach_registers = ntuples / 2;
3634
3635 return 0;
3636}
3637
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003638
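/*
 * Acquire every clock named in "clock-names".  Clocks that report a rate
 * of zero are set to a rate rounded from 1 kHz via clk_round_rate().
 */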
3639static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003640{
3641 const char *cname;
3642 struct property *prop;
3643 int i;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003644 struct device *dev = pwr->dev;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003645
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003646 pwr->num_clocks =
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003647 of_property_count_strings(dev->of_node, "clock-names");
3648
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003649 if (pwr->num_clocks < 1) {
3650 pwr->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003651 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07003652 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003653
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003654 pwr->clocks = devm_kzalloc(
3655 dev, sizeof(*pwr->clocks) * pwr->num_clocks,
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003656 GFP_KERNEL);
3657
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003658 if (!pwr->clocks)
3659 return -ENOMEM;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003660
3661 i = 0;
3662 of_property_for_each_string(dev->of_node, "clock-names",
3663 prop, cname) {
3664 struct clk *c = devm_clk_get(dev, cname);
3665
3666 if (IS_ERR(c)) {
3667 dev_err(dev, "Couldn't get clock: %s",
3668 cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07003669 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003670 }
3671
3672 if (clk_get_rate(c) == 0) {
3673 long rate = clk_round_rate(c, 1000);
3674
3675 clk_set_rate(c, rate);
3676 }
3677
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003678 pwr->clocks[i] = c;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003679
3680 ++i;
3681 }
3682 return 0;
3683}
3684
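/*
 * Acquire the GDSC regulators named in "qcom,regulator-names" as a bulk
 * set, along with the optional "qcom,deferred-regulator-disable-delay"
 * value.
 */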
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003685static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003686{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003687 const char *cname;
3688 struct property *prop;
3689 int i, ret = 0;
3690 struct device *dev = pwr->dev;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003691
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003692 pwr->num_gdscs =
3693 of_property_count_strings(dev->of_node, "qcom,regulator-names");
3694
3695 if (pwr->num_gdscs < 1) {
3696 pwr->num_gdscs = 0;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003697 return 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003698 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003699
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003700 pwr->gdscs = devm_kzalloc(
3701 dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
3702
3703 if (!pwr->gdscs)
3704 return -ENOMEM;
3705
Prakash Guptafad87ca2017-05-16 12:13:02 +05303706 if (!of_property_read_u32(dev->of_node,
3707 "qcom,deferred-regulator-disable-delay",
3708 &(pwr->regulator_defer)))
3709 dev_info(dev, "regulator defer delay %d\n",
3710 pwr->regulator_defer);
3711
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003712 i = 0;
3713 of_property_for_each_string(dev->of_node, "qcom,regulator-names",
3714 prop, cname)
Patrick Daly86396be2017-04-17 18:08:45 -07003715 pwr->gdscs[i++].supply = cname;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003716
3717 ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
3718 return ret;
3719}
3720
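/*
 * Register a bus-scaling client from the "qcom,msm-bus,*" properties.
 * Missing bus-scaling data is not an error; a malformed entry is.
 */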
3721static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
3722{
3723 struct device *dev = pwr->dev;
3724
3725 /* We don't want the bus APIs to print an error message */
3726 if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
3727 dev_dbg(dev, "No bus scaling info\n");
3728 return 0;
3729 }
3730
3731 pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
3732 if (!pwr->bus_dt_data) {
3733 dev_err(dev, "Unable to read bus-scaling from devicetree\n");
3734 return -EINVAL;
3735 }
3736
3737 pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
3738 if (!pwr->bus_client) {
3739 dev_err(dev, "Bus client registration failed\n");
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003740 return -EINVAL;
3741 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003742
3743 return 0;
3744}
3745
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003746/*
3747 * Cleanup done by devm. Any non-devm resources must clean up themselves.
3748 */
3749static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
3750 struct platform_device *pdev)
Patrick Daly2764f952016-09-06 19:22:44 -07003751{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003752 struct arm_smmu_power_resources *pwr;
3753 int ret;
Patrick Daly2764f952016-09-06 19:22:44 -07003754
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003755 pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
3756 if (!pwr)
3757 return ERR_PTR(-ENOMEM);
Patrick Daly2764f952016-09-06 19:22:44 -07003758
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003759 pwr->dev = &pdev->dev;
3760 pwr->pdev = pdev;
3761 mutex_init(&pwr->power_lock);
3762 spin_lock_init(&pwr->clock_refs_lock);
Patrick Daly2764f952016-09-06 19:22:44 -07003763
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003764 ret = arm_smmu_init_clocks(pwr);
3765 if (ret)
3766 return ERR_PTR(ret);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003767
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003768 ret = arm_smmu_init_regulators(pwr);
3769 if (ret)
3770 return ERR_PTR(ret);
3771
3772 ret = arm_smmu_init_bus_scaling(pwr);
3773 if (ret)
3774 return ERR_PTR(ret);
3775
3776 return pwr;
Patrick Daly2764f952016-09-06 19:22:44 -07003777}
3778
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003779/*
Patrick Dalyabeee952017-04-13 18:14:59 -07003780 * Bus APIs are not devm-safe, so the bus client must be unregistered here.
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003781 */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003782static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003783{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003784 msm_bus_scale_unregister_client(pwr->bus_client);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003785}
3786
Will Deacon45ae7cf2013-06-24 18:31:25 +01003787static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
3788{
3789 unsigned long size;
3790 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
3791 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003792 bool cttw_dt, cttw_reg;
Robin Murphya754fd12016-09-12 17:13:50 +01003793 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003794
Mitchel Humpherysba822582015-10-20 11:37:41 -07003795 dev_dbg(smmu->dev, "probing hardware configuration...\n");
3796 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01003797 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003798
3799 /* ID0 */
3800 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01003801
3802 /* Restrict available stages based on module parameter */
3803 if (force_stage == 1)
3804 id &= ~(ID0_S2TS | ID0_NTS);
3805 else if (force_stage == 2)
3806 id &= ~(ID0_S1TS | ID0_NTS);
3807
Will Deacon45ae7cf2013-06-24 18:31:25 +01003808 if (id & ID0_S1TS) {
3809 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003810 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003811 }
3812
3813 if (id & ID0_S2TS) {
3814 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003815 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003816 }
3817
3818 if (id & ID0_NTS) {
3819 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003820 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003821 }
3822
3823 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01003824 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003825 dev_err(smmu->dev, "\tno translation support!\n");
3826 return -ENODEV;
3827 }
3828
Robin Murphyb7862e32016-04-13 18:13:03 +01003829 if ((id & ID0_S1TS) &&
3830 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003831 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003832 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003833 }
3834
Robin Murphybae2c2d2015-07-29 19:46:05 +01003835 /*
3836 * In order for DMA API calls to work properly, we must defer to what
3837 * the DT says about coherency, regardless of what the hardware claims.
3838 * Fortunately, this also opens up a workaround for systems where the
3839 * ID register value has ended up configured incorrectly.
3840 */
3841 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
3842 cttw_reg = !!(id & ID0_CTTW);
3843 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003844 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003845 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003846 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01003847 cttw_dt ? "" : "non-");
3848 if (cttw_dt != cttw_reg)
3849 dev_notice(smmu->dev,
3850 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003851
Robin Murphy53867802016-09-12 17:13:48 +01003852 /* Max. number of entries we have for stream matching/indexing */
3853 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
3854 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003855 if (id & ID0_SMS) {
Robin Murphy53867802016-09-12 17:13:48 +01003856 u32 smr;
Patrick Daly937de532016-12-12 18:44:09 -08003857 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003858
3859 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy53867802016-09-12 17:13:48 +01003860 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
3861 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003862 dev_err(smmu->dev,
3863 "stream-matching supported, but no SMRs present!\n");
3864 return -ENODEV;
3865 }
3866
Robin Murphy53867802016-09-12 17:13:48 +01003867 /*
3868 * SMR.ID bits may not be preserved if the corresponding MASK
3869 * bits are set, so check each one separately. We can reject
3870 * masters later if they try to claim IDs outside these masks.
3871 */
Patrick Daly937de532016-12-12 18:44:09 -08003872 for (i = 0; i < size; i++) {
3873 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
3874 if (!(smr & SMR_VALID))
3875 break;
3876 }
3877 if (i == size) {
3878 dev_err(smmu->dev,
3879 "Unable to compute streamid_masks\n");
3880 return -ENODEV;
3881 }
3882
Robin Murphy53867802016-09-12 17:13:48 +01003883 smr = smmu->streamid_mask << SMR_ID_SHIFT;
Patrick Daly937de532016-12-12 18:44:09 -08003884 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
3885 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
Robin Murphy53867802016-09-12 17:13:48 +01003886 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003887
Robin Murphy53867802016-09-12 17:13:48 +01003888 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
Patrick Daly937de532016-12-12 18:44:09 -08003889 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
3890 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
Robin Murphy53867802016-09-12 17:13:48 +01003891 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
Dhaval Patel031d7462015-05-09 14:47:29 -07003892
Robin Murphy468f4942016-09-12 17:13:49 +01003893 /* Zero-initialised to mark as invalid */
3894 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
3895 GFP_KERNEL);
3896 if (!smmu->smrs)
3897 return -ENOMEM;
3898
Robin Murphy53867802016-09-12 17:13:48 +01003899 dev_notice(smmu->dev,
3900 "\tstream matching with %lu register groups, mask 0x%x",
3901 size, smmu->smr_mask_mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003902 }
Robin Murphya754fd12016-09-12 17:13:50 +01003903 /* s2cr->type == 0 means translation, so initialise explicitly */
3904 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
3905 GFP_KERNEL);
3906 if (!smmu->s2crs)
3907 return -ENOMEM;
3908 for (i = 0; i < size; i++)
3909 smmu->s2crs[i] = s2cr_init_val;
3910
Robin Murphy53867802016-09-12 17:13:48 +01003911 smmu->num_mapping_groups = size;
Robin Murphy6668f692016-09-12 17:13:54 +01003912 mutex_init(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003913
Robin Murphy7602b872016-04-28 17:12:09 +01003914 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
3915 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
3916 if (!(id & ID0_PTFS_NO_AARCH32S))
3917 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
3918 }
3919
Will Deacon45ae7cf2013-06-24 18:31:25 +01003920 /* ID1 */
3921 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01003922 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003923
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003924 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00003925 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01003926 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003927 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07003928 dev_warn(smmu->dev,
3929 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
3930 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003931
Will Deacon518f7132014-11-14 17:17:54 +00003932 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003933 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
3934 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
3935 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
3936 return -ENODEV;
3937 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07003938 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01003939 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01003940 /*
3941 * Cavium CN88xx erratum #27704.
3942 * Ensure ASID and VMID allocation is unique across all SMMUs in
3943 * the system.
3944 */
3945 if (smmu->model == CAVIUM_SMMUV2) {
3946 smmu->cavium_id_base =
3947 atomic_add_return(smmu->num_context_banks,
3948 &cavium_smmu_context_count);
3949 smmu->cavium_id_base -= smmu->num_context_banks;
3950 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003951
3952 /* ID2 */
3953 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
3954 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003955 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003956
Will Deacon518f7132014-11-14 17:17:54 +00003957 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003958 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003959 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003960
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003961 if (id & ID2_VMID16)
3962 smmu->features |= ARM_SMMU_FEAT_VMID16;
3963
Robin Murphyf1d84542015-03-04 16:41:05 +00003964 /*
3965 * What the page table walker can address actually depends on which
3966 * descriptor format is in use, but since a) we don't know that yet,
3967 * and b) it can vary per context bank, this will have to do...
3968 */
3969 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
3970 dev_warn(smmu->dev,
3971 "failed to set DMA mask for table walker\n");
3972
Robin Murphyb7862e32016-04-13 18:13:03 +01003973 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00003974 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01003975 if (smmu->version == ARM_SMMU_V1_64K)
3976 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003977 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003978 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00003979 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00003980 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01003981 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00003982 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01003983 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00003984 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01003985 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003986 }
3987
Robin Murphy7602b872016-04-28 17:12:09 +01003988 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01003989 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01003990 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01003991 if (smmu->features &
3992 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01003993 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01003994 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01003995 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01003996 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01003997 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01003998
Robin Murphyd5466352016-05-09 17:20:09 +01003999 if (arm_smmu_ops.pgsize_bitmap == -1UL)
4000 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
4001 else
4002 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004003 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01004004 smmu->pgsize_bitmap);
4005
Will Deacon518f7132014-11-14 17:17:54 +00004006
Will Deacon28d60072014-09-01 16:24:48 +01004007 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004008 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
4009 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01004010
4011 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004012 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
4013 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01004014
Will Deacon45ae7cf2013-06-24 18:31:25 +01004015 return 0;
4016}
4017
Robin Murphy67b65a32016-04-13 18:12:57 +01004018struct arm_smmu_match_data {
4019 enum arm_smmu_arch_version version;
4020 enum arm_smmu_implementation model;
Patrick Dalyd7476202016-09-08 18:23:28 -07004021 struct arm_smmu_arch_ops *arch_ops;
Robin Murphy67b65a32016-04-13 18:12:57 +01004022};
4023
Patrick Dalyd7476202016-09-08 18:23:28 -07004024#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops) \
4025static struct arm_smmu_match_data name = { \
4026.version = ver, \
4027.model = imp, \
4028.arch_ops = ops, \
4029} \
Robin Murphy67b65a32016-04-13 18:12:57 +01004030
Patrick Daly1f8a2882016-09-12 17:32:05 -07004031struct arm_smmu_arch_ops qsmmuv500_arch_ops;
4032
Patrick Dalyd7476202016-09-08 18:23:28 -07004033ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
4034ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
4035ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
4036ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
4037ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
Patrick Dalyad441dd2016-09-15 15:50:46 -07004038ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004039ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
4040 &qsmmuv500_arch_ops);
Robin Murphy67b65a32016-04-13 18:12:57 +01004041
Joerg Roedel09b52692014-10-02 12:24:45 +02004042static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01004043 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
4044 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
4045 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01004046 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01004047 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01004048 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07004049 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Patrick Daly1f8a2882016-09-12 17:32:05 -07004050 { .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
Robin Murphy09360402014-08-28 17:51:59 +01004051 { },
4052};
4053MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
4054
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004055
4056static int arm_smmu_of_iommu_configure_fixup(struct device *dev, void *data)
4057{
4058 if (!dev->iommu_fwspec)
4059 of_iommu_configure(dev, dev->of_node);
4060 return 0;
4061}
4062
Patrick Daly000a2f22017-02-13 22:18:12 -08004063static int arm_smmu_add_device_fixup(struct device *dev, void *data)
4064{
4065 struct iommu_ops *ops = data;
4066
4067 ops->add_device(dev);
4068 return 0;
4069}
4070
Patrick Daly1f8a2882016-09-12 17:32:05 -07004071static int qsmmuv500_tbu_register(struct device *dev, void *data);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004072static int arm_smmu_device_dt_probe(struct platform_device *pdev)
4073{
Robin Murphy67b65a32016-04-13 18:12:57 +01004074 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004075 struct resource *res;
4076 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004077 struct device *dev = &pdev->dev;
Robin Murphyd5b41782016-09-14 15:21:39 +01004078 int num_irqs, i, err;
Robin Murphy7e96c742016-09-14 15:26:46 +01004079 bool legacy_binding;
4080
4081 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
4082 if (legacy_binding && !using_generic_binding) {
4083 if (!using_legacy_binding)
4084 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
4085 using_legacy_binding = true;
4086 } else if (!legacy_binding && !using_legacy_binding) {
4087 using_generic_binding = true;
4088 } else {
4089 dev_err(dev, "not probing due to mismatched DT properties\n");
4090 return -ENODEV;
4091 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004092
4093 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
4094 if (!smmu) {
4095 dev_err(dev, "failed to allocate arm_smmu_device\n");
4096 return -ENOMEM;
4097 }
4098 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08004099 spin_lock_init(&smmu->atos_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07004100 idr_init(&smmu->asid_idr);
4101 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004102
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004103 data = of_device_get_match_data(dev);
Robin Murphy67b65a32016-04-13 18:12:57 +01004104 smmu->version = data->version;
4105 smmu->model = data->model;
Patrick Dalyd7476202016-09-08 18:23:28 -07004106 smmu->arch_ops = data->arch_ops;
Robin Murphy09360402014-08-28 17:51:59 +01004107
Will Deacon45ae7cf2013-06-24 18:31:25 +01004108 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01004109 smmu->base = devm_ioremap_resource(dev, res);
4110 if (IS_ERR(smmu->base))
4111 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004112 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004113
4114 if (of_property_read_u32(dev->of_node, "#global-interrupts",
4115 &smmu->num_global_irqs)) {
4116 dev_err(dev, "missing #global-interrupts property\n");
4117 return -ENODEV;
4118 }
4119
4120 num_irqs = 0;
4121 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
4122 num_irqs++;
4123 if (num_irqs > smmu->num_global_irqs)
4124 smmu->num_context_irqs++;
4125 }
4126
Andreas Herrmann44a08de2013-10-01 13:39:07 +01004127 if (!smmu->num_context_irqs) {
4128 dev_err(dev, "found %d interrupts but expected at least %d\n",
4129 num_irqs, smmu->num_global_irqs + 1);
4130 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004131 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004132
4133 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
4134 GFP_KERNEL);
4135 if (!smmu->irqs) {
4136 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
4137 return -ENOMEM;
4138 }
4139
4140 for (i = 0; i < num_irqs; ++i) {
4141 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07004142
Will Deacon45ae7cf2013-06-24 18:31:25 +01004143 if (irq < 0) {
4144 dev_err(dev, "failed to get irq index %d\n", i);
4145 return -ENODEV;
4146 }
4147 smmu->irqs[i] = irq;
4148 }
4149
Dhaval Patel031d7462015-05-09 14:47:29 -07004150 parse_driver_options(smmu);
4151
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004152 smmu->pwr = arm_smmu_init_power_resources(pdev);
4153 if (IS_ERR(smmu->pwr))
4154 return PTR_ERR(smmu->pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004155
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004156 err = arm_smmu_power_on(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004157 if (err)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004158 goto out_exit_power_resources;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004159
4160 err = arm_smmu_device_cfg_probe(smmu);
4161 if (err)
4162 goto out_power_off;
4163
Patrick Dalyda688822017-05-17 20:12:48 -07004164 err = arm_smmu_handoff_cbs(smmu);
4165 if (err)
4166 goto out_power_off;
4167
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004168 err = arm_smmu_parse_impl_def_registers(smmu);
4169 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004170 goto out_power_off;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004171
Robin Murphyb7862e32016-04-13 18:13:03 +01004172 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01004173 smmu->num_context_banks != smmu->num_context_irqs) {
4174 dev_err(dev,
Mitchel Humpherys73230ce2014-12-11 17:18:02 -08004175 "found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
4176 smmu->num_context_irqs, smmu->num_context_banks,
4177 smmu->num_context_banks);
4178 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004179 }
4180
Will Deacon45ae7cf2013-06-24 18:31:25 +01004181 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08004182 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
4183 NULL, arm_smmu_global_fault,
4184 IRQF_ONESHOT | IRQF_SHARED,
4185 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004186 if (err) {
4187 dev_err(dev, "failed to request global IRQ %d (%u)\n",
4188 i, smmu->irqs[i]);
Robin Murphyd5b41782016-09-14 15:21:39 +01004189 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004190 }
4191 }
4192
Patrick Dalyd7476202016-09-08 18:23:28 -07004193 err = arm_smmu_arch_init(smmu);
4194 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004195 goto out_power_off;
Patrick Dalyd7476202016-09-08 18:23:28 -07004196
Robin Murphy06e393e2016-09-12 17:13:55 +01004197 of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004198 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01004199 arm_smmu_device_reset(smmu);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004200 arm_smmu_power_off(smmu->pwr);
Patrick Dalyd7476202016-09-08 18:23:28 -07004201
Patrick Daly8e3371a2017-02-13 22:14:53 -08004202 INIT_LIST_HEAD(&smmu->list);
4203 spin_lock(&arm_smmu_devices_lock);
4204 list_add(&smmu->list, &arm_smmu_devices);
4205 spin_unlock(&arm_smmu_devices_lock);
4206
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004207 /* bus_set_iommu depends on this. */
4208 bus_for_each_dev(&platform_bus_type, NULL, NULL,
4209 arm_smmu_of_iommu_configure_fixup);
4210
Robin Murphy7e96c742016-09-14 15:26:46 +01004211 /* Oh, for a proper bus abstraction */
4212 if (!iommu_present(&platform_bus_type))
4213 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
Patrick Daly000a2f22017-02-13 22:18:12 -08004214 else
4215 bus_for_each_dev(&platform_bus_type, NULL, &arm_smmu_ops,
4216 arm_smmu_add_device_fixup);
Robin Murphy7e96c742016-09-14 15:26:46 +01004217#ifdef CONFIG_ARM_AMBA
4218 if (!iommu_present(&amba_bustype))
4219 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
4220#endif
4221#ifdef CONFIG_PCI
4222 if (!iommu_present(&pci_bus_type)) {
4223 pci_request_acs();
4224 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
4225 }
4226#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01004227 return 0;
4228
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004229out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004230 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004231
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004232out_exit_power_resources:
4233 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004234
Will Deacon45ae7cf2013-06-24 18:31:25 +01004235 return err;
4236}
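
/*
 * Illustrative generic-binding SMMU node accepted by this probe path for a
 * qsmmuv500 instance (addresses, interrupts and names are placeholders;
 * the "tcu-base" region is consumed later by qsmmuv500_arch_init()):
 *
 *	apps_smmu: iommu@15000000 {
 *		compatible = "qcom,qsmmu-v500";
 *		reg = <0x15000000 0x80000>, <0x15002000 0x1000>;
 *		reg-names = "base", "tcu-base";
 *		#iommu-cells = <1>;
 *		#global-interrupts = <1>;
 *		interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
 *			     <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
 *	};
 */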
4237
4238static int arm_smmu_device_remove(struct platform_device *pdev)
4239{
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004240 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004241
4242 if (!smmu)
4243 return -ENODEV;
4244
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004245 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07004246 return -EINVAL;
4247
Will Deaconecfadb62013-07-31 19:21:28 +01004248 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004249 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004250
Patrick Dalyc190d932016-08-30 17:23:28 -07004251 idr_destroy(&smmu->asid_idr);
4252
Will Deacon45ae7cf2013-06-24 18:31:25 +01004253 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07004254 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004255 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004256
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004257 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004258
Will Deacon45ae7cf2013-06-24 18:31:25 +01004259 return 0;
4260}
4261
Will Deacon45ae7cf2013-06-24 18:31:25 +01004262static struct platform_driver arm_smmu_driver = {
4263 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004264 .name = "arm-smmu",
4265 .of_match_table = of_match_ptr(arm_smmu_of_match),
4266 },
4267 .probe = arm_smmu_device_dt_probe,
4268 .remove = arm_smmu_device_remove,
4269};
4270
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004271static struct platform_driver qsmmuv500_tbu_driver;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004272static int __init arm_smmu_init(void)
4273{
Robin Murphy7e96c742016-09-14 15:26:46 +01004274 static bool registered;
4275 int ret = 0;
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004276 ktime_t cur;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004277
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004278 if (registered)
4279 return 0;
4280
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004281 cur = ktime_get();
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004282 ret = platform_driver_register(&qsmmuv500_tbu_driver);
4283 if (ret)
4284 return ret;
4285
4286 ret = platform_driver_register(&arm_smmu_driver);
4287 registered = !ret;
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004288 trace_smmu_init(ktime_us_delta(ktime_get(), cur));
4289
Robin Murphy7e96c742016-09-14 15:26:46 +01004290 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004291}
4292
4293static void __exit arm_smmu_exit(void)
4294{
4295 return platform_driver_unregister(&arm_smmu_driver);
4296}
4297
Andreas Herrmannb1950b22013-10-01 13:39:05 +01004298subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004299module_exit(arm_smmu_exit);
4300
Robin Murphy7e96c742016-09-14 15:26:46 +01004301static int __init arm_smmu_of_init(struct device_node *np)
4302{
4303 int ret = arm_smmu_init();
4304
4305 if (ret)
4306 return ret;
4307
4308 if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
4309 return -ENODEV;
4310
4311 return 0;
4312}
4313IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
4314IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
4315IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
4316IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
4317IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
4318IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
Robin Murphy7e96c742016-09-14 15:26:46 +01004319
Patrick Dalya0fddb62017-03-27 19:26:59 -07004320#define TCU_HW_VERSION_HLOS1 (0x18)
4321
Patrick Daly1f8a2882016-09-12 17:32:05 -07004322#define DEBUG_SID_HALT_REG 0x0
4323#define DEBUG_SID_HALT_VAL (0x1 << 16)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004324#define DEBUG_SID_HALT_SID_MASK 0x3ff
4325
4326#define DEBUG_VA_ADDR_REG 0x8
4327
4328#define DEBUG_TXN_TRIGG_REG 0x18
4329#define DEBUG_TXN_AXPROT_SHIFT 6
4330#define DEBUG_TXN_AXCACHE_SHIFT 2
4331#define DEBUG_TRX_WRITE (0x1 << 1)
4332#define DEBUG_TXN_READ (0x0 << 1)
4333#define DEBUG_TXN_TRIGGER 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004334
4335#define DEBUG_SR_HALT_ACK_REG 0x20
4336#define DEBUG_SR_HALT_ACK_VAL (0x1 << 1)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004337#define DEBUG_SR_ECATS_RUNNING_VAL (0x1 << 0)
4338
4339#define DEBUG_PAR_REG 0x28
4340#define DEBUG_PAR_PA_MASK ((0x1ULL << 36) - 1)
4341#define DEBUG_PAR_PA_SHIFT 12
4342#define DEBUG_PAR_FAULT_VAL 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004343
Patrick Daly8c1202b2017-05-10 15:42:30 -07004344#define TBU_DBG_TIMEOUT_US 100
Patrick Daly1f8a2882016-09-12 17:32:05 -07004345
Patrick Daly23301482017-10-12 16:18:25 -07004346#define QSMMUV500_ACTLR_DEEP_PREFETCH_MASK 0x3
4347#define QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT 0x8
4348
Patrick Daly03330cc2017-08-11 14:56:38 -07004349
4350struct actlr_setting {
4351 struct arm_smmu_smr smr;
4352 u32 actlr;
4353};
4354
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004355struct qsmmuv500_archdata {
4356 struct list_head tbus;
Patrick Dalya0fddb62017-03-27 19:26:59 -07004357 void __iomem *tcu_base;
4358 u32 version;
Patrick Dalyda765c62017-09-11 16:31:07 -07004359
4360 struct actlr_setting *actlrs;
4361 u32 actlr_tbl_size;
4362
4363 struct arm_smmu_smr *errata1_clients;
4364 u32 num_errata1_clients;
4365 remote_spinlock_t errata1_lock;
4366 ktime_t last_tlbi_ktime;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004367};
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004368#define get_qsmmuv500_archdata(smmu) \
4369 ((struct qsmmuv500_archdata *)(smmu->archdata))
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004370
Patrick Daly1f8a2882016-09-12 17:32:05 -07004371struct qsmmuv500_tbu_device {
4372 struct list_head list;
4373 struct device *dev;
4374 struct arm_smmu_device *smmu;
4375 void __iomem *base;
4376 void __iomem *status_reg;
4377
4378 struct arm_smmu_power_resources *pwr;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004379 u32 sid_start;
4380 u32 num_sids;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004381
4382 /* Protects halt count */
4383 spinlock_t halt_lock;
4384 u32 halt_count;
4385};
4386
Patrick Daly03330cc2017-08-11 14:56:38 -07004387struct qsmmuv500_group_iommudata {
4388 bool has_actlr;
4389 u32 actlr;
4390};
4391#define to_qsmmuv500_group_iommudata(group) \
4392 ((struct qsmmuv500_group_iommudata *) \
4393 (iommu_group_get_iommudata(group)))
4394
4395
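/*
 * Return true if any stream mapping already installed for this master can
 * alias the given SMR. Two SMRs overlap when their IDs agree on every bit
 * that neither mask marks as don't-care, i.e. when
 * (id1 ^ id2) & ~(mask1 | mask2) == 0. Worked example with hypothetical
 * values: id 0x40/mask 0x0f overlaps id 0x4c/mask 0x0, since the IDs
 * differ only in bits covered by the first mask.
 */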
4396static bool arm_smmu_fwspec_match_smr(struct iommu_fwspec *fwspec,
Patrick Dalyda765c62017-09-11 16:31:07 -07004397 struct arm_smmu_smr *smr)
4398{
4399 struct arm_smmu_smr *smr2;
Patrick Daly03330cc2017-08-11 14:56:38 -07004400 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Patrick Dalyda765c62017-09-11 16:31:07 -07004401 int i, idx;
4402
Patrick Daly03330cc2017-08-11 14:56:38 -07004403 for_each_cfg_sme(fwspec, i, idx) {
4404 smr2 = &smmu->smrs[idx];
Patrick Dalyda765c62017-09-11 16:31:07 -07004405 /* Continue if table entry does not match */
4406 if ((smr->id ^ smr2->id) & ~(smr->mask | smr2->mask))
4407 continue;
4408 return true;
4409 }
4410 return false;
4411}
4412
4413#define ERRATA1_REMOTE_SPINLOCK "S:6"
4414#define ERRATA1_TLBI_INTERVAL_US 10
4415static bool
4416qsmmuv500_errata1_required(struct arm_smmu_domain *smmu_domain,
4417 struct qsmmuv500_archdata *data)
4418{
4419 bool ret = false;
4420 int j;
4421 struct arm_smmu_smr *smr;
Patrick Daly03330cc2017-08-11 14:56:38 -07004422 struct iommu_fwspec *fwspec;
Patrick Dalyda765c62017-09-11 16:31:07 -07004423
4424 if (smmu_domain->qsmmuv500_errata1_init)
4425 return smmu_domain->qsmmuv500_errata1_client;
4426
Patrick Daly03330cc2017-08-11 14:56:38 -07004427 fwspec = smmu_domain->dev->iommu_fwspec;
Patrick Dalyda765c62017-09-11 16:31:07 -07004428 for (j = 0; j < data->num_errata1_clients; j++) {
4429 smr = &data->errata1_clients[j];
Patrick Daly03330cc2017-08-11 14:56:38 -07004430 if (arm_smmu_fwspec_match_smr(fwspec, smr)) {
Patrick Dalyda765c62017-09-11 16:31:07 -07004431 ret = true;
4432 break;
4433 }
4434 }
4435
4436 smmu_domain->qsmmuv500_errata1_init = true;
4437 smmu_domain->qsmmuv500_errata1_client = ret;
4438 return ret;
4439}
4440
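/*
 * Issue TLBIALL for the context bank and wait for the TLBSYNC to drain.
 * If the sync has not completed within 100us, the NoC throttling
 * workaround is engaged so that outstanding traffic cannot stall the
 * invalidation indefinitely, and the wait is retried with a longer
 * timeout before the throttle is released.
 */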
4441static void __qsmmuv500_errata1_tlbiall(struct arm_smmu_domain *smmu_domain)
4442{
4443 struct arm_smmu_device *smmu = smmu_domain->smmu;
4444 struct device *dev = smmu_domain->dev;
4445 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
4446 void __iomem *base;
4447 ktime_t cur;
4448 u32 val;
4449
4450 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
4451 writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
4452 writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
4453 if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
4454 !(val & TLBSTATUS_SACTIVE), 0, 100)) {
4455 cur = ktime_get();
4456 trace_errata_throttle_start(dev, 0);
4457
4458 msm_bus_noc_throttle_wa(true);
4459 if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
4460 !(val & TLBSTATUS_SACTIVE), 0, 10000)) {
4461			dev_err(smmu->dev, "ERRATA1 TLBSYNC timeout\n");
4462 trace_errata_failed(dev, 0);
4463 }
4464
4465 msm_bus_noc_throttle_wa(false);
4466
4467 trace_errata_throttle_end(
4468 dev, ktime_us_delta(ktime_get(), cur));
4469 }
4470}
4471
4472/* Must be called with clocks/regulators enabled */
4473static void qsmmuv500_errata1_tlb_inv_context(void *cookie)
4474{
4475 struct arm_smmu_domain *smmu_domain = cookie;
4476 struct device *dev = smmu_domain->dev;
4477 struct qsmmuv500_archdata *data =
4478 get_qsmmuv500_archdata(smmu_domain->smmu);
4479 ktime_t cur;
Patrick Daly1faa3112017-10-31 16:40:40 -07004480 unsigned long flags;
Patrick Dalyda765c62017-09-11 16:31:07 -07004481 bool errata;
4482
4483 cur = ktime_get();
4484 trace_errata_tlbi_start(dev, 0);
4485
4486 errata = qsmmuv500_errata1_required(smmu_domain, data);
Patrick Daly1faa3112017-10-31 16:40:40 -07004487 remote_spin_lock_irqsave(&data->errata1_lock, flags);
Patrick Dalyda765c62017-09-11 16:31:07 -07004488 if (errata) {
4489 s64 delta;
4490
4491 delta = ktime_us_delta(ktime_get(), data->last_tlbi_ktime);
4492 if (delta < ERRATA1_TLBI_INTERVAL_US)
4493 udelay(ERRATA1_TLBI_INTERVAL_US - delta);
4494
4495 __qsmmuv500_errata1_tlbiall(smmu_domain);
4496
4497 data->last_tlbi_ktime = ktime_get();
4498 } else {
4499 __qsmmuv500_errata1_tlbiall(smmu_domain);
4500 }
Patrick Daly1faa3112017-10-31 16:40:40 -07004501 remote_spin_unlock_irqrestore(&data->errata1_lock, flags);
Patrick Dalyda765c62017-09-11 16:31:07 -07004502
4503 trace_errata_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
4504}
4505
4506static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops = {
4507 .tlb_flush_all = qsmmuv500_errata1_tlb_inv_context,
4508 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
4509 .free_pages_exact = arm_smmu_free_pages_exact,
4510};
4511
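/*
 * Ask the TBU to stop accepting new transactions and wait for the halt
 * acknowledgement. Halt requests are reference counted, so only the first
 * caller programs the hardware. If the acknowledgement is held up by a
 * pending context fault, faults are temporarily disabled and the stalled
 * transaction is terminated before retrying the wait.
 */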
Patrick Daly8c1202b2017-05-10 15:42:30 -07004512static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu,
4513 struct arm_smmu_domain *smmu_domain)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004514{
4515 unsigned long flags;
Patrick Daly8c1202b2017-05-10 15:42:30 -07004516 u32 halt, fsr, sctlr_orig, sctlr, status;
4517 void __iomem *base, *cb_base;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004518
4519 spin_lock_irqsave(&tbu->halt_lock, flags);
4520 if (tbu->halt_count) {
4521 tbu->halt_count++;
4522 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4523 return 0;
4524 }
4525
Patrick Daly8c1202b2017-05-10 15:42:30 -07004526 cb_base = ARM_SMMU_CB_BASE(smmu_domain->smmu) +
4527 ARM_SMMU_CB(smmu_domain->smmu, smmu_domain->cfg.cbndx);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004528 base = tbu->base;
Patrick Daly8c1202b2017-05-10 15:42:30 -07004529 halt = readl_relaxed(base + DEBUG_SID_HALT_REG);
4530 halt |= DEBUG_SID_HALT_VAL;
4531 writel_relaxed(halt, base + DEBUG_SID_HALT_REG);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004532
Patrick Daly8c1202b2017-05-10 15:42:30 -07004533 if (!readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
4534 (status & DEBUG_SR_HALT_ACK_VAL),
4535 0, TBU_DBG_TIMEOUT_US))
4536 goto out;
4537
4538 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4539 if (!(fsr & FSR_FAULT)) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004540 dev_err(tbu->dev, "Couldn't halt TBU!\n");
4541 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4542 return -ETIMEDOUT;
4543 }
4544
Patrick Daly8c1202b2017-05-10 15:42:30 -07004545 /*
4546 * We are in a fault; Our request to halt the bus will not complete
4547 * until transactions in front of us (such as the fault itself) have
4548 * completed. Disable iommu faults and terminate any existing
4549 * transactions.
4550 */
4551 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
4552 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
4553 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
4554
4555 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
4556 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4557
4558 if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
4559 (status & DEBUG_SR_HALT_ACK_VAL),
4560 0, TBU_DBG_TIMEOUT_US)) {
4561 dev_err(tbu->dev, "Couldn't halt TBU from fault context!\n");
4562 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
4563 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4564 return -ETIMEDOUT;
4565 }
4566
4567 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
4568out:
Patrick Daly1f8a2882016-09-12 17:32:05 -07004569 tbu->halt_count = 1;
4570 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4571 return 0;
4572}
4573
4574static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
4575{
4576 unsigned long flags;
4577 u32 val;
4578 void __iomem *base;
4579
4580 spin_lock_irqsave(&tbu->halt_lock, flags);
4581 if (!tbu->halt_count) {
4582 WARN(1, "%s: bad tbu->halt_count", dev_name(tbu->dev));
4583 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4584 return;
4585
4586 } else if (tbu->halt_count > 1) {
4587 tbu->halt_count--;
4588 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4589 return;
4590 }
4591
4592 base = tbu->base;
4593 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
4594 val &= ~DEBUG_SID_HALT_VAL;
4595 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
4596
4597 tbu->halt_count = 0;
4598 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4599}
4600
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004601static struct qsmmuv500_tbu_device *qsmmuv500_find_tbu(
4602 struct arm_smmu_device *smmu, u32 sid)
4603{
4604 struct qsmmuv500_tbu_device *tbu = NULL;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004605 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004606
4607 list_for_each_entry(tbu, &data->tbus, list) {
4608 if (tbu->sid_start <= sid &&
4609 sid < tbu->sid_start + tbu->num_sids)
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07004610 return tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004611 }
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07004612 return NULL;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004613}
4614
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004615static int qsmmuv500_ecats_lock(struct arm_smmu_domain *smmu_domain,
4616 struct qsmmuv500_tbu_device *tbu,
4617 unsigned long *flags)
4618{
4619 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004620 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004621 u32 val;
4622
4623 spin_lock_irqsave(&smmu->atos_lock, *flags);
4624 /* The status register is not accessible on version 1.0 */
4625 if (data->version == 0x01000000)
4626 return 0;
4627
4628 if (readl_poll_timeout_atomic(tbu->status_reg,
4629 val, (val == 0x1), 0,
4630 TBU_DBG_TIMEOUT_US)) {
4631 dev_err(tbu->dev, "ECATS hw busy!\n");
4632 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4633 return -ETIMEDOUT;
4634 }
4635
4636 return 0;
4637}
4638
4639static void qsmmuv500_ecats_unlock(struct arm_smmu_domain *smmu_domain,
4640 struct qsmmuv500_tbu_device *tbu,
4641 unsigned long *flags)
4642{
4643 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004644 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004645
4646 /* The status register is not accessible on version 1.0 */
4647 if (data->version != 0x01000000)
4648 writel_relaxed(0, tbu->status_reg);
4649 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4650}
4651
4652/*
4653 * Resolve an IOVA via the TBU's ECATS debug interface. Zero means failure.
4654 */
4655static phys_addr_t qsmmuv500_iova_to_phys(
4656 struct iommu_domain *domain, dma_addr_t iova, u32 sid)
4657{
4658 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
4659 struct arm_smmu_device *smmu = smmu_domain->smmu;
4660 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
4661 struct qsmmuv500_tbu_device *tbu;
4662 int ret;
4663 phys_addr_t phys = 0;
4664 u64 val, fsr;
4665 unsigned long flags;
4666 void __iomem *cb_base;
4667 u32 sctlr_orig, sctlr;
4668 int needs_redo = 0;
Patrick Daly8c1202b2017-05-10 15:42:30 -07004669 ktime_t timeout;
4670
4671 /* only 36 bit iova is supported */
4672 if (iova >= (1ULL << 36)) {
4673 dev_err_ratelimited(smmu->dev, "ECATS: address too large: %pad\n",
4674 &iova);
4675 return 0;
4676 }
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004677
4678 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
4679 tbu = qsmmuv500_find_tbu(smmu, sid);
4680 if (!tbu)
4681 return 0;
4682
4683 ret = arm_smmu_power_on(tbu->pwr);
4684 if (ret)
4685 return 0;
4686
Patrick Daly8c1202b2017-05-10 15:42:30 -07004687 ret = qsmmuv500_tbu_halt(tbu, smmu_domain);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004688 if (ret)
4689 goto out_power_off;
4690
Patrick Daly8c1202b2017-05-10 15:42:30 -07004691 /*
4692 * ECATS can trigger the fault interrupt, so disable it temporarily
4693 * and check for an interrupt manually.
4694 */
4695 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
4696 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
4697 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
4698
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004699 /* Only one concurrent atos operation */
4700 ret = qsmmuv500_ecats_lock(smmu_domain, tbu, &flags);
4701 if (ret)
4702 goto out_resume;
4703
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004704redo:
4705 /* Set address and stream-id */
4706 val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
4707 val |= sid & DEBUG_SID_HALT_SID_MASK;
4708 writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
4709 writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
4710
4711 /*
4712 * Write-back Read and Write-Allocate
4713	 * Privileged, nonsecure, data transaction
4714 * Read operation.
4715 */
4716 val = 0xF << DEBUG_TXN_AXCACHE_SHIFT;
4717 val |= 0x3 << DEBUG_TXN_AXPROT_SHIFT;
4718 val |= DEBUG_TXN_TRIGGER;
4719 writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);
4720
4721 ret = 0;
Patrick Daly8c1202b2017-05-10 15:42:30 -07004722	/* Based on readx_poll_timeout_atomic() */
4723 timeout = ktime_add_us(ktime_get(), TBU_DBG_TIMEOUT_US);
4724 for (;;) {
4725 val = readl_relaxed(tbu->base + DEBUG_SR_HALT_ACK_REG);
4726 if (!(val & DEBUG_SR_ECATS_RUNNING_VAL))
4727 break;
4728 val = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4729 if (val & FSR_FAULT)
4730 break;
4731 if (ktime_compare(ktime_get(), timeout) > 0) {
4732 dev_err(tbu->dev, "ECATS translation timed out!\n");
4733 ret = -ETIMEDOUT;
4734 break;
4735 }
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004736 }
4737
4738 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4739 if (fsr & FSR_FAULT) {
4740 dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx\n",
Patrick Daly8c1202b2017-05-10 15:42:30 -07004741 fsr);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004742 ret = -EINVAL;
4743
4744		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
4745 /*
4746 * Clear pending interrupts
4747 * Barrier required to ensure that the FSR is cleared
4748 * before resuming SMMU operation
4749 */
4750 wmb();
4751 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4752 }
4753
4754 val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
4755 if (val & DEBUG_PAR_FAULT_VAL) {
4756 dev_err(tbu->dev, "ECATS translation failed! PAR = %llx\n",
4757 val);
4758 ret = -EINVAL;
4759 }
4760
4761 phys = (val >> DEBUG_PAR_PA_SHIFT) & DEBUG_PAR_PA_MASK;
4762 if (ret < 0)
4763 phys = 0;
4764
4765 /* Reset hardware */
4766 writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
4767 writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
4768
4769 /*
4770 * After a failed translation, the next successful translation will
4771 * incorrectly be reported as a failure.
4772 */
4773 if (!phys && needs_redo++ < 2)
4774 goto redo;
4775
4776 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
4777 qsmmuv500_ecats_unlock(smmu_domain, tbu, &flags);
4778
4779out_resume:
4780 qsmmuv500_tbu_resume(tbu);
4781
4782out_power_off:
4783 arm_smmu_power_off(tbu->pwr);
4784
4785 return phys;
4786}
4787
4788static phys_addr_t qsmmuv500_iova_to_phys_hard(
4789 struct iommu_domain *domain, dma_addr_t iova)
4790{
4791 u16 sid;
4792 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
4793 struct iommu_fwspec *fwspec;
4794
4795 /* Select a sid */
4796 fwspec = smmu_domain->dev->iommu_fwspec;
4797 sid = (u16)fwspec->ids[0];
4798
4799 return qsmmuv500_iova_to_phys(domain, iova, sid);
4800}
4801
Patrick Daly03330cc2017-08-11 14:56:38 -07004802static void qsmmuv500_release_group_iommudata(void *data)
4803{
4804 kfree(data);
4805}
4806
4807/* If a device has a valid actlr, it must match */
4808static int qsmmuv500_device_group(struct device *dev,
4809 struct iommu_group *group)
4810{
4811 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
4812 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
4813 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
4814 struct qsmmuv500_group_iommudata *iommudata;
4815 u32 actlr, i;
4816 struct arm_smmu_smr *smr;
4817
4818 iommudata = to_qsmmuv500_group_iommudata(group);
4819 if (!iommudata) {
4820 iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
4821 if (!iommudata)
4822 return -ENOMEM;
4823
4824 iommu_group_set_iommudata(group, iommudata,
4825 qsmmuv500_release_group_iommudata);
4826 }
4827
4828 for (i = 0; i < data->actlr_tbl_size; i++) {
4829 smr = &data->actlrs[i].smr;
4830 actlr = data->actlrs[i].actlr;
4831
4832 if (!arm_smmu_fwspec_match_smr(fwspec, smr))
4833 continue;
4834
4835 if (!iommudata->has_actlr) {
4836 iommudata->actlr = actlr;
4837 iommudata->has_actlr = true;
4838 } else if (iommudata->actlr != actlr) {
4839 return -EINVAL;
4840 }
4841 }
4842
4843 return 0;
4844}
4845
4846static void qsmmuv500_init_cb(struct arm_smmu_domain *smmu_domain,
4847 struct device *dev)
4848{
4849 struct arm_smmu_device *smmu = smmu_domain->smmu;
4850 struct qsmmuv500_group_iommudata *iommudata =
4851 to_qsmmuv500_group_iommudata(dev->iommu_group);
4852 void __iomem *cb_base;
4853 const struct iommu_gather_ops *tlb;
4854
4855 if (!iommudata->has_actlr)
4856 return;
4857
4858 tlb = smmu_domain->pgtbl_cfg.tlb;
4859 cb_base = ARM_SMMU_CB_BASE(smmu) +
4860 ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
4861
4862 writel_relaxed(iommudata->actlr, cb_base + ARM_SMMU_CB_ACTLR);
4863
4864 /*
Patrick Daly23301482017-10-12 16:18:25 -07004865 * Prefetch only works properly if the start and end of all
4866	 * buffers in the page table are aligned to 16 KB.
4867 */
Patrick Daly27bd9292017-11-22 13:59:59 -08004868 if ((iommudata->actlr >> QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT) &
Patrick Daly23301482017-10-12 16:18:25 -07004869 QSMMUV500_ACTLR_DEEP_PREFETCH_MASK)
4870 smmu_domain->qsmmuv500_errata2_min_align = true;
4871
4872 /*
Patrick Daly03330cc2017-08-11 14:56:38 -07004873 * Flush the context bank after modifying ACTLR to ensure there
4874 * are no cache entries with stale state
4875 */
4876 tlb->tlb_flush_all(smmu_domain);
4877}
4878
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004879static int qsmmuv500_tbu_register(struct device *dev, void *cookie)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004880{
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004881 struct arm_smmu_device *smmu = cookie;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004882 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004883 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004884
4885 if (!dev->driver) {
4886 dev_err(dev, "TBU failed probe, QSMMUV500 cannot continue!\n");
4887 return -EINVAL;
4888 }
4889
4890 tbu = dev_get_drvdata(dev);
4891
4892 INIT_LIST_HEAD(&tbu->list);
4893 tbu->smmu = smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004894 list_add(&tbu->list, &data->tbus);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004895 return 0;
4896}
4897
Patrick Dalyda765c62017-09-11 16:31:07 -07004898static int qsmmuv500_parse_errata1(struct arm_smmu_device *smmu)
4899{
4900 int len, i;
4901 struct device *dev = smmu->dev;
4902 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
4903 struct arm_smmu_smr *smrs;
4904 const __be32 *cell;
4905
4906 cell = of_get_property(dev->of_node, "qcom,mmu500-errata-1", NULL);
4907 if (!cell)
4908 return 0;
4909
4910 remote_spin_lock_init(&data->errata1_lock, ERRATA1_REMOTE_SPINLOCK);
4911 len = of_property_count_elems_of_size(
4912 dev->of_node, "qcom,mmu500-errata-1", sizeof(u32) * 2);
4913 if (len < 0)
4914 return 0;
4915
4916 smrs = devm_kzalloc(dev, sizeof(*smrs) * len, GFP_KERNEL);
4917 if (!smrs)
4918 return -ENOMEM;
4919
4920 for (i = 0; i < len; i++) {
4921 smrs[i].id = of_read_number(cell++, 1);
4922 smrs[i].mask = of_read_number(cell++, 1);
4923 }
4924
4925 data->errata1_clients = smrs;
4926 data->num_errata1_clients = len;
4927 return 0;
4928}
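
/*
 * The errata-1 client list is specified as <sid mask> pairs. Illustrative
 * fragment matching the parsing above (placeholder values):
 *
 *	qcom,mmu500-errata-1 = <0x800 0x3f>, <0xc00 0x0>;
 */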
4929
Patrick Daly03330cc2017-08-11 14:56:38 -07004930static int qsmmuv500_read_actlr_tbl(struct arm_smmu_device *smmu)
4931{
4932 int len, i;
4933 struct device *dev = smmu->dev;
4934 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
4935 struct actlr_setting *actlrs;
4936 const __be32 *cell;
4937
4938 cell = of_get_property(dev->of_node, "qcom,actlr", NULL);
4939 if (!cell)
4940 return 0;
4941
4942 len = of_property_count_elems_of_size(dev->of_node, "qcom,actlr",
4943 sizeof(u32) * 3);
4944 if (len < 0)
4945 return 0;
4946
4947 actlrs = devm_kzalloc(dev, sizeof(*actlrs) * len, GFP_KERNEL);
4948 if (!actlrs)
4949 return -ENOMEM;
4950
4951 for (i = 0; i < len; i++) {
4952 actlrs[i].smr.id = of_read_number(cell++, 1);
4953 actlrs[i].smr.mask = of_read_number(cell++, 1);
4954 actlrs[i].actlr = of_read_number(cell++, 1);
4955 }
4956
4957 data->actlrs = actlrs;
4958 data->actlr_tbl_size = len;
4959 return 0;
4960}
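
/*
 * The ACTLR table is specified as <sid mask actlr> triples. Illustrative
 * fragment matching the parsing above (placeholder values):
 *
 *	qcom,actlr = <0x880 0x8 0x303>, <0x3000 0xff 0x3>;
 */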
4961
Patrick Daly1f8a2882016-09-12 17:32:05 -07004962static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
4963{
Patrick Dalya0fddb62017-03-27 19:26:59 -07004964 struct resource *res;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004965 struct device *dev = smmu->dev;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004966 struct qsmmuv500_archdata *data;
Patrick Dalya0fddb62017-03-27 19:26:59 -07004967 struct platform_device *pdev;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004968 int ret;
Patrick Daly03330cc2017-08-11 14:56:38 -07004969 u32 val;
4970 void __iomem *reg;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004971
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004972 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
4973 if (!data)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004974 return -ENOMEM;
4975
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004976 INIT_LIST_HEAD(&data->tbus);
Patrick Dalya0fddb62017-03-27 19:26:59 -07004977
4978 pdev = container_of(dev, struct platform_device, dev);
4979 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcu-base");
4980 data->tcu_base = devm_ioremap_resource(dev, res);
4981 if (IS_ERR(data->tcu_base))
4982 return PTR_ERR(data->tcu_base);
4983
4984 data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004985 smmu->archdata = data;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004986
Patrick Dalyda765c62017-09-11 16:31:07 -07004987 ret = qsmmuv500_parse_errata1(smmu);
4988 if (ret)
4989 return ret;
4990
Patrick Daly03330cc2017-08-11 14:56:38 -07004991 ret = qsmmuv500_read_actlr_tbl(smmu);
4992 if (ret)
4993 return ret;
4994
4995 reg = ARM_SMMU_GR0(smmu);
4996 val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
4997 val &= ~ARM_MMU500_ACR_CACHE_LOCK;
4998 writel_relaxed(val, reg + ARM_SMMU_GR0_sACR);
4999 val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
5000 /*
5001	 * Modifying the nonsecure copy of the sACR register is only
5002 * allowed if permission is given in the secure sACR register.
5003 * Attempt to detect if we were able to update the value.
5004 */
5005 WARN_ON(val & ARM_MMU500_ACR_CACHE_LOCK);
5006
Patrick Daly1f8a2882016-09-12 17:32:05 -07005007 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
5008 if (ret)
5009 return ret;
5010
5011 /* Attempt to register child devices */
5012 ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
5013 if (ret)
Patrick Daly6ce54262017-04-12 21:24:06 -07005014 return -EPROBE_DEFER;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005015
5016 return 0;
5017}
5018
5019struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
5020 .init = qsmmuv500_arch_init,
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005021 .iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
Patrick Daly03330cc2017-08-11 14:56:38 -07005022 .init_context_bank = qsmmuv500_init_cb,
5023 .device_group = qsmmuv500_device_group,
Patrick Daly1f8a2882016-09-12 17:32:05 -07005024};
5025
5026static const struct of_device_id qsmmuv500_tbu_of_match[] = {
5027 {.compatible = "qcom,qsmmuv500-tbu"},
5028 {}
5029};
5030
5031static int qsmmuv500_tbu_probe(struct platform_device *pdev)
5032{
5033 struct resource *res;
5034 struct device *dev = &pdev->dev;
5035 struct qsmmuv500_tbu_device *tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005036 const __be32 *cell;
5037 int len;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005038
5039 tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
5040 if (!tbu)
5041 return -ENOMEM;
5042
5043 INIT_LIST_HEAD(&tbu->list);
5044 tbu->dev = dev;
5045 spin_lock_init(&tbu->halt_lock);
5046
5047 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
5048 tbu->base = devm_ioremap_resource(dev, res);
5049 if (IS_ERR(tbu->base))
5050 return PTR_ERR(tbu->base);
5051
5052 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "status-reg");
5053 tbu->status_reg = devm_ioremap_resource(dev, res);
5054 if (IS_ERR(tbu->status_reg))
5055 return PTR_ERR(tbu->status_reg);
5056
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005057 cell = of_get_property(dev->of_node, "qcom,stream-id-range", &len);
5058 if (!cell || len < 8)
5059 return -EINVAL;
5060
5061 tbu->sid_start = of_read_number(cell, 1);
5062 tbu->num_sids = of_read_number(cell + 1, 1);
5063
Patrick Daly1f8a2882016-09-12 17:32:05 -07005064 tbu->pwr = arm_smmu_init_power_resources(pdev);
5065 if (IS_ERR(tbu->pwr))
5066 return PTR_ERR(tbu->pwr);
5067
5068 dev_set_drvdata(dev, tbu);
5069 return 0;
5070}
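
/*
 * Illustrative TBU child node matching the properties parsed above
 * (addresses and the stream-ID range are placeholders):
 *
 *	tbu@150c5000 {
 *		compatible = "qcom,qsmmuv500-tbu";
 *		reg = <0x150c5000 0x1000>, <0x15002200 0x8>;
 *		reg-names = "base", "status-reg";
 *		qcom,stream-id-range = <0x800 0x400>;
 *	};
 */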
5071
5072static struct platform_driver qsmmuv500_tbu_driver = {
5073 .driver = {
5074 .name = "qsmmuv500-tbu",
5075 .of_match_table = of_match_ptr(qsmmuv500_tbu_of_match),
5076 },
5077 .probe = qsmmuv500_tbu_probe,
5078};
5079
Will Deacon45ae7cf2013-06-24 18:31:25 +01005080MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
5081MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
5082MODULE_LICENSE("GPL v2");