/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/of_platform.h>
#include <linux/msm-bus.h>
#include <dt-bindings/msm/msm-bus-ids.h>
#include <linux/remote_spinlock.h>
#include <linux/ktime.h>
#include <trace/events/iommu.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

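/*
 * Illustrative sketch (not driver code): with the Calxeda secure-config
 * workaround enabled, global configuration writes are routed through the
 * secure alias, e.g.:
 *
 *	writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
 *
 * Without ARM_SMMU_OPT_SECURE_CFG_ACCESS the macro collapses to the
 * ordinary GR0 base.
 */
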
/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

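/*
 * Illustrative sketch (assumed usage, not driver code): the helper lets a
 * 64-bit quantity such as a stage-2 TLBI address be written the same way
 * on 32-bit and 64-bit builds, e.g.:
 *
 *	smmu_write_atomic_lq(iova, reg);
 *
 * On a 32-bit kernel this degrades to a 32-bit write, which is sufficient
 * for the AArch32 formats as noted above.
 */
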
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3
#define sCR0_SHCFG_SHIFT		22
#define sCR0_SHCFG_MASK			0x3
#define sCR0_SHCFG_NSH			3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		500000	/* 500ms */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7FFF
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_SHCFG_SHIFT		8
#define S2CR_SHCFG_MASK			0x3
#define S2CR_SHCFG_NSH			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

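/*
 * Illustrative sketch (not driver code): the register base of context
 * bank 'n' is formed by combining the two macros above, e.g.:
 *
 *	void __iomem *cb_base;
 *
 *	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 *
 * i.e. the context banks occupy the upper half of the SMMU register
 * space, one translation-unit page per bank.
 */
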
#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FSRRESTORE		0x5c
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIALL		0x618
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define TLBSTATUS_SACTIVE		(1 << 0)
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_SHCFG_SHIFT		22
#define SCTLR_SHCFG_MASK		0x3
#define SCTLR_SHCFG_NSH			0x3
#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_HUPCF			(1 << 8)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
	QCOM_SMMUV500,
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

/*
 * attach_count
 *	The SMR and S2CR registers are only programmed when the number of
 *	devices attached to the iommu using these registers is > 0. This
 *	is required for the "SID switch" use case for secure display.
 *	Protected by stream_map_mutex.
 */
struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	int				attach_count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
	bool				cb_handoff;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
	.cb_handoff = false,						\
}

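/*
 * Illustrative sketch (assumed usage, not driver code): an S2CR slot is
 * returned to its default state with a plain structure assignment, e.g.
 * when a stream mapping entry is released:
 *
 *	smmu->s2crs[idx] = s2cr_init_val;
 *
 * The default type is FAULT or BYPASS depending on the disable_bypass
 * module parameter.
 */
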
struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

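/*
 * Illustrative sketch (assumed usage, not driver code): walking every
 * stream mapping entry belonging to a master typically looks like:
 *
 *	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
 *	int i, idx;
 *
 *	for_each_cfg_sme(fwspec, i, idx) {
 *		if (idx == INVALID_SMENDX)
 *			continue;
 *		... operate on smmu->smrs[idx] / smmu->s2crs[idx] ...
 *	}
 */
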
/*
 * Describes resources required for on/off power operation.
 * Separate reference count is provided for atomic/nonatomic
 * operations.
 */
struct arm_smmu_power_resources {
	struct platform_device		*pdev;
	struct device			*dev;

	struct clk			**clocks;
	int				num_clocks;

	struct regulator_bulk_data	*gdscs;
	int				num_gdscs;

	uint32_t			bus_client;
	struct msm_bus_scale_pdata	*bus_dt_data;

	/* Protects power_count */
	struct mutex			power_lock;
	int				power_count;

	/* Protects clock_refs_count */
	spinlock_t			clock_refs_lock;
	int				clock_refs_count;
	int				regulator_defer;
};

struct arm_smmu_arch_ops;
struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
#define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
#define ARM_SMMU_OPT_3LVL_TABLES	(1 << 4)
#define ARM_SMMU_OPT_NO_ASID_RETENTION	(1 << 5)
#define ARM_SMMU_OPT_DISABLE_ATOS	(1 << 6)
#define ARM_SMMU_OPT_MMU500_ERRATA1	(1 << 7)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;

	u32				cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
	unsigned int			num_impl_def_attach_registers;

	struct arm_smmu_power_resources *pwr;

	spinlock_t			atos_lock;

	/* protects idr */
	struct mutex			idr_mutex;
	struct idr			asid_idr;

	struct arm_smmu_arch_ops	*arch_ops;
	void				*archdata;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	u32				procid;
	u16				asid;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff
#define INVALID_CBNDX			0xff
#define INVALID_ASID			0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg)	((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_pte_info {
	void *virt_addr;
	size_t size;
	struct list_head entry;
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct device			*dev;
	struct io_pgtable_ops		*pgtbl_ops;
	struct io_pgtable_cfg		pgtbl_cfg;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	u32				attributes;
	u32				secure_vmid;
	struct list_head		pte_info_list;
	struct list_head		unassign_list;
	struct mutex			assign_lock;
	struct list_head		secure_pool_list;
	struct iommu_domain		domain;

	bool				qsmmuv500_errata1_init;
	bool				qsmmuv500_errata1_client;
	bool				qsmmuv500_errata2_min_align;
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
	{ ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
	{ ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" },
	{ ARM_SMMU_OPT_MMU500_ERRATA1, "qcom,mmu500-errata-1" },
	{ 0, NULL},
};

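/*
 * Illustrative sketch (hypothetical device tree fragment, not taken from a
 * real board file): each option above is enabled by adding the matching
 * boolean property to the SMMU node, e.g.:
 *
 *	smmu@1000000 {
 *		compatible = "qcom,smmu-v2";
 *		...
 *		qcom,skip-init;
 *		qcom,use-3-lvl-tables;
 *	};
 *
 * parse_driver_options() below turns such properties into ARM_SMMU_OPT_*
 * bits in smmu->options.
 */
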
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);

static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
				     dma_addr_t iova);

static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);

static int arm_smmu_alloc_cb(struct iommu_domain *domain,
			     struct arm_smmu_device *smmu,
			     struct device *dev);
static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops;

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_dbg(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
{
	if (smmu_domain->attributes &
			(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT))
		return true;
	else if (smmu_domain->smmu && smmu_domain->smmu->dev)
		return smmu_domain->smmu->dev->archdata.dma_coherent;
	else
		return false;
}

static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
{
	return (smmu_domain->secure_vmid != VMID_INVAL);
}

static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_lock(&smmu_domain->assign_lock);
}

static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_unlock(&smmu_domain->assign_lock);
}

/*
 * init()
 * Hook for additional device tree parsing at probe time.
 *
 * device_reset()
 * Hook for one-time architecture-specific register settings.
 *
 * iova_to_phys_hard()
 * Provides debug information. May be called from the context fault irq handler.
 *
 * init_context_bank()
 * Hook for architecture-specific settings which require knowledge of the
 * dynamically allocated context bank number.
 *
 * device_group()
 * Hook for checking whether a device is compatible with a given group.
 */
struct arm_smmu_arch_ops {
	int (*init)(struct arm_smmu_device *smmu);
	void (*device_reset)(struct arm_smmu_device *smmu);
	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
					 dma_addr_t iova);
	void (*init_context_bank)(struct arm_smmu_domain *smmu_domain,
				  struct device *dev);
	int (*device_group)(struct device *dev, struct iommu_group *group);
};

static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
{
	if (!smmu->arch_ops)
		return 0;
	if (!smmu->arch_ops->init)
		return 0;
	return smmu->arch_ops->init(smmu);
}

static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
{
	if (!smmu->arch_ops)
		return;
	if (!smmu->arch_ops->device_reset)
		return;
	return smmu->arch_ops->device_reset(smmu);
}

static void arm_smmu_arch_init_context_bank(
		struct arm_smmu_domain *smmu_domain, struct device *dev)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (!smmu->arch_ops)
		return;
	if (!smmu->arch_ops->init_context_bank)
		return;
	return smmu->arch_ops->init_context_bank(smmu_domain, dev);
}

static int arm_smmu_arch_device_group(struct device *dev,
					struct iommu_group *group)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);

	if (!smmu->arch_ops)
		return 0;
	if (!smmu->arch_ops->device_group)
		return 0;
	return smmu->arch_ops->device_group(dev, group);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err = 0;

	memset(&it, 0, sizeof(it));
	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_prepare(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
			while (i--)
				clk_unprepare(pwr->clocks[i]);
			break;
		}
	}
	return ret;
}

static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_unprepare(pwr->clocks[i - 1]);
}

static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_enable(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
			while (i--)
				clk_disable(pwr->clocks[i]);
			break;
		}
	}

	return ret;
}

static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_disable(pwr->clocks[i - 1]);
}

static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return 0;
	return msm_bus_scale_client_update_request(pwr->bus_client, 1);
}

static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return;
	WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
}

static int arm_smmu_enable_regulators(struct arm_smmu_power_resources *pwr)
{
	struct regulator_bulk_data *consumers;
	int num_consumers, ret;
	int i;

	num_consumers = pwr->num_gdscs;
	consumers = pwr->gdscs;
	for (i = 0; i < num_consumers; i++) {
		ret = regulator_enable(consumers[i].consumer);
		if (ret)
			goto out;
	}
	return 0;

out:
	i -= 1;
	for (; i >= 0; i--)
		regulator_disable(consumers[i].consumer);
	return ret;
}

static int arm_smmu_disable_regulators(struct arm_smmu_power_resources *pwr)
{
	struct regulator_bulk_data *consumers;
	int i;
	int num_consumers, ret, r;

	num_consumers = pwr->num_gdscs;
	consumers = pwr->gdscs;
	for (i = num_consumers - 1; i >= 0; --i) {
		ret = regulator_disable_deferred(consumers[i].consumer,
						 pwr->regulator_defer);
		if (ret != 0)
			goto err;
	}

	return 0;

err:
	pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret);
	for (++i; i < num_consumers; ++i) {
		r = regulator_enable(consumers[i].consumer);
		if (r != 0)
			pr_err("Failed to reenable %s: %d\n",
				consumers[i].supply, r);
	}

	return ret;
}

/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count > 0) {
		pwr->clock_refs_count++;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return 0;
	}

	ret = arm_smmu_enable_clocks(pwr);
	if (!ret)
		pwr->clock_refs_count = 1;

	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
	return ret;
}

/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
{
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count == 0) {
		WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;

	} else if (pwr->clock_refs_count > 1) {
		pwr->clock_refs_count--;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;
	}

	arm_smmu_disable_clocks(pwr);

	pwr->clock_refs_count = 0;
	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
}

static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
{
	int ret;

	mutex_lock(&pwr->power_lock);
	if (pwr->power_count > 0) {
		pwr->power_count += 1;
		mutex_unlock(&pwr->power_lock);
		return 0;
	}

	ret = arm_smmu_request_bus(pwr);
	if (ret)
		goto out_unlock;

	ret = arm_smmu_enable_regulators(pwr);
	if (ret)
		goto out_disable_bus;

	ret = arm_smmu_prepare_clocks(pwr);
	if (ret)
		goto out_disable_regulators;

	pwr->power_count = 1;
	mutex_unlock(&pwr->power_lock);
	return 0;

out_disable_regulators:
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
out_disable_bus:
	arm_smmu_unrequest_bus(pwr);
out_unlock:
	mutex_unlock(&pwr->power_lock);
	return ret;
}

static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
{
	mutex_lock(&pwr->power_lock);
	if (pwr->power_count == 0) {
		WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
		mutex_unlock(&pwr->power_lock);
		return;

	} else if (pwr->power_count > 1) {
		pwr->power_count--;
		mutex_unlock(&pwr->power_lock);
		return;
	}

	arm_smmu_unprepare_clocks(pwr);
	arm_smmu_disable_regulators(pwr);
	arm_smmu_unrequest_bus(pwr);
	pwr->power_count = 0;
	mutex_unlock(&pwr->power_lock);
}

static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
{
	int ret;

	ret = arm_smmu_power_on_slow(pwr);
	if (ret)
		return ret;

	ret = arm_smmu_power_on_atomic(pwr);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	arm_smmu_power_off_slow(pwr);
	return ret;
}

static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
{
	arm_smmu_power_off_atomic(pwr);
	arm_smmu_power_off_slow(pwr);
}

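/*
 * Illustrative sketch (assumed usage, not driver code): callers that need
 * to touch SMMU registers from sleepable context bracket the access with
 * the helpers above, e.g.:
 *
 *	ret = arm_smmu_power_on(smmu->pwr);
 *	if (ret)
 *		return ret;
 *	... access SMMU registers ...
 *	arm_smmu_power_off(smmu->pwr);
 *
 * The domain-aware wrappers below pick the atomic variants instead when
 * the domain has DOMAIN_ATTR_ATOMIC set.
 */
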
/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static int arm_smmu_domain_power_on(struct iommu_domain *domain,
				    struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain)
		return arm_smmu_power_on_atomic(smmu->pwr);

	return arm_smmu_power_on(smmu->pwr);
}

/*
 * Must be used instead of arm_smmu_power_off if it may be called from
 * atomic context
 */
1080static void arm_smmu_domain_power_off(struct iommu_domain *domain,
1081 struct arm_smmu_device *smmu)
1082{
1083 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1084 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
1085
1086 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001087 arm_smmu_power_off_atomic(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001088 return;
1089 }
1090
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001091 arm_smmu_power_off(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001092}
1093
Will Deacon45ae7cf2013-06-24 18:31:25 +01001094/* Wait for any pending TLB invalidations to complete */
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001095static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
1096 int cbndx)
1097{
1098 void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
1099 u32 val;
1100
1101 writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
1102 if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
1103 !(val & TLBSTATUS_SACTIVE),
Prakash Gupta515d9bc2017-11-20 15:00:39 +05301104 0, TLB_LOOP_TIMEOUT)) {
1105 trace_tlbsync_timeout(smmu->dev, 0);
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001106 dev_err(smmu->dev, "TLBSYNC timeout!\n");
Prakash Gupta515d9bc2017-11-20 15:00:39 +05301107 }
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001108}
1109
Will Deacon518f7132014-11-14 17:17:54 +00001110static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001111{
1112 int count = 0;
1113 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1114
1115 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
1116 while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
1117 & sTLBGSTATUS_GSACTIVE) {
1118 cpu_relax();
1119 if (++count == TLB_LOOP_TIMEOUT) {
1120 dev_err_ratelimited(smmu->dev,
1121 "TLB sync timed out -- SMMU may be deadlocked\n");
1122 return;
1123 }
1124 udelay(1);
1125 }
1126}
1127
Will Deacon518f7132014-11-14 17:17:54 +00001128static void arm_smmu_tlb_sync(void *cookie)
Will Deacon1463fe42013-07-31 19:21:27 +01001129{
Will Deacon518f7132014-11-14 17:17:54 +00001130 struct arm_smmu_domain *smmu_domain = cookie;
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001131 arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
Will Deacon518f7132014-11-14 17:17:54 +00001132}
1133
Patrick Daly8befb662016-08-17 20:03:28 -07001134/* Must be called with clocks/regulators enabled */
Will Deacon518f7132014-11-14 17:17:54 +00001135static void arm_smmu_tlb_inv_context(void *cookie)
1136{
1137 struct arm_smmu_domain *smmu_domain = cookie;
Prakash Gupta515d9bc2017-11-20 15:00:39 +05301138 struct device *dev = smmu_domain->dev;
Will Deacon44680ee2014-06-25 11:29:12 +01001139 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1140 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon1463fe42013-07-31 19:21:27 +01001141 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
Will Deacon518f7132014-11-14 17:17:54 +00001142 void __iomem *base;
Patrick Dalye7069342017-07-11 12:35:55 -07001143 bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
Prakash Gupta515d9bc2017-11-20 15:00:39 +05301144 ktime_t cur = ktime_get();
1145
1146 trace_tlbi_start(dev, 0);
Will Deacon1463fe42013-07-31 19:21:27 +01001147
Patrick Dalye7069342017-07-11 12:35:55 -07001148 if (stage1 && !use_tlbiall) {
Will Deacon1463fe42013-07-31 19:21:27 +01001149 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001150 writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +01001151 base + ARM_SMMU_CB_S1_TLBIASID);
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001152 arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
Patrick Dalye7069342017-07-11 12:35:55 -07001153 } else if (stage1 && use_tlbiall) {
1154 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1155 writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
1156 arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001157 } else {
1158 base = ARM_SMMU_GR0(smmu);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001159 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +01001160 base + ARM_SMMU_GR0_TLBIVMID);
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001161 __arm_smmu_tlb_sync(smmu);
Will Deacon1463fe42013-07-31 19:21:27 +01001162 }
Prakash Gupta515d9bc2017-11-20 15:00:39 +05301163
1164 trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
Will Deacon1463fe42013-07-31 19:21:27 +01001165}
1166
Will Deacon518f7132014-11-14 17:17:54 +00001167static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
Robin Murphy06c610e2015-12-07 18:18:53 +00001168 size_t granule, bool leaf, void *cookie)
Will Deacon518f7132014-11-14 17:17:54 +00001169{
1170 struct arm_smmu_domain *smmu_domain = cookie;
1171 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1172 struct arm_smmu_device *smmu = smmu_domain->smmu;
1173 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1174 void __iomem *reg;
Patrick Dalye7069342017-07-11 12:35:55 -07001175 bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
Will Deacon518f7132014-11-14 17:17:54 +00001176
Patrick Dalye7069342017-07-11 12:35:55 -07001177 if (stage1 && !use_tlbiall) {
Will Deacon518f7132014-11-14 17:17:54 +00001178 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1179 reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
1180
Robin Murphy7602b872016-04-28 17:12:09 +01001181 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001182 iova &= ~12UL;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001183 iova |= ARM_SMMU_CB_ASID(smmu, cfg);
Robin Murphy75df1382015-12-07 18:18:52 +00001184 do {
1185 writel_relaxed(iova, reg);
1186 iova += granule;
1187 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001188 } else {
1189 iova >>= 12;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001190 iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
Robin Murphy75df1382015-12-07 18:18:52 +00001191 do {
1192 writeq_relaxed(iova, reg);
1193 iova += granule >> 12;
1194 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001195 }
Patrick Dalye7069342017-07-11 12:35:55 -07001196 } else if (stage1 && use_tlbiall) {
1197 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1198 reg += ARM_SMMU_CB_S1_TLBIALL;
1199 writel_relaxed(0, reg);
Will Deacon518f7132014-11-14 17:17:54 +00001200 } else if (smmu->version == ARM_SMMU_V2) {
1201 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1202 reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
1203 ARM_SMMU_CB_S2_TLBIIPAS2;
Robin Murphy75df1382015-12-07 18:18:52 +00001204 iova >>= 12;
1205 do {
Robin Murphyf9a05f02016-04-13 18:13:01 +01001206 smmu_write_atomic_lq(iova, reg);
Robin Murphy75df1382015-12-07 18:18:52 +00001207 iova += granule >> 12;
1208 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001209 } else {
1210 reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001211 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
Will Deacon518f7132014-11-14 17:17:54 +00001212 }
1213}
1214
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001215struct arm_smmu_secure_pool_chunk {
1216 void *addr;
1217 size_t size;
1218 struct list_head list;
1219};
1220
1221static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
1222 size_t size)
1223{
1224 struct arm_smmu_secure_pool_chunk *it;
1225
1226 list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
1227 if (it->size == size) {
1228 void *addr = it->addr;
1229
1230 list_del(&it->list);
1231 kfree(it);
1232 return addr;
1233 }
1234 }
1235
1236 return NULL;
1237}
1238
1239static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
1240 void *addr, size_t size)
1241{
1242 struct arm_smmu_secure_pool_chunk *chunk;
1243
1244 chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
1245 if (!chunk)
1246 return -ENOMEM;
1247
1248 chunk->addr = addr;
1249 chunk->size = size;
1250 memset(addr, 0, size);
1251 list_add(&chunk->list, &smmu_domain->secure_pool_list);
1252
1253 return 0;
1254}
1255
1256static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
1257{
1258 struct arm_smmu_secure_pool_chunk *it, *i;
1259
1260 list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
1261 arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
1262 /* pages will be freed later (after being unassigned) */
Prakash Gupta8e827be2017-10-04 12:37:11 +05301263 list_del(&it->list);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001264 kfree(it);
1265 }
1266}
1267
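/*
 * io-pgtable allocator hooks. Non-secure domains fall straight through to
 * alloc_pages_exact()/free_pages_exact(). Secure domains first try to reuse
 * a page from the pool above and otherwise allocate and hyp-assign a fresh
 * one via arm_smmu_prepare_pgtable(); on free, the page goes back into the
 * pool (or is queued for unassignment if the pool node cannot be allocated).
 */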
Patrick Dalyc11d1082016-09-01 15:52:44 -07001268static void *arm_smmu_alloc_pages_exact(void *cookie,
1269 size_t size, gfp_t gfp_mask)
1270{
1271 int ret;
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001272 void *page;
1273 struct arm_smmu_domain *smmu_domain = cookie;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001274
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001275 if (!arm_smmu_is_domain_secure(smmu_domain))
1276 return alloc_pages_exact(size, gfp_mask);
1277
1278 page = arm_smmu_secure_pool_remove(smmu_domain, size);
1279 if (page)
1280 return page;
1281
1282 page = alloc_pages_exact(size, gfp_mask);
1283 if (page) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07001284 ret = arm_smmu_prepare_pgtable(page, cookie);
1285 if (ret) {
1286 free_pages_exact(page, size);
1287 return NULL;
1288 }
1289 }
1290
1291 return page;
1292}
1293
1294static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
1295{
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001296 struct arm_smmu_domain *smmu_domain = cookie;
1297
1298 if (!arm_smmu_is_domain_secure(smmu_domain)) {
1299 free_pages_exact(virt, size);
1300 return;
1301 }
1302
1303 if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
1304 arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001305}
1306
Will Deacon518f7132014-11-14 17:17:54 +00001307static struct iommu_gather_ops arm_smmu_gather_ops = {
1308 .tlb_flush_all = arm_smmu_tlb_inv_context,
1309 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
1310 .tlb_sync = arm_smmu_tlb_sync,
Patrick Dalyc11d1082016-09-01 15:52:44 -07001311 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
1312 .free_pages_exact = arm_smmu_free_pages_exact,
Will Deacon518f7132014-11-14 17:17:54 +00001313};
1314
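/*
 * Fault diagnostic: translate the faulting IOVA through the hardware ATOS
 * path, invalidate the whole context TLB, then translate it again. If the
 * two results differ, the SMMU was most likely serving a stale translation;
 * the pre-invalidate result is returned unless that walk failed, in which
 * case the post-invalidate one is used.
 */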
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001315static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
1316 dma_addr_t iova, u32 fsr)
1317{
1318 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001319 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyda765c62017-09-11 16:31:07 -07001320 const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001321 phys_addr_t phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07001322 phys_addr_t phys_post_tlbiall;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001323
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001324 phys = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyda765c62017-09-11 16:31:07 -07001325 tlb->tlb_flush_all(smmu_domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001326 phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001327
Patrick Dalyad441dd2016-09-15 15:50:46 -07001328 if (phys != phys_post_tlbiall) {
1329 dev_err(smmu->dev,
1330 "ATOS results differed across TLBIALL...\n"
1331 "Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
1332 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001333
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001334 return (phys == 0 ? phys_post_tlbiall : phys);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001335}
1336
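/*
 * Threaded context-bank fault handler: power on the SMMU, read FSR/FSYNR0/
 * FAR for the faulting context bank, give any client fault handler first
 * refusal via report_iommu_fault(), and only then decode and print the
 * fault. The FSR is cleared and any stalled transaction resumed/terminated
 * unless the client returned -EBUSY (see the comment on the stall sequence
 * at the end of this function).
 */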
Will Deacon45ae7cf2013-06-24 18:31:25 +01001337static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
1338{
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001339 int flags, ret, tmp;
Patrick Daly5ba28112016-08-30 19:18:52 -07001340 u32 fsr, fsynr, resume;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001341 unsigned long iova;
1342 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +01001343 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001344 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1345 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001346 void __iomem *cb_base;
Shalaj Jain04059c52015-03-03 13:34:59 -08001347 void __iomem *gr1_base;
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001348 bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001349 phys_addr_t phys_soft;
Shalaj Jain04059c52015-03-03 13:34:59 -08001350 u32 frsynra;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001351 bool non_fatal_fault = !!(smmu_domain->attributes &
Sudarshan Rajagopalanf4464e02017-08-10 14:30:39 -07001352 (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001353
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001354 static DEFINE_RATELIMIT_STATE(_rs,
1355 DEFAULT_RATELIMIT_INTERVAL,
1356 DEFAULT_RATELIMIT_BURST);
1357
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001358 ret = arm_smmu_power_on(smmu->pwr);
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001359 if (ret)
1360 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001361
Shalaj Jain04059c52015-03-03 13:34:59 -08001362 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001363 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001364 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
1365
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001366 if (!(fsr & FSR_FAULT)) {
1367 ret = IRQ_NONE;
1368 goto out_power_off;
1369 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001370
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001371 if (fatal_asf && (fsr & FSR_ASF)) {
1372 dev_err(smmu->dev,
1373 "Took an address size fault. Refusing to recover.\n");
1374 BUG();
1375 }
1376
Will Deacon45ae7cf2013-06-24 18:31:25 +01001377 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
Patrick Daly5ba28112016-08-30 19:18:52 -07001378 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001379 if (fsr & FSR_TF)
1380 flags |= IOMMU_FAULT_TRANSLATION;
1381 if (fsr & FSR_PF)
1382 flags |= IOMMU_FAULT_PERMISSION;
Mitchel Humpherysdd75e332015-08-19 11:02:33 -07001383 if (fsr & FSR_EF)
1384 flags |= IOMMU_FAULT_EXTERNAL;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001385 if (fsr & FSR_SS)
1386 flags |= IOMMU_FAULT_TRANSACTION_STALLED;
Patrick Daly5ba28112016-08-30 19:18:52 -07001387
Robin Murphyf9a05f02016-04-13 18:13:01 +01001388 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001389 phys_soft = arm_smmu_iova_to_phys(domain, iova);
Shalaj Jain04059c52015-03-03 13:34:59 -08001390 frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
1391 frsynra &= CBFRSYNRA_SID_MASK;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001392 tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
1393 if (!tmp || (tmp == -EBUSY)) {
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001394 dev_dbg(smmu->dev,
1395 "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1396 iova, fsr, fsynr, cfg->cbndx);
1397 dev_dbg(smmu->dev,
1398 "soft iova-to-phys=%pa\n", &phys_soft);
Patrick Daly5ba28112016-08-30 19:18:52 -07001399 ret = IRQ_HANDLED;
Shrenuj Bansald5083c02015-09-18 14:59:09 -07001400 resume = RESUME_TERMINATE;
Patrick Daly5ba28112016-08-30 19:18:52 -07001401 } else {
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001402 phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
1403 fsr);
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001404 if (__ratelimit(&_rs)) {
1405 dev_err(smmu->dev,
1406 "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1407 iova, fsr, fsynr, cfg->cbndx);
1408 dev_err(smmu->dev, "FAR = %016lx\n",
1409 (unsigned long)iova);
1410 dev_err(smmu->dev,
1411 "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
1412 fsr,
1413 (fsr & 0x02) ? "TF " : "",
1414 (fsr & 0x04) ? "AFF " : "",
1415 (fsr & 0x08) ? "PF " : "",
1416 (fsr & 0x10) ? "EF " : "",
1417 (fsr & 0x20) ? "TLBMCF " : "",
1418 (fsr & 0x40) ? "TLBLKF " : "",
1419 (fsr & 0x80) ? "MHF " : "",
1420 (fsr & 0x40000000) ? "SS " : "",
1421 (fsr & 0x80000000) ? "MULTI " : "");
1422 dev_err(smmu->dev,
1423 "soft iova-to-phys=%pa\n", &phys_soft);
Mitchel Humpherysd03b65d2015-11-05 11:50:29 -08001424 if (!phys_soft)
1425 dev_err(smmu->dev,
1426 "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
1427 dev_name(smmu->dev));
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001428 if (phys_atos)
1429 dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
1430 &phys_atos);
1431 else
1432 dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001433 dev_err(smmu->dev, "SID=0x%x\n", frsynra);
1434 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001435 ret = IRQ_NONE;
1436 resume = RESUME_TERMINATE;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001437 if (!non_fatal_fault) {
1438 dev_err(smmu->dev,
1439 "Unhandled arm-smmu context fault!\n");
1440 BUG();
1441 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001442 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001443
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001444 /*
1445 * If the client returns -EBUSY, do not clear FSR and do not RESUME
1446 * if stalled. This is required to keep the IOMMU client stalled on
1447 * the outstanding fault. This gives the client a chance to take any
1448 * debug action and then terminate the stalled transaction.
1449 * So, the sequence in case of stall on fault should be:
1450 * 1) Do not clear FSR or write to RESUME here
1451 * 2) Client takes any debug action
1452 * 3) Client terminates the stalled transaction and resumes the IOMMU
1453 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1454 * not before so that the fault remains outstanding. This ensures
1455 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1456 * need to be terminated.
1457 */
1458 if (tmp != -EBUSY) {
1459 /* Clear the faulting FSR */
1460 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001461
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001462 /*
1463 * Barrier required to ensure that the FSR is cleared
1464 * before resuming SMMU operation
1465 */
1466 wmb();
1467
1468 /* Retry or terminate any stalled transactions */
1469 if (fsr & FSR_SS)
1470 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1471 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001472
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001473out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001474 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001475
Patrick Daly5ba28112016-08-30 19:18:52 -07001476 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001477}
1478
1479static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1480{
1481 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1482 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001483 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001484
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001485 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001486 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001487
Will Deacon45ae7cf2013-06-24 18:31:25 +01001488 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1489 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1490 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1491 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1492
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001493 if (!gfsr) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001494 arm_smmu_power_off(smmu->pwr);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001495 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001496 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001497
Will Deacon45ae7cf2013-06-24 18:31:25 +01001498 dev_err_ratelimited(smmu->dev,
1499 "Unexpected global fault, this could be serious\n");
1500 dev_err_ratelimited(smmu->dev,
1501 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1502 gfsr, gfsynr0, gfsynr1, gfsynr2);
1503
1504 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001505 arm_smmu_power_off(smmu->pwr);
Will Deaconadaba322013-07-31 19:21:26 +01001506 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001507}
1508
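/*
 * Program a context bank from the finalised io-pgtable configuration:
 * CBA2R (register format and, where supported, a 16-bit VMID) on SMMUv2,
 * CBAR, the TTBRs (with the ASID folded into CONTEXTIDR or the upper TTBR
 * bits, depending on format), TTBCR/TTBCR2, the stage-1 MAIRs, and finally
 * SCTLR. Translation (SCTLR.M) is left disabled here for stage-1 domains
 * that have the S1_BYPASS or EARLY_MAP attribute set.
 */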
Will Deacon518f7132014-11-14 17:17:54 +00001509static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1510 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001511{
Robin Murphyb94df6f2016-08-11 17:44:06 +01001512 u32 reg, reg2;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001513 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001514 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +01001515 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1516 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001517 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001518
Will Deacon45ae7cf2013-06-24 18:31:25 +01001519 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001520 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1521 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001522
Will Deacon4a1c93c2015-03-04 12:21:03 +00001523 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001524 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1525 reg = CBA2R_RW64_64BIT;
1526 else
1527 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001528 /* 16-bit VMIDs live in CBA2R */
1529 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001530 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001531
Will Deacon4a1c93c2015-03-04 12:21:03 +00001532 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
1533 }
1534
Will Deacon45ae7cf2013-06-24 18:31:25 +01001535 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001536 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001537 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001538 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001539
Will Deacon57ca90f2014-02-06 14:59:05 +00001540 /*
1541 * Use the weakest shareability/memory types, so they are
1542 * overridden by the ttbcr/pte.
1543 */
1544 if (stage1) {
1545 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1546 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001547 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1548 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001549 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001550 }
Will Deacon44680ee2014-06-25 11:29:12 +01001551 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001552
Will Deacon518f7132014-11-14 17:17:54 +00001553 /* TTBRs */
1554 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001555 u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001556
Robin Murphyb94df6f2016-08-11 17:44:06 +01001557 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1558 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
1559 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
1560 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
1561 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
1562 writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
1563 } else {
1564 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1565 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1566 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
1567 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
1568 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1569 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
1570 }
Will Deacon518f7132014-11-14 17:17:54 +00001571 } else {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001572 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001573 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +00001574 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001575
Will Deacon518f7132014-11-14 17:17:54 +00001576 /* TTBCR */
1577 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001578 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1579 reg = pgtbl_cfg->arm_v7s_cfg.tcr;
1580 reg2 = 0;
1581 } else {
1582 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1583 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
1584 reg2 |= TTBCR2_SEP_UPSTREAM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001585 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001586 if (smmu->version > ARM_SMMU_V1)
1587 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001588 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001589 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001590 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001591 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001592
Will Deacon518f7132014-11-14 17:17:54 +00001593 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001594 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001595 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1596 reg = pgtbl_cfg->arm_v7s_cfg.prrr;
1597 reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
1598 } else {
1599 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1600 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1601 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001602 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001603 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001604 }
1605
Will Deacon45ae7cf2013-06-24 18:31:25 +01001606 /* SCTLR */
Robin Murphyb94df6f2016-08-11 17:44:06 +01001607 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001608
Patrick Daly7f377fe2017-10-06 17:37:10 -07001609 /* Ensure bypass transactions are Non-shareable */
1610 reg |= SCTLR_SHCFG_NSH << SCTLR_SHCFG_SHIFT;
1611
Charan Teja Reddyc682e472017-04-20 19:11:20 +05301612 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_CB_STALL_DISABLE)) {
1613 reg &= ~SCTLR_CFCFG;
1614 reg |= SCTLR_HUPCF;
1615 }
1616
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001617 if ((!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
1618 !(smmu_domain->attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) ||
1619 !stage1)
Patrick Dalye62d3362016-03-15 18:58:28 -07001620 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001621 if (stage1)
1622 reg |= SCTLR_S1_ASIDPNE;
1623#ifdef __BIG_ENDIAN
1624 reg |= SCTLR_E;
1625#endif
Will Deacon25724842013-08-21 13:49:53 +01001626 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001627}
1628
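/*
 * ASID assignment: a regular (static) domain simply uses cbndx + 1, giving
 * every context bank a fixed, unique ASID. Dynamic domains instead draw
 * their ASID from the IDR, starting above the static range so the two can
 * never collide; the slot is handed back in arm_smmu_free_asid().
 */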
Patrick Dalyc190d932016-08-30 17:23:28 -07001629static int arm_smmu_init_asid(struct iommu_domain *domain,
1630 struct arm_smmu_device *smmu)
1631{
1632 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1633 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1634 bool dynamic = is_dynamic_domain(domain);
1635 int ret;
1636
1637 if (!dynamic) {
1638 cfg->asid = cfg->cbndx + 1;
1639 } else {
1640 mutex_lock(&smmu->idr_mutex);
1641 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1642 smmu->num_context_banks + 2,
1643 MAX_ASID + 1, GFP_KERNEL);
1644
1645 mutex_unlock(&smmu->idr_mutex);
1646 if (ret < 0) {
1647 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1648 ret);
1649 return ret;
1650 }
1651 cfg->asid = ret;
1652 }
1653 return 0;
1654}
1655
1656static void arm_smmu_free_asid(struct iommu_domain *domain)
1657{
1658 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1659 struct arm_smmu_device *smmu = smmu_domain->smmu;
1660 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1661 bool dynamic = is_dynamic_domain(domain);
1662
1663 if (cfg->asid == INVALID_ASID || !dynamic)
1664 return;
1665
1666 mutex_lock(&smmu->idr_mutex);
1667 idr_remove(&smmu->asid_idr, cfg->asid);
1668 mutex_unlock(&smmu->idr_mutex);
1669}
1670
Will Deacon45ae7cf2013-06-24 18:31:25 +01001671static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Patrick Dalyea63baa2017-02-13 17:11:33 -08001672 struct arm_smmu_device *smmu,
1673 struct device *dev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001674{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001675 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001676 unsigned long ias, oas;
1677 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001678 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001679 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001680 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001681 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyce6786f2016-11-09 14:19:23 -08001682 unsigned long quirks = 0;
Patrick Dalyc190d932016-08-30 17:23:28 -07001683 bool dynamic;
Patrick Dalyda765c62017-09-11 16:31:07 -07001684 const struct iommu_gather_ops *tlb;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001685
Will Deacon518f7132014-11-14 17:17:54 +00001686 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001687 if (smmu_domain->smmu)
1688 goto out_unlock;
1689
Patrick Dalyc190d932016-08-30 17:23:28 -07001690 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1691 smmu_domain->cfg.asid = INVALID_ASID;
1692
Patrick Dalyc190d932016-08-30 17:23:28 -07001693 dynamic = is_dynamic_domain(domain);
1694 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1695 dev_err(smmu->dev, "dynamic domains not supported\n");
1696 ret = -EPERM;
1697 goto out_unlock;
1698 }
1699
Will Deaconc752ce42014-06-25 22:46:31 +01001700 /*
1701 * Mapping the requested stage onto what we support is surprisingly
1702 * complicated, mainly because the spec allows S1+S2 SMMUs without
1703 * support for nested translation. That means we end up with the
1704 * following table:
1705 *
1706 * Requested Supported Actual
1707 * S1 N S1
1708 * S1 S1+S2 S1
1709 * S1 S2 S2
1710 * S1 S1 S1
1711 * N N N
1712 * N S1+S2 S2
1713 * N S2 S2
1714 * N S1 S1
1715 *
1716 * Note that you can't actually request stage-2 mappings.
1717 */
1718 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1719 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1720 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1721 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1722
Robin Murphy7602b872016-04-28 17:12:09 +01001723 /*
1724 * Choosing a suitable context format is even more fiddly. Until we
1725 * grow some way for the caller to express a preference, and/or move
1726 * the decision into the io-pgtable code where it arguably belongs,
1727 * just aim for the closest thing to the rest of the system, and hope
1728 * that the hardware isn't esoteric enough that we can't assume AArch64
1729 * support to be a superset of AArch32 support...
1730 */
1731 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1732 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001733 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
1734 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
1735 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
1736 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
1737 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +01001738 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1739 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1740 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1741 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1742 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1743
1744 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1745 ret = -EINVAL;
1746 goto out_unlock;
1747 }
1748
Will Deaconc752ce42014-06-25 22:46:31 +01001749 switch (smmu_domain->stage) {
1750 case ARM_SMMU_DOMAIN_S1:
1751 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1752 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001753 ias = smmu->va_size;
1754 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001755 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001756 fmt = ARM_64_LPAE_S1;
Patrick Daly4423d3e2017-05-04 18:17:51 -07001757 if (smmu->options & ARM_SMMU_OPT_3LVL_TABLES)
1758 ias = min(ias, 39UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001759 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +00001760 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001761 ias = min(ias, 32UL);
1762 oas = min(oas, 40UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001763 } else {
1764 fmt = ARM_V7S;
1765 ias = min(ias, 32UL);
1766 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +01001767 }
Will Deaconc752ce42014-06-25 22:46:31 +01001768 break;
1769 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001770 /*
1771 * We will likely want to change this if/when KVM gets
1772 * involved.
1773 */
Will Deaconc752ce42014-06-25 22:46:31 +01001774 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001775 cfg->cbar = CBAR_TYPE_S2_TRANS;
1776 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001777 ias = smmu->ipa_size;
1778 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001779 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001780 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001781 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001782 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001783 ias = min(ias, 40UL);
1784 oas = min(oas, 40UL);
1785 }
Will Deaconc752ce42014-06-25 22:46:31 +01001786 break;
1787 default:
1788 ret = -EINVAL;
1789 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001790 }
1791
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001792 if (is_fast)
1793 fmt = ARM_V8L_FAST;
1794
Patrick Dalyce6786f2016-11-09 14:19:23 -08001795 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
1796 quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
Liam Mark53cf2342016-12-20 11:36:07 -08001797 if (is_iommu_pt_coherent(smmu_domain))
1798 quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
Patrick Daly49ccf332017-09-27 15:10:29 -07001799 if ((quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT) &&
1800 (smmu->model == QCOM_SMMUV500))
1801 quirks |= IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001802
Patrick Dalyda765c62017-09-11 16:31:07 -07001803 tlb = &arm_smmu_gather_ops;
Patrick Daly83174c12017-10-26 12:31:15 -07001804 if (smmu->options & ARM_SMMU_OPT_MMU500_ERRATA1)
Patrick Dalyda765c62017-09-11 16:31:07 -07001805 tlb = &qsmmuv500_errata1_smmu_gather_ops;
1806
Patrick Dalyda688822017-05-17 20:12:48 -07001807 ret = arm_smmu_alloc_cb(domain, smmu, dev);
1808 if (ret < 0)
1809 goto out_unlock;
1810 cfg->cbndx = ret;
1811
Robin Murphyb7862e32016-04-13 18:13:03 +01001812 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001813 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1814 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001815 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001816 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001817 }
1818
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001819 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
Patrick Dalyce6786f2016-11-09 14:19:23 -08001820 .quirks = quirks,
Robin Murphyd5466352016-05-09 17:20:09 +01001821 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +00001822 .ias = ias,
1823 .oas = oas,
Patrick Dalyda765c62017-09-11 16:31:07 -07001824 .tlb = tlb,
Robin Murphy2df7a252015-07-29 19:46:06 +01001825 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +00001826 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001827
Will Deacon518f7132014-11-14 17:17:54 +00001828 smmu_domain->smmu = smmu;
Patrick Dalyea63baa2017-02-13 17:11:33 -08001829 smmu_domain->dev = dev;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001830 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1831 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001832 if (!pgtbl_ops) {
1833 ret = -ENOMEM;
1834 goto out_clear_smmu;
1835 }
1836
Patrick Dalyc11d1082016-09-01 15:52:44 -07001837 /*
1838 * assign any page table memory that might have been allocated
1839 * during alloc_io_pgtable_ops
1840 */
Patrick Dalye271f212016-10-04 13:24:49 -07001841 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001842 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001843 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001844
Robin Murphyd5466352016-05-09 17:20:09 +01001845 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001846 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Robin Murphyd7a8d042016-09-12 17:13:58 +01001847 domain->geometry.aperture_end = (1UL << ias) - 1;
1848 domain->geometry.force_aperture = true;
Will Deacon518f7132014-11-14 17:17:54 +00001849
Patrick Dalyc190d932016-08-30 17:23:28 -07001850 /* Assign an asid */
1851 ret = arm_smmu_init_asid(domain, smmu);
1852 if (ret)
1853 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001854
Patrick Dalyc190d932016-08-30 17:23:28 -07001855 if (!dynamic) {
1856 /* Initialise the context bank with our page table cfg */
1857 arm_smmu_init_context_bank(smmu_domain,
1858 &smmu_domain->pgtbl_cfg);
1859
Patrick Daly03330cc2017-08-11 14:56:38 -07001860 arm_smmu_arch_init_context_bank(smmu_domain, dev);
1861
Patrick Dalyc190d932016-08-30 17:23:28 -07001862 /*
1863 * Request context fault interrupt. Do this last to avoid the
1864 * handler seeing a half-initialised domain state.
1865 */
1866 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1867 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001868 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1869 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001870 if (ret < 0) {
1871 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1872 cfg->irptndx, irq);
1873 cfg->irptndx = INVALID_IRPTNDX;
1874 goto out_clear_smmu;
1875 }
1876 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001877 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001878 }
Will Deacon518f7132014-11-14 17:17:54 +00001879 mutex_unlock(&smmu_domain->init_mutex);
1880
1881 /* Publish page table ops for map/unmap */
1882 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001883 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001884
Will Deacon518f7132014-11-14 17:17:54 +00001885out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001886 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001887 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001888out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001889 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001890 return ret;
1891}
1892
Patrick Daly77db4f92016-10-14 15:34:10 -07001893static void arm_smmu_domain_reinit(struct arm_smmu_domain *smmu_domain)
1894{
1895 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1896 smmu_domain->cfg.cbndx = INVALID_CBNDX;
1897 smmu_domain->secure_vmid = VMID_INVAL;
1898}
1899
Will Deacon45ae7cf2013-06-24 18:31:25 +01001900static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1901{
Joerg Roedel1d672632015-03-26 13:43:10 +01001902 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001903 struct arm_smmu_device *smmu = smmu_domain->smmu;
1904 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01001905 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001906 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07001907 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001908 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001909
Robin Murphy7e96c742016-09-14 15:26:46 +01001910 if (!smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001911 return;
1912
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001913 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001914 if (ret) {
 1915		WARN_ONCE(ret, "Whoops, powering on smmu %p failed. Leaking context bank\n",
1916 smmu);
1917 return;
1918 }
1919
Patrick Dalyc190d932016-08-30 17:23:28 -07001920 dynamic = is_dynamic_domain(domain);
1921 if (dynamic) {
1922 arm_smmu_free_asid(domain);
1923 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001924 arm_smmu_power_off(smmu->pwr);
Patrick Dalye271f212016-10-04 13:24:49 -07001925 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001926 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001927 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001928 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly77db4f92016-10-14 15:34:10 -07001929 arm_smmu_domain_reinit(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001930 return;
1931 }
1932
Will Deacon518f7132014-11-14 17:17:54 +00001933 /*
1934 * Disable the context bank and free the page tables before freeing
1935 * it.
1936 */
Will Deacon44680ee2014-06-25 11:29:12 +01001937 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001938 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01001939
Will Deacon44680ee2014-06-25 11:29:12 +01001940 if (cfg->irptndx != INVALID_IRPTNDX) {
1941 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08001942 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001943 }
1944
Markus Elfring44830b02015-11-06 18:32:41 +01001945 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07001946 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001947 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001948 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001949 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001950 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001951
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001952 arm_smmu_power_off(smmu->pwr);
Patrick Daly77db4f92016-10-14 15:34:10 -07001953 arm_smmu_domain_reinit(smmu_domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001954}
1955
Joerg Roedel1d672632015-03-26 13:43:10 +01001956static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001957{
1958 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001959
Patrick Daly09801312016-08-29 17:02:52 -07001960 /* Do not support DOMAIN_DMA for now */
1961 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01001962 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001963 /*
1964 * Allocate the domain and initialise some of its data structures.
1965 * We can't really do anything meaningful until we've added a
1966 * master.
1967 */
1968 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1969 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001970 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001971
Robin Murphy7e96c742016-09-14 15:26:46 +01001972 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
1973 iommu_get_dma_cookie(&smmu_domain->domain))) {
Robin Murphy9adb9592016-01-26 18:06:36 +00001974 kfree(smmu_domain);
1975 return NULL;
1976 }
1977
Will Deacon518f7132014-11-14 17:17:54 +00001978 mutex_init(&smmu_domain->init_mutex);
1979 spin_lock_init(&smmu_domain->pgtbl_lock);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001980 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
1981 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07001982 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001983 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Patrick Daly77db4f92016-10-14 15:34:10 -07001984 arm_smmu_domain_reinit(smmu_domain);
Joerg Roedel1d672632015-03-26 13:43:10 +01001985
1986 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001987}
1988
Joerg Roedel1d672632015-03-26 13:43:10 +01001989static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001990{
Joerg Roedel1d672632015-03-26 13:43:10 +01001991 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01001992
1993 /*
1994 * Free the domain resources. We assume that all devices have
1995 * already been detached.
1996 */
Robin Murphy9adb9592016-01-26 18:06:36 +00001997 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001998 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001999 kfree(smmu_domain);
2000}
2001
Robin Murphy468f4942016-09-12 17:13:49 +01002002static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
2003{
2004 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphyd5b41782016-09-14 15:21:39 +01002005 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
Robin Murphy468f4942016-09-12 17:13:49 +01002006
2007 if (smr->valid)
2008 reg |= SMR_VALID;
2009 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
2010}
2011
Robin Murphya754fd12016-09-12 17:13:50 +01002012static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
2013{
2014 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
2015 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
2016 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
Patrick Daly7f377fe2017-10-06 17:37:10 -07002017 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT |
2018 S2CR_SHCFG_NSH << S2CR_SHCFG_SHIFT;
Robin Murphya754fd12016-09-12 17:13:50 +01002019
2020 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
2021}
2022
2023static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
2024{
2025 arm_smmu_write_s2cr(smmu, idx);
2026 if (smmu->smrs)
2027 arm_smmu_write_smr(smmu, idx);
2028}
2029
Robin Murphy6668f692016-09-12 17:13:54 +01002030static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy468f4942016-09-12 17:13:49 +01002031{
2032 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy6668f692016-09-12 17:13:54 +01002033 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002034
Robin Murphy6668f692016-09-12 17:13:54 +01002035 /* Stream indexing is blissfully easy */
2036 if (!smrs)
2037 return id;
Robin Murphy468f4942016-09-12 17:13:49 +01002038
Robin Murphy6668f692016-09-12 17:13:54 +01002039 /* Validating SMRs is... less so */
2040 for (i = 0; i < smmu->num_mapping_groups; ++i) {
2041 if (!smrs[i].valid) {
2042 /*
2043 * Note the first free entry we come across, which
2044 * we'll claim in the end if nothing else matches.
2045 */
2046 if (free_idx < 0)
2047 free_idx = i;
Robin Murphy468f4942016-09-12 17:13:49 +01002048 continue;
2049 }
Robin Murphy6668f692016-09-12 17:13:54 +01002050 /*
2051 * If the new entry is _entirely_ matched by an existing entry,
2052 * then reuse that, with the guarantee that there also cannot
2053 * be any subsequent conflicting entries. In normal use we'd
2054 * expect simply identical entries for this case, but there's
2055 * no harm in accommodating the generalisation.
2056 */
2057 if ((mask & smrs[i].mask) == mask &&
2058 !((id ^ smrs[i].id) & ~smrs[i].mask))
2059 return i;
2060 /*
2061 * If the new entry has any other overlap with an existing one,
2062 * though, then there always exists at least one stream ID
2063 * which would cause a conflict, and we can't allow that risk.
2064 */
2065 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
2066 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002067 }
2068
Robin Murphy6668f692016-09-12 17:13:54 +01002069 return free_idx;
2070}
2071
2072static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
2073{
2074 if (--smmu->s2crs[idx].count)
2075 return false;
2076
2077 smmu->s2crs[idx] = s2cr_init_val;
2078 if (smmu->smrs)
2079 smmu->smrs[idx].valid = false;
2080
2081 return true;
2082}
2083
2084static int arm_smmu_master_alloc_smes(struct device *dev)
2085{
Robin Murphy06e393e2016-09-12 17:13:55 +01002086 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2087 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy6668f692016-09-12 17:13:54 +01002088 struct arm_smmu_device *smmu = cfg->smmu;
2089 struct arm_smmu_smr *smrs = smmu->smrs;
2090 struct iommu_group *group;
2091 int i, idx, ret;
2092
2093 mutex_lock(&smmu->stream_map_mutex);
2094 /* Figure out a viable stream map entry allocation */
Robin Murphy06e393e2016-09-12 17:13:55 +01002095 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002096 u16 sid = fwspec->ids[i];
2097 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
2098
Robin Murphy6668f692016-09-12 17:13:54 +01002099 if (idx != INVALID_SMENDX) {
2100 ret = -EEXIST;
2101 goto out_err;
2102 }
2103
Robin Murphy7e96c742016-09-14 15:26:46 +01002104 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy6668f692016-09-12 17:13:54 +01002105 if (ret < 0)
2106 goto out_err;
2107
2108 idx = ret;
2109 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002110 smrs[idx].id = sid;
2111 smrs[idx].mask = mask;
Robin Murphy6668f692016-09-12 17:13:54 +01002112 smrs[idx].valid = true;
2113 }
2114 smmu->s2crs[idx].count++;
2115 cfg->smendx[i] = (s16)idx;
2116 }
2117
2118 group = iommu_group_get_for_dev(dev);
2119 if (!group)
2120 group = ERR_PTR(-ENOMEM);
2121 if (IS_ERR(group)) {
2122 ret = PTR_ERR(group);
2123 goto out_err;
2124 }
2125 iommu_group_put(group);
Robin Murphy468f4942016-09-12 17:13:49 +01002126
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002127 /* It worked! Don't poke the actual hardware until we've attached */
2128 for_each_cfg_sme(fwspec, i, idx)
Robin Murphy6668f692016-09-12 17:13:54 +01002129 smmu->s2crs[idx].group = group;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002130
Robin Murphy6668f692016-09-12 17:13:54 +01002131 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002132 return 0;
2133
Robin Murphy6668f692016-09-12 17:13:54 +01002134out_err:
Robin Murphy468f4942016-09-12 17:13:49 +01002135 while (i--) {
Robin Murphy6668f692016-09-12 17:13:54 +01002136 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy468f4942016-09-12 17:13:49 +01002137 cfg->smendx[i] = INVALID_SMENDX;
2138 }
Robin Murphy6668f692016-09-12 17:13:54 +01002139 mutex_unlock(&smmu->stream_map_mutex);
2140 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002141}
2142
Robin Murphy06e393e2016-09-12 17:13:55 +01002143static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002144{
Robin Murphy06e393e2016-09-12 17:13:55 +01002145 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
2146 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy1fb519a2016-09-12 17:13:53 +01002147 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01002148
Robin Murphy6668f692016-09-12 17:13:54 +01002149 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002150 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002151 if (arm_smmu_free_sme(smmu, idx))
2152 arm_smmu_write_sme(smmu, idx);
Robin Murphy468f4942016-09-12 17:13:49 +01002153 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002154 }
Robin Murphy6668f692016-09-12 17:13:54 +01002155 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002156}
2157
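/*
 * Stream mapping teardown. Each S2CR carries an attach_count so that an
 * entry shared by several masters is only torn down when the last of them
 * detaches; at that point the SMR and S2CR registers are cleared and the
 * context bank's TLB is invalidated so no stale mappings survive for the
 * entry's next user.
 */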
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002158static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
2159 struct iommu_fwspec *fwspec)
2160{
2161 struct arm_smmu_device *smmu = smmu_domain->smmu;
2162 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2163 int i, idx;
2164 const struct iommu_gather_ops *tlb;
2165
2166 tlb = smmu_domain->pgtbl_cfg.tlb;
2167
2168 mutex_lock(&smmu->stream_map_mutex);
2169 for_each_cfg_sme(fwspec, i, idx) {
2170 WARN_ON(s2cr[idx].attach_count == 0);
2171 s2cr[idx].attach_count -= 1;
2172
2173 if (s2cr[idx].attach_count > 0)
2174 continue;
2175
2176 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
2177 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
2178 }
2179 mutex_unlock(&smmu->stream_map_mutex);
2180
2181 /* Ensure there are no stale mappings for this context bank */
2182 tlb->tlb_flush_all(smmu_domain);
2183}
2184
Will Deacon45ae7cf2013-06-24 18:31:25 +01002185static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphy06e393e2016-09-12 17:13:55 +01002186 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002187{
Will Deacon44680ee2014-06-25 11:29:12 +01002188 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002189 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2190 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
2191 u8 cbndx = smmu_domain->cfg.cbndx;
Robin Murphy6668f692016-09-12 17:13:54 +01002192 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002193
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002194 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002195 for_each_cfg_sme(fwspec, i, idx) {
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002196 if (s2cr[idx].attach_count++ > 0)
Robin Murphy6668f692016-09-12 17:13:54 +01002197 continue;
Robin Murphya754fd12016-09-12 17:13:50 +01002198
2199 s2cr[idx].type = type;
2200 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
2201 s2cr[idx].cbndx = cbndx;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002202 arm_smmu_write_sme(smmu, idx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002203 }
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002204 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002205
2206 return 0;
2207}
2208
Patrick Daly09801312016-08-29 17:02:52 -07002209static void arm_smmu_detach_dev(struct iommu_domain *domain,
2210 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002211{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002212 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07002213 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002214 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly09801312016-08-29 17:02:52 -07002215 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07002216 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07002217
2218 if (dynamic)
2219 return;
2220
Patrick Daly09801312016-08-29 17:02:52 -07002221 if (!smmu) {
2222 dev_err(dev, "Domain not attached; cannot detach!\n");
2223 return;
2224 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002225
Vijayanand Jitta25cd32c2017-11-16 15:14:36 +05302226 if (atomic_domain)
2227 arm_smmu_power_on_atomic(smmu->pwr);
2228 else
2229 arm_smmu_power_on(smmu->pwr);
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002230
Vijayanand Jitta25cd32c2017-11-16 15:14:36 +05302231 arm_smmu_domain_remove_master(smmu_domain, fwspec);
2232 arm_smmu_power_off(smmu->pwr);
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002233}
2234
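/*
 * Secure table (un)assignment. Pages queued on pte_info_list are handed to
 * the hypervisor so that HLOS keeps read/write access while the domain's
 * secure VMID gains read-only access; pages queued on unassign_list make
 * the reverse trip back to HLOS (read/write/exec) and are only then freed.
 * Both walks are no-ops for non-secure domains.
 */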
Patrick Dalye271f212016-10-04 13:24:49 -07002235static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07002236{
Patrick Dalye271f212016-10-04 13:24:49 -07002237 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002238 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2239 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
2240 int source_vmid = VMID_HLOS;
2241 struct arm_smmu_pte_info *pte_info, *temp;
2242
Patrick Dalye271f212016-10-04 13:24:49 -07002243 if (!arm_smmu_is_domain_secure(smmu_domain))
2244 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002245
Patrick Dalye271f212016-10-04 13:24:49 -07002246 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002247 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2248 PAGE_SIZE, &source_vmid, 1,
2249 dest_vmids, dest_perms, 2);
2250 if (WARN_ON(ret))
2251 break;
2252 }
2253
2254 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2255 entry) {
2256 list_del(&pte_info->entry);
2257 kfree(pte_info);
2258 }
Patrick Dalye271f212016-10-04 13:24:49 -07002259 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002260}
2261
2262static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2263{
2264 int ret;
2265 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002266 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002267 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2268 struct arm_smmu_pte_info *pte_info, *temp;
2269
Patrick Dalye271f212016-10-04 13:24:49 -07002270 if (!arm_smmu_is_domain_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002271 return;
2272
2273 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2274 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2275 PAGE_SIZE, source_vmlist, 2,
2276 &dest_vmids, &dest_perms, 1);
2277 if (WARN_ON(ret))
2278 break;
2279 free_pages_exact(pte_info->virt_addr, pte_info->size);
2280 }
2281
2282 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2283 entry) {
2284 list_del(&pte_info->entry);
2285 kfree(pte_info);
2286 }
2287}
2288
2289static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2290{
2291 struct arm_smmu_domain *smmu_domain = cookie;
2292 struct arm_smmu_pte_info *pte_info;
2293
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002294 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002295
2296 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2297 if (!pte_info)
2298 return;
2299
2300 pte_info->virt_addr = addr;
2301 pte_info->size = size;
2302 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2303}
2304
2305static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2306{
2307 struct arm_smmu_domain *smmu_domain = cookie;
2308 struct arm_smmu_pte_info *pte_info;
2309
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002310 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002311
2312 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2313 if (!pte_info)
2314 return -ENOMEM;
2315 pte_info->virt_addr = addr;
2316 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2317 return 0;
2318}
2319
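/*
 * Attach path: take a power vote, finalise the domain context on first
 * attach, sanity-check that the domain and device live on the same SMMU,
 * and program the stream mapping. Dynamic domains never touch the stream
 * mapping, since the hardware is already live. For atomic domains an
 * additional power vote is taken here and only dropped again at detach.
 */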
Will Deacon45ae7cf2013-06-24 18:31:25 +01002320static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2321{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002322 int ret;
Robin Murphy06e393e2016-09-12 17:13:55 +01002323 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Will Deacon518f7132014-11-14 17:17:54 +00002324 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +01002325 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly8befb662016-08-17 20:03:28 -07002326 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002327
Robin Murphy06e393e2016-09-12 17:13:55 +01002328 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002329 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2330 return -ENXIO;
2331 }
Robin Murphy06e393e2016-09-12 17:13:55 +01002332
Robin Murphy4f79b142016-10-17 12:06:21 +01002333 /*
2334 * FIXME: The arch/arm DMA API code tries to attach devices to its own
2335 * domains between of_xlate() and add_device() - we have no way to cope
2336 * with that, so until ARM gets converted to rely on groups and default
2337 * domains, just say no (but more politely than by dereferencing NULL).
2338 * This should be at least a WARN_ON once that's sorted.
2339 */
2340 if (!fwspec->iommu_priv)
2341 return -ENODEV;
2342
Robin Murphy06e393e2016-09-12 17:13:55 +01002343 smmu = fwspec_smmu(fwspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002344
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002345 /* Enable Clocks and Power */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002346 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002347 if (ret)
2348 return ret;
2349
Will Deacon518f7132014-11-14 17:17:54 +00002350 /* Ensure that the domain is finalised */
Patrick Dalyea63baa2017-02-13 17:11:33 -08002351 ret = arm_smmu_init_domain_context(domain, smmu, dev);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002352 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002353 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002354
Patrick Dalyc190d932016-08-30 17:23:28 -07002355 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002356 if (is_dynamic_domain(domain)) {
2357 ret = 0;
2358 goto out_power_off;
2359 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002360
Will Deacon45ae7cf2013-06-24 18:31:25 +01002361 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002362 * Sanity check the domain. We don't support domains across
2363 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002364 */
Robin Murphy06e393e2016-09-12 17:13:55 +01002365 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002366 dev_err(dev,
2367 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002368 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002369 ret = -EINVAL;
2370 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002371 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002372
2373 /* Looks ok, so add the device to the domain */
Robin Murphy06e393e2016-09-12 17:13:55 +01002374 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002375
2376out_power_off:
Patrick Daly466237d2016-10-14 15:59:14 -07002377 /*
2378 * Keep an additional vote for non-atomic power until domain is
2379 * detached
2380 */
2381 if (!ret && atomic_domain) {
2382 WARN_ON(arm_smmu_power_on(smmu->pwr));
2383 arm_smmu_power_off_atomic(smmu->pwr);
2384 }
2385
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002386 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002387
Will Deacon45ae7cf2013-06-24 18:31:25 +01002388 return ret;
2389}
2390
Will Deacon45ae7cf2013-06-24 18:31:25 +01002391static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002392 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002393{
Will Deacon518f7132014-11-14 17:17:54 +00002394 int ret;
2395 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002396 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002397	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002398
Will Deacon518f7132014-11-14 17:17:54 +00002399 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002400 return -ENODEV;
2401
Patrick Dalye271f212016-10-04 13:24:49 -07002402 arm_smmu_secure_domain_lock(smmu_domain);
2403
Will Deacon518f7132014-11-14 17:17:54 +00002404 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2405 ret = ops->map(ops, iova, paddr, size, prot);
2406 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002407
2408 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002409 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002410
Will Deacon518f7132014-11-14 17:17:54 +00002411 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002412}
2413
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002414static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
2415 dma_addr_t iova)
2416{
2417 uint64_t ret;
2418 unsigned long flags;
2419 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2420 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2421
2422 if (!ops)
2423 return 0;
2424
2425 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2426 ret = ops->iova_to_pte(ops, iova);
2427 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2428 return ret;
2429}
2430
Will Deacon45ae7cf2013-06-24 18:31:25 +01002431static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2432 size_t size)
2433{
Will Deacon518f7132014-11-14 17:17:54 +00002434 size_t ret;
2435 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002436 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002437	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002438
Will Deacon518f7132014-11-14 17:17:54 +00002439 if (!ops)
2440 return 0;
2441
Patrick Daly8befb662016-08-17 20:03:28 -07002442 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002443 if (ret)
2444 return ret;
2445
Patrick Dalye271f212016-10-04 13:24:49 -07002446 arm_smmu_secure_domain_lock(smmu_domain);
2447
Will Deacon518f7132014-11-14 17:17:54 +00002448 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2449 ret = ops->unmap(ops, iova, size);
2450 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002451
Patrick Daly8befb662016-08-17 20:03:28 -07002452 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002453 /*
2454 * While splitting up block mappings, we might allocate page table
2455	 * memory during unmap, so the vmids need to be assigned to the
2456 * memory here as well.
2457 */
2458 arm_smmu_assign_table(smmu_domain);
2459	/* Also unassign any pages that were freed during unmap */
2460 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002461 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002462 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002463}
2464
Patrick Daly88d321d2017-02-09 18:02:13 -08002465#define MAX_MAP_SG_BATCH_SIZE (SZ_4M)
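/*
 * arm_smmu_map_sg() maps the scatterlist in batches of at most
 * MAX_MAP_SG_BATCH_SIZE bytes, bounding how long the page table spinlock
 * is held (with interrupts disabled) for any one call into ops->map_sg().
 * ops->map_sg() returns 0 on error; in that case everything mapped so far
 * is unmapped again and 0 is returned to the caller.
 */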
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002466static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2467 struct scatterlist *sg, unsigned int nents, int prot)
2468{
2469 int ret;
Patrick Daly88d321d2017-02-09 18:02:13 -08002470 size_t size, batch_size, size_to_unmap = 0;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002471 unsigned long flags;
2472 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2473 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Patrick Daly88d321d2017-02-09 18:02:13 -08002474 unsigned int idx_start, idx_end;
2475 struct scatterlist *sg_start, *sg_end;
2476 unsigned long __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002477
2478 if (!ops)
2479 return -ENODEV;
2480
Patrick Daly4b9a7ad2017-09-22 17:31:13 -07002481 arm_smmu_secure_domain_lock(smmu_domain);
2482
Patrick Daly88d321d2017-02-09 18:02:13 -08002483 __saved_iova_start = iova;
2484 idx_start = idx_end = 0;
2485 sg_start = sg_end = sg;
2486 while (idx_end < nents) {
2487 batch_size = sg_end->length;
2488 sg_end = sg_next(sg_end);
2489 idx_end++;
2490 while ((idx_end < nents) &&
2491 (batch_size + sg_end->length < MAX_MAP_SG_BATCH_SIZE)) {
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002492
Patrick Daly88d321d2017-02-09 18:02:13 -08002493 batch_size += sg_end->length;
2494 sg_end = sg_next(sg_end);
2495 idx_end++;
2496 }
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002497
Patrick Daly88d321d2017-02-09 18:02:13 -08002498 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2499 ret = ops->map_sg(ops, iova, sg_start, idx_end - idx_start,
2500 prot, &size);
2501 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2502 /* Returns 0 on error */
2503 if (!ret) {
2504 size_to_unmap = iova + size - __saved_iova_start;
2505 goto out;
2506 }
2507
2508 iova += batch_size;
2509 idx_start = idx_end;
2510 sg_start = sg_end;
2511 }
2512
2513out:
Patrick Dalyc11d1082016-09-01 15:52:44 -07002514 arm_smmu_assign_table(smmu_domain);
2515
Patrick Daly88d321d2017-02-09 18:02:13 -08002516 if (size_to_unmap) {
2517 arm_smmu_unmap(domain, __saved_iova_start, size_to_unmap);
2518 iova = __saved_iova_start;
2519 }
Patrick Daly4b9a7ad2017-09-22 17:31:13 -07002520 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly88d321d2017-02-09 18:02:13 -08002521 return iova - __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002522}
2523
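/*
 * Translate an IOVA using the hardware ATOS machinery: write the
 * page-aligned address to ATS1PR, poll ATSR until the translation
 * completes, then read the result from PAR. On a timeout the software
 * table walk result is logged for comparison and 0 is returned; a fault
 * flagged in PAR also yields 0.
 */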
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002524static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyad441dd2016-09-15 15:50:46 -07002525 dma_addr_t iova)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002526{
Joerg Roedel1d672632015-03-26 13:43:10 +01002527 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002528 struct arm_smmu_device *smmu = smmu_domain->smmu;
2529 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2530	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2531 struct device *dev = smmu->dev;
2532 void __iomem *cb_base;
2533 u32 tmp;
2534 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002535 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002536
2537 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2538
Robin Murphy661d9622015-05-27 17:09:34 +01002539 /* ATS1 registers can only be written atomically */
2540 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002541 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002542 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2543 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002544 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002545
2546 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2547 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002548 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002549 dev_err(dev,
2550 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2551 &iova, &phys);
2552 phys = 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002553 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002554 }
2555
Robin Murphyf9a05f02016-04-13 18:13:01 +01002556 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002557 if (phys & CB_PAR_F) {
2558 dev_err(dev, "translation fault!\n");
2559 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002560 phys = 0;
2561 } else {
2562 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002563 }
Patrick Dalyad441dd2016-09-15 15:50:46 -07002564
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002565 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002566}
2567
Will Deacon45ae7cf2013-06-24 18:31:25 +01002568static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002569 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002570{
Will Deacon518f7132014-11-14 17:17:54 +00002571 phys_addr_t ret;
2572 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002573 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002574	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002575
Will Deacon518f7132014-11-14 17:17:54 +00002576 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002577 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002578
Will Deacon518f7132014-11-14 17:17:54 +00002579 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002580 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002581 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002582
Will Deacon518f7132014-11-14 17:17:54 +00002583 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002584}
2585
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002586/*
2587 * This function can sleep and must not be called from atomic context. It
2588 * will power on the register block if required. This restriction does not
2589 * apply to the original iova_to_phys() op.
2590 */
2591static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2592 dma_addr_t iova)
2593{
2594 phys_addr_t ret = 0;
2595 unsigned long flags;
2596 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly62ba1922017-08-30 16:47:18 -07002597 struct arm_smmu_device *smmu = smmu_domain->smmu;
2598
2599 if (smmu->options & ARM_SMMU_OPT_DISABLE_ATOS)
2600 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002601
Patrick Dalyad441dd2016-09-15 15:50:46 -07002602 if (smmu_domain->smmu->arch_ops &&
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002603 smmu_domain->smmu->arch_ops->iova_to_phys_hard) {
2604 ret = smmu_domain->smmu->arch_ops->iova_to_phys_hard(
Patrick Dalyad441dd2016-09-15 15:50:46 -07002605 domain, iova);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002606 return ret;
2607 }
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002608
2609 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2610 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2611 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyad441dd2016-09-15 15:50:46 -07002612 ret = __arm_smmu_iova_to_phys_hard(domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002613
2614 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2615
2616 return ret;
2617}
2618
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002619static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002620{
Will Deacond0948942014-06-24 17:30:10 +01002621 switch (cap) {
2622 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002623 /*
2624 * Return true here as the SMMU can always send out coherent
2625 * requests.
2626 */
2627 return true;
Will Deacond0948942014-06-24 17:30:10 +01002628 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002629 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002630 case IOMMU_CAP_NOEXEC:
2631 return true;
Will Deacond0948942014-06-24 17:30:10 +01002632 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002633 return false;
Will Deacond0948942014-06-24 17:30:10 +01002634 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002635}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002636
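/*
 * Fallback lookup for arm_smmu_get_by_node(): if driver_find_device() does
 * not find the SMMU (e.g. the instance has not been bound to the driver
 * yet), search the driver-private arm_smmu_devices list instead, under
 * arm_smmu_devices_lock.
 */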
Patrick Daly8e3371a2017-02-13 22:14:53 -08002637static struct arm_smmu_device *arm_smmu_get_by_list(struct device_node *np)
2638{
2639 struct arm_smmu_device *smmu;
2640 unsigned long flags;
2641
2642 spin_lock_irqsave(&arm_smmu_devices_lock, flags);
2643 list_for_each_entry(smmu, &arm_smmu_devices, list) {
2644 if (smmu->dev->of_node == np) {
2645 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2646 return smmu;
2647 }
2648 }
2649 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2650 return NULL;
2651}
2652
Robin Murphy7e96c742016-09-14 15:26:46 +01002653static int arm_smmu_match_node(struct device *dev, void *data)
2654{
2655 return dev->of_node == data;
2656}
2657
2658static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
2659{
2660 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
2661 np, arm_smmu_match_node);
2662 put_device(dev);
Patrick Daly8e3371a2017-02-13 22:14:53 -08002663 return dev ? dev_get_drvdata(dev) : arm_smmu_get_by_list(np);
Robin Murphy7e96c742016-09-14 15:26:46 +01002664}
2665
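/*
 * Resolve the owning SMMU for a newly added master (via the legacy
 * mmu-masters binding or the generic iommu fwspec), validate its stream
 * IDs and SMR masks against the hardware limits, allocate the per-master
 * cfg with one SME slot per ID, and then reserve stream mapping entries.
 */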
Will Deacon03edb222015-01-19 14:27:33 +00002666static int arm_smmu_add_device(struct device *dev)
2667{
Robin Murphy06e393e2016-09-12 17:13:55 +01002668 struct arm_smmu_device *smmu;
Robin Murphyd5b41782016-09-14 15:21:39 +01002669 struct arm_smmu_master_cfg *cfg;
Robin Murphy7e96c742016-09-14 15:26:46 +01002670 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyd5b41782016-09-14 15:21:39 +01002671 int i, ret;
2672
Robin Murphy7e96c742016-09-14 15:26:46 +01002673 if (using_legacy_binding) {
2674 ret = arm_smmu_register_legacy_master(dev, &smmu);
2675 fwspec = dev->iommu_fwspec;
2676 if (ret)
2677 goto out_free;
Robin Murphy22e6f6c2016-11-02 17:31:32 +00002678 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002679 smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
2680 if (!smmu)
2681 return -ENODEV;
2682 } else {
2683 return -ENODEV;
2684 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002685
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002686 ret = arm_smmu_power_on(smmu->pwr);
2687 if (ret)
2688 goto out_free;
2689
Robin Murphyd5b41782016-09-14 15:21:39 +01002690 ret = -EINVAL;
Robin Murphy06e393e2016-09-12 17:13:55 +01002691 for (i = 0; i < fwspec->num_ids; i++) {
2692 u16 sid = fwspec->ids[i];
Robin Murphy7e96c742016-09-14 15:26:46 +01002693 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyd5b41782016-09-14 15:21:39 +01002694
Robin Murphy06e393e2016-09-12 17:13:55 +01002695 if (sid & ~smmu->streamid_mask) {
Robin Murphyd5b41782016-09-14 15:21:39 +01002696 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002697 sid, smmu->streamid_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002698 goto out_pwr_off;
Robin Murphyd5b41782016-09-14 15:21:39 +01002699 }
Robin Murphy7e96c742016-09-14 15:26:46 +01002700 if (mask & ~smmu->smr_mask_mask) {
2701 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
2702				mask, smmu->smr_mask_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002703 goto out_pwr_off;
Robin Murphy7e96c742016-09-14 15:26:46 +01002704 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002705 }
Will Deacon03edb222015-01-19 14:27:33 +00002706
Robin Murphy06e393e2016-09-12 17:13:55 +01002707 ret = -ENOMEM;
2708 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
2709 GFP_KERNEL);
2710 if (!cfg)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002711 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002712
2713 cfg->smmu = smmu;
2714 fwspec->iommu_priv = cfg;
2715 while (i--)
2716 cfg->smendx[i] = INVALID_SMENDX;
2717
Robin Murphy6668f692016-09-12 17:13:54 +01002718 ret = arm_smmu_master_alloc_smes(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002719 if (ret)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002720 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002721
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002722 arm_smmu_power_off(smmu->pwr);
Robin Murphy06e393e2016-09-12 17:13:55 +01002723 return 0;
Robin Murphyd5b41782016-09-14 15:21:39 +01002724
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002725out_pwr_off:
2726 arm_smmu_power_off(smmu->pwr);
Robin Murphyd5b41782016-09-14 15:21:39 +01002727out_free:
Robin Murphy06e393e2016-09-12 17:13:55 +01002728 if (fwspec)
2729 kfree(fwspec->iommu_priv);
2730 iommu_fwspec_free(dev);
Robin Murphyd5b41782016-09-14 15:21:39 +01002731 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00002732}
2733
Will Deacon45ae7cf2013-06-24 18:31:25 +01002734static void arm_smmu_remove_device(struct device *dev)
2735{
Robin Murphy06e393e2016-09-12 17:13:55 +01002736 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002737 struct arm_smmu_device *smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002738
Robin Murphy06e393e2016-09-12 17:13:55 +01002739 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyd5b41782016-09-14 15:21:39 +01002740 return;
Robin Murphya754fd12016-09-12 17:13:50 +01002741
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002742 smmu = fwspec_smmu(fwspec);
2743 if (arm_smmu_power_on(smmu->pwr)) {
2744 WARN_ON(1);
2745 return;
2746 }
2747
Robin Murphy06e393e2016-09-12 17:13:55 +01002748 arm_smmu_master_free_smes(fwspec);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002749 iommu_group_remove_device(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002750 kfree(fwspec->iommu_priv);
2751 iommu_fwspec_free(dev);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002752 arm_smmu_power_off(smmu->pwr);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002753}
2754
Joerg Roedelaf659932015-10-21 23:51:41 +02002755static struct iommu_group *arm_smmu_device_group(struct device *dev)
2756{
Robin Murphy06e393e2016-09-12 17:13:55 +01002757 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2758 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy6668f692016-09-12 17:13:54 +01002759 struct iommu_group *group = NULL;
2760 int i, idx;
2761
Robin Murphy06e393e2016-09-12 17:13:55 +01002762 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002763 if (group && smmu->s2crs[idx].group &&
2764 group != smmu->s2crs[idx].group)
2765 return ERR_PTR(-EINVAL);
2766
2767 group = smmu->s2crs[idx].group;
2768 }
2769
Patrick Daly03330cc2017-08-11 14:56:38 -07002770 if (!group) {
2771 if (dev_is_pci(dev))
2772 group = pci_device_group(dev);
2773 else
2774 group = generic_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02002775
Patrick Daly03330cc2017-08-11 14:56:38 -07002776 if (IS_ERR(group))
2777 return NULL;
2778 }
2779
2780 if (arm_smmu_arch_device_group(dev, group)) {
2781 iommu_group_put(group);
2782 return ERR_PTR(-EINVAL);
2783 }
Joerg Roedelaf659932015-10-21 23:51:41 +02002784
Joerg Roedelaf659932015-10-21 23:51:41 +02002785 return group;
2786}
2787
Will Deaconc752ce42014-06-25 22:46:31 +01002788static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
2789 enum iommu_attr attr, void *data)
2790{
Joerg Roedel1d672632015-03-26 13:43:10 +01002791 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002792 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01002793
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002794 mutex_lock(&smmu_domain->init_mutex);
Will Deaconc752ce42014-06-25 22:46:31 +01002795 switch (attr) {
2796 case DOMAIN_ATTR_NESTING:
2797 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002798 ret = 0;
2799 break;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08002800 case DOMAIN_ATTR_PT_BASE_ADDR:
2801 *((phys_addr_t *)data) =
2802 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002803 ret = 0;
2804 break;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002805 case DOMAIN_ATTR_CONTEXT_BANK:
2806 /* context bank index isn't valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002807 if (smmu_domain->smmu == NULL) {
2808 ret = -ENODEV;
2809 break;
2810 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002811 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
2812 ret = 0;
2813 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002814 case DOMAIN_ATTR_TTBR0: {
2815 u64 val;
2816 struct arm_smmu_device *smmu = smmu_domain->smmu;
2817 /* not valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002818 if (smmu == NULL) {
2819 ret = -ENODEV;
2820 break;
2821 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002822 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2823 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
2824 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
2825 << (TTBRn_ASID_SHIFT);
2826 *((u64 *)data) = val;
2827 ret = 0;
2828 break;
2829 }
2830 case DOMAIN_ATTR_CONTEXTIDR:
2831 /* not valid until attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002832 if (smmu_domain->smmu == NULL) {
2833 ret = -ENODEV;
2834 break;
2835 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002836 *((u32 *)data) = smmu_domain->cfg.procid;
2837 ret = 0;
2838 break;
2839 case DOMAIN_ATTR_PROCID:
2840 *((u32 *)data) = smmu_domain->cfg.procid;
2841 ret = 0;
2842 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002843 case DOMAIN_ATTR_DYNAMIC:
2844 *((int *)data) = !!(smmu_domain->attributes
2845 & (1 << DOMAIN_ATTR_DYNAMIC));
2846 ret = 0;
2847 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002848 case DOMAIN_ATTR_NON_FATAL_FAULTS:
2849 *((int *)data) = !!(smmu_domain->attributes
2850 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
2851 ret = 0;
2852 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07002853 case DOMAIN_ATTR_S1_BYPASS:
2854 *((int *)data) = !!(smmu_domain->attributes
2855 & (1 << DOMAIN_ATTR_S1_BYPASS));
2856 ret = 0;
2857 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002858 case DOMAIN_ATTR_SECURE_VMID:
2859 *((int *)data) = smmu_domain->secure_vmid;
2860 ret = 0;
2861 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08002862 case DOMAIN_ATTR_PGTBL_INFO: {
2863 struct iommu_pgtbl_info *info = data;
2864
2865 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
2866 ret = -ENODEV;
2867 break;
2868 }
2869 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
2870 ret = 0;
2871 break;
2872 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002873 case DOMAIN_ATTR_FAST:
2874 *((int *)data) = !!(smmu_domain->attributes
2875 & (1 << DOMAIN_ATTR_FAST));
2876 ret = 0;
2877 break;
Patrick Daly1e279922017-09-06 15:57:45 -07002878 case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
2879 *((int *)data) = !!(smmu_domain->attributes
2880 & (1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR));
2881 ret = 0;
2882 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002883 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2884 *((int *)data) = !!(smmu_domain->attributes &
2885 (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
2886 ret = 0;
2887 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08002888 case DOMAIN_ATTR_EARLY_MAP:
2889 *((int *)data) = !!(smmu_domain->attributes
2890 & (1 << DOMAIN_ATTR_EARLY_MAP));
2891 ret = 0;
2892 break;
Mitchel Humpherys05314f32016-06-07 16:04:40 -07002893 case DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002894 if (!smmu_domain->smmu) {
2895 ret = -ENODEV;
2896 break;
2897 }
Liam Mark53cf2342016-12-20 11:36:07 -08002898 *((int *)data) = is_iommu_pt_coherent(smmu_domain);
2899 ret = 0;
2900 break;
2901 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
2902 *((int *)data) = !!(smmu_domain->attributes
2903 & (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
Mitchel Humpherys05314f32016-06-07 16:04:40 -07002904 ret = 0;
2905 break;
Charan Teja Reddyc682e472017-04-20 19:11:20 +05302906 case DOMAIN_ATTR_CB_STALL_DISABLE:
2907 *((int *)data) = !!(smmu_domain->attributes
2908 & (1 << DOMAIN_ATTR_CB_STALL_DISABLE));
2909 ret = 0;
2910 break;
Patrick Daly83174c12017-10-26 12:31:15 -07002911 case DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN:
Patrick Daly23301482017-10-12 16:18:25 -07002912 *((int *)data) = smmu_domain->qsmmuv500_errata2_min_align;
2913 ret = 0;
2914 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002915 default:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002916 ret = -ENODEV;
2917 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002918 }
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002919 mutex_unlock(&smmu_domain->init_mutex);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002920 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002921}
2922
2923static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
2924 enum iommu_attr attr, void *data)
2925{
Will Deacon518f7132014-11-14 17:17:54 +00002926 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01002927 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01002928
Will Deacon518f7132014-11-14 17:17:54 +00002929 mutex_lock(&smmu_domain->init_mutex);
2930
Will Deaconc752ce42014-06-25 22:46:31 +01002931 switch (attr) {
2932 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00002933 if (smmu_domain->smmu) {
2934 ret = -EPERM;
2935 goto out_unlock;
2936 }
2937
Will Deaconc752ce42014-06-25 22:46:31 +01002938 if (*(int *)data)
2939 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2940 else
2941 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2942
Will Deacon518f7132014-11-14 17:17:54 +00002943 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002944 case DOMAIN_ATTR_PROCID:
2945 if (smmu_domain->smmu != NULL) {
2946 dev_err(smmu_domain->smmu->dev,
2947 "cannot change procid attribute while attached\n");
2948 ret = -EBUSY;
2949 break;
2950 }
2951 smmu_domain->cfg.procid = *((u32 *)data);
2952 ret = 0;
2953 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002954 case DOMAIN_ATTR_DYNAMIC: {
2955 int dynamic = *((int *)data);
2956
2957 if (smmu_domain->smmu != NULL) {
2958 dev_err(smmu_domain->smmu->dev,
2959 "cannot change dynamic attribute while attached\n");
2960 ret = -EBUSY;
2961 break;
2962 }
2963
2964 if (dynamic)
2965 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
2966 else
2967 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
2968 ret = 0;
2969 break;
2970 }
2971 case DOMAIN_ATTR_CONTEXT_BANK:
2972 /* context bank can't be set while attached */
2973 if (smmu_domain->smmu != NULL) {
2974 ret = -EBUSY;
2975 break;
2976 }
2977 /* ... and it can only be set for dynamic contexts. */
2978 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
2979 ret = -EINVAL;
2980 break;
2981 }
2982
2983 /* this will be validated during attach */
2984 smmu_domain->cfg.cbndx = *((unsigned int *)data);
2985 ret = 0;
2986 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002987 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
2988 u32 non_fatal_faults = *((int *)data);
2989
2990 if (non_fatal_faults)
2991 smmu_domain->attributes |=
2992 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
2993 else
2994 smmu_domain->attributes &=
2995 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
2996 ret = 0;
2997 break;
2998 }
Patrick Dalye62d3362016-03-15 18:58:28 -07002999 case DOMAIN_ATTR_S1_BYPASS: {
3000 int bypass = *((int *)data);
3001
3002 /* bypass can't be changed while attached */
3003 if (smmu_domain->smmu != NULL) {
3004 ret = -EBUSY;
3005 break;
3006 }
3007 if (bypass)
3008 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
3009 else
3010 smmu_domain->attributes &=
3011 ~(1 << DOMAIN_ATTR_S1_BYPASS);
3012
3013 ret = 0;
3014 break;
3015 }
Patrick Daly8befb662016-08-17 20:03:28 -07003016 case DOMAIN_ATTR_ATOMIC:
3017 {
3018 int atomic_ctx = *((int *)data);
3019
3020 /* can't be changed while attached */
3021 if (smmu_domain->smmu != NULL) {
3022 ret = -EBUSY;
3023 break;
3024 }
3025 if (atomic_ctx)
3026 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
3027 else
3028 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
3029 break;
3030 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07003031 case DOMAIN_ATTR_SECURE_VMID:
3032 if (smmu_domain->secure_vmid != VMID_INVAL) {
3033 ret = -ENODEV;
3034 WARN(1, "secure vmid already set!");
3035 break;
3036 }
3037 smmu_domain->secure_vmid = *((int *)data);
3038 break;
Patrick Daly1e279922017-09-06 15:57:45 -07003039 case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
3040 if (*((int *)data))
3041 smmu_domain->attributes |=
3042 1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR;
3043 ret = 0;
3044 break;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003045 /*
3046 * fast_smmu_unmap_page() and fast_smmu_alloc_iova() both
3047 * expect that the bus/clock/regulator are already on. Thus also
3048	 * force DOMAIN_ATTR_ATOMIC to be set.
3049 */
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003050 case DOMAIN_ATTR_FAST:
Patrick Daly0df84ac2017-10-11 17:32:41 -07003051 {
3052 int fast = *((int *)data);
3053
3054 if (fast) {
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003055 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003056 smmu_domain->attributes |= 1 << DOMAIN_ATTR_ATOMIC;
3057 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003058 ret = 0;
3059 break;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003060 }
Patrick Dalyce6786f2016-11-09 14:19:23 -08003061 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
3062 /* can't be changed while attached */
3063 if (smmu_domain->smmu != NULL) {
3064 ret = -EBUSY;
3065 break;
3066 }
3067 if (*((int *)data))
3068 smmu_domain->attributes |=
3069 1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
3070 ret = 0;
3071 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003072 case DOMAIN_ATTR_EARLY_MAP: {
3073 int early_map = *((int *)data);
3074
3075 ret = 0;
3076 if (early_map) {
3077 smmu_domain->attributes |=
3078 1 << DOMAIN_ATTR_EARLY_MAP;
3079 } else {
3080 if (smmu_domain->smmu)
3081 ret = arm_smmu_enable_s1_translations(
3082 smmu_domain);
3083
3084 if (!ret)
3085 smmu_domain->attributes &=
3086 ~(1 << DOMAIN_ATTR_EARLY_MAP);
3087 }
3088 break;
3089 }
Liam Mark53cf2342016-12-20 11:36:07 -08003090 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT: {
3091 int force_coherent = *((int *)data);
3092
3093 if (smmu_domain->smmu != NULL) {
3094 dev_err(smmu_domain->smmu->dev,
3095 "cannot change force coherent attribute while attached\n");
3096 ret = -EBUSY;
3097 break;
3098 }
3099
3100 if (force_coherent)
3101 smmu_domain->attributes |=
3102 1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
3103 else
3104 smmu_domain->attributes &=
3105 ~(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT);
3106
3107 ret = 0;
3108 break;
3109 }
3110
Charan Teja Reddyc682e472017-04-20 19:11:20 +05303111 case DOMAIN_ATTR_CB_STALL_DISABLE:
3112 if (*((int *)data))
3113 smmu_domain->attributes |=
3114 1 << DOMAIN_ATTR_CB_STALL_DISABLE;
3115 ret = 0;
3116 break;
Will Deaconc752ce42014-06-25 22:46:31 +01003117 default:
Will Deacon518f7132014-11-14 17:17:54 +00003118 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01003119 }
Will Deacon518f7132014-11-14 17:17:54 +00003120
3121out_unlock:
3122 mutex_unlock(&smmu_domain->init_mutex);
3123 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01003124}
3125
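/*
 * Minimal usage sketch (illustrative only, not code from this driver):
 * most of these attributes must be configured on an unattached domain,
 * since the setters above return -EBUSY once smmu_domain->smmu is set.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *	int atomic_ctx = 1;
 *
 *	if (!domain)
 *		return -ENOMEM;
 *	if (iommu_domain_set_attr(domain, DOMAIN_ATTR_ATOMIC, &atomic_ctx))
 *		goto err_free_domain;
 *	if (iommu_attach_device(domain, dev))
 *		goto err_free_domain;
 */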
Robin Murphy7e96c742016-09-14 15:26:46 +01003126static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
3127{
3128 u32 fwid = 0;
3129
3130 if (args->args_count > 0)
3131 fwid |= (u16)args->args[0];
3132
3133 if (args->args_count > 1)
3134 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
3135
3136 return iommu_fwspec_add_ids(dev, &fwid, 1);
3137}
3138
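/*
 * Enable stage 1 translation for the domain's context bank by setting
 * SCTLR.M; called when DOMAIN_ATTR_EARLY_MAP is cleared on an attached
 * domain, at which point mappings created beforehand take effect.
 */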
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003139static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain)
3140{
3141 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3142 struct arm_smmu_device *smmu = smmu_domain->smmu;
3143 void __iomem *cb_base;
3144 u32 reg;
3145 int ret;
3146
3147 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3148 ret = arm_smmu_power_on(smmu->pwr);
3149 if (ret)
3150 return ret;
3151
3152 reg = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3153 reg |= SCTLR_M;
3154
3155 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
3156 arm_smmu_power_off(smmu->pwr);
3157 return ret;
3158}
3159
Liam Mark3ba41cf2016-12-09 14:39:04 -08003160static bool arm_smmu_is_iova_coherent(struct iommu_domain *domain,
3161 dma_addr_t iova)
3162{
3163 bool ret;
3164 unsigned long flags;
3165 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3166 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
3167
3168 if (!ops)
3169 return false;
3170
3171 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
3172 ret = ops->is_iova_coherent(ops, iova);
3173 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
3174 return ret;
3175}
3176
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003177static void arm_smmu_trigger_fault(struct iommu_domain *domain,
3178 unsigned long flags)
3179{
3180 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3181 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3182 struct arm_smmu_device *smmu;
3183 void __iomem *cb_base;
3184
3185 if (!smmu_domain->smmu) {
3186 pr_err("Can't trigger faults on non-attached domains\n");
3187 return;
3188 }
3189
3190 smmu = smmu_domain->smmu;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003191 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003192 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003193
3194 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3195 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
3196 flags, cfg->cbndx);
3197 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07003198 /* give the interrupt time to fire... */
3199 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003200
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003201 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003202}
3203
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003204static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
3205{
Patrick Dalyda765c62017-09-11 16:31:07 -07003206 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3207 const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;
3208
3209 tlb->tlb_flush_all(smmu_domain);
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003210}
3211
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003212static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
3213{
3214 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3215
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003216 return arm_smmu_power_on(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003217}
3218
3219static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
3220{
3221 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3222
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003223 arm_smmu_power_off(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003224}
3225
Will Deacon518f7132014-11-14 17:17:54 +00003226static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01003227 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01003228 .domain_alloc = arm_smmu_domain_alloc,
3229 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01003230 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07003231 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01003232 .map = arm_smmu_map,
3233 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07003234 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01003235 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07003236 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01003237 .add_device = arm_smmu_add_device,
3238 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02003239 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01003240 .domain_get_attr = arm_smmu_domain_get_attr,
3241 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy7e96c742016-09-14 15:26:46 +01003242 .of_xlate = arm_smmu_of_xlate,
Will Deacon518f7132014-11-14 17:17:54 +00003243 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003244 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003245 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003246 .enable_config_clocks = arm_smmu_enable_config_clocks,
3247 .disable_config_clocks = arm_smmu_disable_config_clocks,
Liam Mark3ba41cf2016-12-09 14:39:04 -08003248 .is_iova_coherent = arm_smmu_is_iova_coherent,
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07003249 .iova_to_pte = arm_smmu_iova_to_pte,
Will Deacon45ae7cf2013-06-24 18:31:25 +01003250};
3251
Patrick Dalyad441dd2016-09-15 15:50:46 -07003252#define IMPL_DEF1_MICRO_MMU_CTRL 0
3253#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
3254#define MICRO_MMU_CTRL_IDLE (1 << 3)
3255
3256/* Definitions for implementation-defined registers */
3257#define ACTLR_QCOM_OSH_SHIFT 28
3258#define ACTLR_QCOM_OSH 1
3259
3260#define ACTLR_QCOM_ISH_SHIFT 29
3261#define ACTLR_QCOM_ISH 1
3262
3263#define ACTLR_QCOM_NSH_SHIFT 30
3264#define ACTLR_QCOM_NSH 1
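/*
 * Implementation-defined ACTLR fields for qsmmuv2. The names suggest
 * outer-/inner-/non-shareable overrides, but the exact hardware semantics
 * are implementation defined; qsmmuv2_device_reset() programs all three
 * for every context bank.
 */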
3265
3266static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003267{
3268 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003269 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003270
3271 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
3272 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
3273 0, 30000)) {
3274 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
3275 return -EBUSY;
3276 }
3277
3278 return 0;
3279}
3280
Patrick Dalyad441dd2016-09-15 15:50:46 -07003281static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003282{
3283 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3284 u32 reg;
3285
3286 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3287 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
3288 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3289
Patrick Dalyad441dd2016-09-15 15:50:46 -07003290 return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003291}
3292
Patrick Dalyad441dd2016-09-15 15:50:46 -07003293static int qsmmuv2_halt(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003294{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003295 return __qsmmuv2_halt(smmu, true);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003296}
3297
Patrick Dalyad441dd2016-09-15 15:50:46 -07003298static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003299{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003300 return __qsmmuv2_halt(smmu, false);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003301}
3302
Patrick Dalyad441dd2016-09-15 15:50:46 -07003303static void qsmmuv2_resume(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003304{
3305 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3306 u32 reg;
3307
3308 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3309 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
3310 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3311}
3312
Patrick Dalyad441dd2016-09-15 15:50:46 -07003313static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003314{
3315 int i;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003316 u32 val;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003317 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003318 void __iomem *cb_base;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003319
Patrick Dalyad441dd2016-09-15 15:50:46 -07003320 /*
3321 * SCTLR.M must be disabled here per ARM SMMUv2 spec
3322 * to prevent table walks with an inconsistent state.
3323 */
3324 for (i = 0; i < smmu->num_context_banks; ++i) {
3325 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3326 val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
3327 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
3328 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
3329 writel_relaxed(val, cb_base + ARM_SMMU_CB_ACTLR);
3330 }
3331
3332 /* Program implementation defined registers */
3333 qsmmuv2_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003334 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
3335 writel_relaxed(regs[i].value,
3336 ARM_SMMU_GR0(smmu) + regs[i].offset);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003337 qsmmuv2_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003338}
3339
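/*
 * qsmmuv2 ATOS: halt the SMMU, terminate any stalled transaction via
 * RESUME_TERMINATE, clear the context bank's FSR so new faults are
 * visible, momentarily disable stall-on-fault (SCTLR.CFCFG), perform the
 * common hardware translation, then restore SCTLR and resume. The whole
 * sequence is serialised by smmu->atos_lock.
 */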
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003340static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
3341 dma_addr_t iova)
Patrick Dalyad441dd2016-09-15 15:50:46 -07003342{
3343 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3344 struct arm_smmu_device *smmu = smmu_domain->smmu;
3345 int ret;
3346 phys_addr_t phys = 0;
3347 unsigned long flags;
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003348 u32 sctlr, sctlr_orig, fsr;
3349 void __iomem *cb_base;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003350
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003351 ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003352 if (ret)
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003353 return ret;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003354
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003355 spin_lock_irqsave(&smmu->atos_lock, flags);
3356 cb_base = ARM_SMMU_CB_BASE(smmu) +
3357 ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003358
3359 qsmmuv2_halt_nowait(smmu);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003360 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003361 qsmmuv2_wait_for_halt(smmu);
3362
3363 /* clear FSR to allow ATOS to log any faults */
3364 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
3365 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
3366
3367 /* disable stall mode momentarily */
3368 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3369 sctlr = sctlr_orig & ~SCTLR_CFCFG;
3370 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
3371
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003372 phys = __arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003373
3374 /* restore SCTLR */
3375 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
3376
3377 qsmmuv2_resume(smmu);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003378 spin_unlock_irqrestore(&smmu->atos_lock, flags);
3379
3380 arm_smmu_power_off(smmu_domain->smmu->pwr);
3381 return phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003382}
3383
3384struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
3385 .device_reset = qsmmuv2_device_reset,
3386 .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
Patrick Dalyad441dd2016-09-15 15:50:46 -07003387};
3388
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003389static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003390{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003391 int i;
3392 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003393 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003394 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003395
Peng Fan3ca37122016-05-03 21:50:30 +08003396 /*
3397	 * Before clearing ARM_MMU500_ACTLR_CPRE, the CACHE_LOCK bit of ACR
3398	 * must be cleared first. The CACHE_LOCK bit is only present in
3399	 * MMU-500r2 onwards.
3400 */
3401 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
3402 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
3403 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
3404 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
3405 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
3406 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
3407 }
3408
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003409 /* Make sure all context banks are disabled and clear CB_FSR */
3410 for (i = 0; i < smmu->num_context_banks; ++i) {
3411 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3412 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
3413 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003414 /*
3415 * Disable MMU-500's not-particularly-beneficial next-page
3416 * prefetcher for the sake of errata #841119 and #826419.
3417 */
3418 if (smmu->model == ARM_MMU500) {
3419 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
3420 reg &= ~ARM_MMU500_ACTLR_CPRE;
3421 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
3422 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003423 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003424}
3425
3426static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
3427{
3428 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy468f4942016-09-12 17:13:49 +01003429 int i;
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003430 u32 reg;
3431
3432 /* clear global FSR */
3433 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3434 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3435
Robin Murphy468f4942016-09-12 17:13:49 +01003436 /*
3437 * Reset stream mapping groups: Initial values mark all SMRn as
3438 * invalid and all S2CRn as bypass unless overridden.
3439 */
Patrick Daly59b6d202017-06-12 13:12:15 -07003440 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
3441 for (i = 0; i < smmu->num_mapping_groups; ++i)
3442 arm_smmu_write_sme(smmu, i);
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003443
Patrick Daly59b6d202017-06-12 13:12:15 -07003444 arm_smmu_context_bank_reset(smmu);
3445 }
Will Deacon1463fe42013-07-31 19:21:27 +01003446
Will Deacon45ae7cf2013-06-24 18:31:25 +01003447 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003448 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
3449 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
3450
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003451 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003452
Will Deacon45ae7cf2013-06-24 18:31:25 +01003453 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003454 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003455
3456 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003457 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003458
Robin Murphy25a1c962016-02-10 14:25:33 +00003459 /* Enable client access, handling unmatched streams as appropriate */
3460 reg &= ~sCR0_CLIENTPD;
3461 if (disable_bypass)
3462 reg |= sCR0_USFCFG;
3463 else
3464 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003465
3466 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003467 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003468
3469 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003470 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003471
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003472 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3473 reg |= sCR0_VMID16EN;
3474
Patrick Daly7f377fe2017-10-06 17:37:10 -07003475 /* Force bypass transaction to be Non-Shareable & not io-coherent */
3476 reg &= ~(sCR0_SHCFG_MASK << sCR0_SHCFG_SHIFT);
Prakash Gupta673a79f2017-11-16 18:07:00 +05303477 reg |= sCR0_SHCFG_NSH << sCR0_SHCFG_SHIFT;
Patrick Daly7f377fe2017-10-06 17:37:10 -07003478
Will Deacon45ae7cf2013-06-24 18:31:25 +01003479 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003480 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003481 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Dalyd7476202016-09-08 18:23:28 -07003482
3483 /* Manage any implementation defined features */
3484 arm_smmu_arch_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003485}
3486
3487static int arm_smmu_id_size_to_bits(int size)
3488{
3489 switch (size) {
3490 case 0:
3491 return 32;
3492 case 1:
3493 return 36;
3494 case 2:
3495 return 40;
3496 case 3:
3497 return 42;
3498 case 4:
3499 return 44;
3500 case 5:
3501 default:
3502 return 48;
3503 }
3504}
3505
Patrick Dalyda688822017-05-17 20:12:48 -07003506
3507/*
3508 * Some context banks need to be transferred from the bootloader to HLOS in a way
3509 * that allows ongoing traffic. The current expectation is that these context
3510 * banks operate in bypass mode.
3511 * Additionally, there must be exactly one device in devicetree with stream-ids
3512 * overlapping those used by the bootloader.
3513 */
3514static int arm_smmu_alloc_cb(struct iommu_domain *domain,
3515 struct arm_smmu_device *smmu,
3516 struct device *dev)
3517{
3518 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Dalye72526b2017-07-18 16:21:44 -07003519 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Dalyda688822017-05-17 20:12:48 -07003520 u32 i, idx;
3521 int cb = -EINVAL;
3522 bool dynamic;
3523
Patrick Dalye72526b2017-07-18 16:21:44 -07003524 /*
3525 * Dynamic domains have already set cbndx through domain attribute.
3526 * Verify that they picked a valid value.
3527 */
Patrick Dalyda688822017-05-17 20:12:48 -07003528 dynamic = is_dynamic_domain(domain);
Patrick Dalye72526b2017-07-18 16:21:44 -07003529 if (dynamic) {
3530 cb = smmu_domain->cfg.cbndx;
3531 if (cb < smmu->num_context_banks)
3532 return cb;
3533 else
3534 return -EINVAL;
3535 }
Patrick Dalyda688822017-05-17 20:12:48 -07003536
3537 mutex_lock(&smmu->stream_map_mutex);
3538 for_each_cfg_sme(fwspec, i, idx) {
3539 if (smmu->s2crs[idx].cb_handoff)
3540 cb = smmu->s2crs[idx].cbndx;
3541 }
3542
3543 if (cb < 0) {
3544 mutex_unlock(&smmu->stream_map_mutex);
3545 return __arm_smmu_alloc_bitmap(smmu->context_map,
3546 smmu->num_s2_context_banks,
3547 smmu->num_context_banks);
3548 }
3549
3550 for (i = 0; i < smmu->num_mapping_groups; i++) {
Patrick Daly2eb31362017-06-14 18:29:36 -07003551 if (smmu->s2crs[i].cb_handoff && smmu->s2crs[i].cbndx == cb) {
Patrick Dalyda688822017-05-17 20:12:48 -07003552 smmu->s2crs[i].cb_handoff = false;
3553 smmu->s2crs[i].count -= 1;
3554 }
3555 }
3556 mutex_unlock(&smmu->stream_map_mutex);
3557
3558 return cb;
3559}
3560
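/*
 * Record the SMR/S2CR state that the bootloader left programmed so the
 * corresponding stream mappings keep working; the context banks they
 * reference are marked busy in context_map and flagged cb_handoff until a
 * device with matching stream IDs attaches and claims them through
 * arm_smmu_alloc_cb().
 */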
3561static int arm_smmu_handoff_cbs(struct arm_smmu_device *smmu)
3562{
3563 u32 i, raw_smr, raw_s2cr;
3564 struct arm_smmu_smr smr;
3565 struct arm_smmu_s2cr s2cr;
3566
3567 for (i = 0; i < smmu->num_mapping_groups; i++) {
3568 raw_smr = readl_relaxed(ARM_SMMU_GR0(smmu) +
3569 ARM_SMMU_GR0_SMR(i));
3570 if (!(raw_smr & SMR_VALID))
3571 continue;
3572
3573 smr.mask = (raw_smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
3574 smr.id = (u16)raw_smr;
3575 smr.valid = true;
3576
3577 raw_s2cr = readl_relaxed(ARM_SMMU_GR0(smmu) +
3578 ARM_SMMU_GR0_S2CR(i));
Patrick Daly4ea4bdf2017-08-29 19:24:49 -07003579 memset(&s2cr, 0, sizeof(s2cr));
Patrick Dalyda688822017-05-17 20:12:48 -07003580 s2cr.group = NULL;
3581 s2cr.count = 1;
3582 s2cr.type = (raw_s2cr >> S2CR_TYPE_SHIFT) & S2CR_TYPE_MASK;
3583 s2cr.privcfg = (raw_s2cr >> S2CR_PRIVCFG_SHIFT) &
3584 S2CR_PRIVCFG_MASK;
3585 s2cr.cbndx = (u8)raw_s2cr;
3586 s2cr.cb_handoff = true;
3587
3588 if (s2cr.type != S2CR_TYPE_TRANS)
3589 continue;
3590
3591 smmu->smrs[i] = smr;
3592 smmu->s2crs[i] = s2cr;
3593 bitmap_set(smmu->context_map, s2cr.cbndx, 1);
3594 dev_dbg(smmu->dev, "Handoff smr: %x s2cr: %x cb: %d\n",
3595 raw_smr, raw_s2cr, s2cr.cbndx);
3596 }
3597
3598 return 0;
3599}
3600
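/*
 * The optional "attach-impl-defs" devicetree property is a flat list of
 * <offset value> pairs; each register at GR0 + offset is programmed with
 * value when the SMMU is reset (see qsmmuv2_device_reset()).
 */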
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003601static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
3602{
3603 struct device *dev = smmu->dev;
3604 int i, ntuples, ret;
3605 u32 *tuples;
3606 struct arm_smmu_impl_def_reg *regs, *regit;
3607
3608 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
3609 return 0;
3610
3611 ntuples /= sizeof(u32);
3612 if (ntuples % 2) {
3613 dev_err(dev,
3614 "Invalid number of attach-impl-defs registers: %d\n",
3615 ntuples);
3616 return -EINVAL;
3617 }
3618
3619 regs = devm_kmalloc(
3620 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
3621 GFP_KERNEL);
3622 if (!regs)
3623 return -ENOMEM;
3624
3625 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
3626 if (!tuples)
3627 return -ENOMEM;
3628
3629 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
3630 tuples, ntuples);
3631 if (ret)
3632 return ret;
3633
3634 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
3635 regit->offset = tuples[i];
3636 regit->value = tuples[i + 1];
3637 }
3638
3639 devm_kfree(dev, tuples);
3640
3641 smmu->impl_def_attach_registers = regs;
3642 smmu->num_impl_def_attach_registers = ntuples / 2;
3643
3644 return 0;
3645}
3646
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003647
3648static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003649{
3650 const char *cname;
3651 struct property *prop;
3652 int i;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003653 struct device *dev = pwr->dev;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003654
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003655 pwr->num_clocks =
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003656 of_property_count_strings(dev->of_node, "clock-names");
3657
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003658 if (pwr->num_clocks < 1) {
3659 pwr->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003660 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07003661 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003662
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003663 pwr->clocks = devm_kzalloc(
3664 dev, sizeof(*pwr->clocks) * pwr->num_clocks,
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003665 GFP_KERNEL);
3666
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003667 if (!pwr->clocks)
3668 return -ENOMEM;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003669
3670 i = 0;
3671 of_property_for_each_string(dev->of_node, "clock-names",
3672 prop, cname) {
3673 struct clk *c = devm_clk_get(dev, cname);
3674
3675 if (IS_ERR(c)) {
3676 dev_err(dev, "Couldn't get clock: %s",
3677 cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07003678 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003679 }
3680
3681 if (clk_get_rate(c) == 0) {
3682 long rate = clk_round_rate(c, 1000);
3683
3684 clk_set_rate(c, rate);
3685 }
3686
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003687 pwr->clocks[i] = c;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003688
3689 ++i;
3690 }
3691 return 0;
3692}
3693
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003694static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003695{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003696 const char *cname;
3697 struct property *prop;
3698 int i, ret = 0;
3699 struct device *dev = pwr->dev;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003700
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003701 pwr->num_gdscs =
3702 of_property_count_strings(dev->of_node, "qcom,regulator-names");
3703
3704 if (pwr->num_gdscs < 1) {
3705 pwr->num_gdscs = 0;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003706 return 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003707 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003708
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003709 pwr->gdscs = devm_kzalloc(
3710 dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
3711
3712 if (!pwr->gdscs)
3713 return -ENOMEM;
3714
Prakash Guptafad87ca2017-05-16 12:13:02 +05303715 if (!of_property_read_u32(dev->of_node,
3716 "qcom,deferred-regulator-disable-delay",
3717 &(pwr->regulator_defer)))
3718 dev_info(dev, "regulator defer delay %d\n",
3719 pwr->regulator_defer);
3720
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003721 i = 0;
3722 of_property_for_each_string(dev->of_node, "qcom,regulator-names",
3723 prop, cname)
Patrick Daly86396be2017-04-17 18:08:45 -07003724 pwr->gdscs[i++].supply = cname;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003725
3726 ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
3727 return ret;
3728}
3729
3730static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
3731{
3732 struct device *dev = pwr->dev;
3733
3734 /* We don't want the bus APIs to print an error message */
3735 if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
3736 dev_dbg(dev, "No bus scaling info\n");
3737 return 0;
3738 }
3739
3740 pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
3741 if (!pwr->bus_dt_data) {
3742 dev_err(dev, "Unable to read bus-scaling from devicetree\n");
3743 return -EINVAL;
3744 }
3745
3746 pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
3747 if (!pwr->bus_client) {
3748 dev_err(dev, "Bus client registration failed\n");
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003749 return -EINVAL;
3750 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003751
3752 return 0;
3753}
3754
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003755/*
3756 * Cleanup done by devm. Any non-devm resources must clean up themselves.
3757 */
3758static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
3759 struct platform_device *pdev)
Patrick Daly2764f952016-09-06 19:22:44 -07003760{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003761 struct arm_smmu_power_resources *pwr;
3762 int ret;
Patrick Daly2764f952016-09-06 19:22:44 -07003763
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003764 pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
3765 if (!pwr)
3766 return ERR_PTR(-ENOMEM);
Patrick Daly2764f952016-09-06 19:22:44 -07003767
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003768 pwr->dev = &pdev->dev;
3769 pwr->pdev = pdev;
3770 mutex_init(&pwr->power_lock);
3771 spin_lock_init(&pwr->clock_refs_lock);
Patrick Daly2764f952016-09-06 19:22:44 -07003772
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003773 ret = arm_smmu_init_clocks(pwr);
3774 if (ret)
3775 return ERR_PTR(ret);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003776
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003777 ret = arm_smmu_init_regulators(pwr);
3778 if (ret)
3779 return ERR_PTR(ret);
3780
3781 ret = arm_smmu_init_bus_scaling(pwr);
3782 if (ret)
3783 return ERR_PTR(ret);
3784
3785 return pwr;
Patrick Daly2764f952016-09-06 19:22:44 -07003786}
3787
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003788/*
Patrick Dalyabeee952017-04-13 18:14:59 -07003789 * Bus APIs are not devm-managed; the bus client must be unregistered explicitly.
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003790 */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003791static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003792{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003793 msm_bus_scale_unregister_client(pwr->bus_client);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003794}
3795
Will Deacon45ae7cf2013-06-24 18:31:25 +01003796static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
3797{
3798 unsigned long size;
3799 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
3800 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003801 bool cttw_dt, cttw_reg;
Robin Murphya754fd12016-09-12 17:13:50 +01003802 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003803
Mitchel Humpherysba822582015-10-20 11:37:41 -07003804 dev_dbg(smmu->dev, "probing hardware configuration...\n");
3805 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01003806 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003807
3808 /* ID0 */
3809 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01003810
3811 /* Restrict available stages based on module parameter */
3812 if (force_stage == 1)
3813 id &= ~(ID0_S2TS | ID0_NTS);
3814 else if (force_stage == 2)
3815 id &= ~(ID0_S1TS | ID0_NTS);
3816
Will Deacon45ae7cf2013-06-24 18:31:25 +01003817 if (id & ID0_S1TS) {
3818 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003819 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003820 }
3821
3822 if (id & ID0_S2TS) {
3823 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003824 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003825 }
3826
3827 if (id & ID0_NTS) {
3828 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003829 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003830 }
3831
3832 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01003833 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003834 dev_err(smmu->dev, "\tno translation support!\n");
3835 return -ENODEV;
3836 }
3837
Robin Murphyb7862e32016-04-13 18:13:03 +01003838 if ((id & ID0_S1TS) &&
3839 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003840 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003841 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003842 }
3843
Robin Murphybae2c2d2015-07-29 19:46:05 +01003844 /*
3845 * In order for DMA API calls to work properly, we must defer to what
3846 * the DT says about coherency, regardless of what the hardware claims.
3847 * Fortunately, this also opens up a workaround for systems where the
3848 * ID register value has ended up configured incorrectly.
3849 */
3850 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
3851 cttw_reg = !!(id & ID0_CTTW);
3852 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003853 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003854 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003855 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01003856 cttw_dt ? "" : "non-");
3857 if (cttw_dt != cttw_reg)
3858 dev_notice(smmu->dev,
3859 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003860
Robin Murphy53867802016-09-12 17:13:48 +01003861 /* Max. number of entries we have for stream matching/indexing */
3862 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
3863 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003864 if (id & ID0_SMS) {
Robin Murphy53867802016-09-12 17:13:48 +01003865 u32 smr;
Patrick Daly937de532016-12-12 18:44:09 -08003866 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003867
3868 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy53867802016-09-12 17:13:48 +01003869 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
3870 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003871 dev_err(smmu->dev,
3872 "stream-matching supported, but no SMRs present!\n");
3873 return -ENODEV;
3874 }
3875
Robin Murphy53867802016-09-12 17:13:48 +01003876 /*
3877 * SMR.ID bits may not be preserved if the corresponding MASK
3878 * bits are set, so check each one separately. We can reject
3879 * masters later if they try to claim IDs outside these masks.
3880 */
Patrick Daly937de532016-12-12 18:44:09 -08003881 for (i = 0; i < size; i++) {
3882 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
3883 if (!(smr & SMR_VALID))
3884 break;
3885 }
3886 if (i == size) {
3887 dev_err(smmu->dev,
3888 "Unable to compute streamid_masks\n");
3889 return -ENODEV;
3890 }
3891
Robin Murphy53867802016-09-12 17:13:48 +01003892 smr = smmu->streamid_mask << SMR_ID_SHIFT;
Patrick Daly937de532016-12-12 18:44:09 -08003893 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
3894 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
Robin Murphy53867802016-09-12 17:13:48 +01003895 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003896
Robin Murphy53867802016-09-12 17:13:48 +01003897 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
Patrick Daly937de532016-12-12 18:44:09 -08003898 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
3899 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
Robin Murphy53867802016-09-12 17:13:48 +01003900 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
Dhaval Patel031d7462015-05-09 14:47:29 -07003901
Robin Murphy468f4942016-09-12 17:13:49 +01003902 /* Zero-initialised to mark as invalid */
3903 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
3904 GFP_KERNEL);
3905 if (!smmu->smrs)
3906 return -ENOMEM;
3907
Robin Murphy53867802016-09-12 17:13:48 +01003908 dev_notice(smmu->dev,
3909 "\tstream matching with %lu register groups, mask 0x%x",
3910 size, smmu->smr_mask_mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003911 }
Robin Murphya754fd12016-09-12 17:13:50 +01003912 /* s2cr->type == 0 means translation, so initialise explicitly */
3913 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
3914 GFP_KERNEL);
3915 if (!smmu->s2crs)
3916 return -ENOMEM;
3917 for (i = 0; i < size; i++)
3918 smmu->s2crs[i] = s2cr_init_val;
3919
Robin Murphy53867802016-09-12 17:13:48 +01003920 smmu->num_mapping_groups = size;
Robin Murphy6668f692016-09-12 17:13:54 +01003921 mutex_init(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003922
Robin Murphy7602b872016-04-28 17:12:09 +01003923 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
3924 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
3925 if (!(id & ID0_PTFS_NO_AARCH32S))
3926 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
3927 }
3928
Will Deacon45ae7cf2013-06-24 18:31:25 +01003929 /* ID1 */
3930 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01003931 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003932
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003933 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00003934 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01003935 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003936 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07003937 dev_warn(smmu->dev,
3938 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
3939 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003940
Will Deacon518f7132014-11-14 17:17:54 +00003941 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003942 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
3943 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
3944 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
3945 return -ENODEV;
3946 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07003947 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01003948 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01003949 /*
3950 * Cavium CN88xx erratum #27704.
3951 * Ensure ASID and VMID allocation is unique across all SMMUs in
3952 * the system.
3953 */
3954 if (smmu->model == CAVIUM_SMMUV2) {
3955 smmu->cavium_id_base =
3956 atomic_add_return(smmu->num_context_banks,
3957 &cavium_smmu_context_count);
3958 smmu->cavium_id_base -= smmu->num_context_banks;
3959 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003960
3961 /* ID2 */
3962 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
3963 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003964 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003965
Will Deacon518f7132014-11-14 17:17:54 +00003966 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003967 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003968 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003969
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003970 if (id & ID2_VMID16)
3971 smmu->features |= ARM_SMMU_FEAT_VMID16;
3972
Robin Murphyf1d84542015-03-04 16:41:05 +00003973 /*
3974 * What the page table walker can address actually depends on which
3975 * descriptor format is in use, but since a) we don't know that yet,
3976 * and b) it can vary per context bank, this will have to do...
3977 */
3978 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
3979 dev_warn(smmu->dev,
3980 "failed to set DMA mask for table walker\n");
3981
Robin Murphyb7862e32016-04-13 18:13:03 +01003982 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00003983 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01003984 if (smmu->version == ARM_SMMU_V1_64K)
3985 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003986 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003987 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00003988 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00003989 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01003990 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00003991 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01003992 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00003993 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01003994 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003995 }
3996
Robin Murphy7602b872016-04-28 17:12:09 +01003997 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01003998 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01003999 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01004000 if (smmu->features &
4001 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01004002 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01004003 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01004004 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01004005 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01004006 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01004007
Robin Murphyd5466352016-05-09 17:20:09 +01004008 if (arm_smmu_ops.pgsize_bitmap == -1UL)
4009 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
4010 else
4011 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004012 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01004013 smmu->pgsize_bitmap);
4014
Will Deacon518f7132014-11-14 17:17:54 +00004015
Will Deacon28d60072014-09-01 16:24:48 +01004016 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004017 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
4018 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01004019
4020 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004021 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
4022 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01004023
Will Deacon45ae7cf2013-06-24 18:31:25 +01004024 return 0;
4025}
4026
Robin Murphy67b65a32016-04-13 18:12:57 +01004027struct arm_smmu_match_data {
4028 enum arm_smmu_arch_version version;
4029 enum arm_smmu_implementation model;
Patrick Dalyd7476202016-09-08 18:23:28 -07004030 struct arm_smmu_arch_ops *arch_ops;
Robin Murphy67b65a32016-04-13 18:12:57 +01004031};
4032
Patrick Dalyd7476202016-09-08 18:23:28 -07004033#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops) \
4034static struct arm_smmu_match_data name = { \
4035.version = ver, \
4036.model = imp, \
4037.arch_ops = ops, \
4038} \
Robin Murphy67b65a32016-04-13 18:12:57 +01004039
Patrick Daly1f8a2882016-09-12 17:32:05 -07004040struct arm_smmu_arch_ops qsmmuv500_arch_ops;
4041
Patrick Dalyd7476202016-09-08 18:23:28 -07004042ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
4043ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
4044ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
4045ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
4046ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
Patrick Dalyad441dd2016-09-15 15:50:46 -07004047ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004048ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
4049 &qsmmuv500_arch_ops);
Robin Murphy67b65a32016-04-13 18:12:57 +01004050
Joerg Roedel09b52692014-10-02 12:24:45 +02004051static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01004052 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
4053 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
4054 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01004055 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01004056 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01004057 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07004058 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Patrick Daly1f8a2882016-09-12 17:32:05 -07004059 { .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
Robin Murphy09360402014-08-28 17:51:59 +01004060 { },
4061};
4062MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
4063
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004064
4065static int arm_smmu_of_iommu_configure_fixup(struct device *dev, void *data)
4066{
4067 if (!dev->iommu_fwspec)
4068 of_iommu_configure(dev, dev->of_node);
4069 return 0;
4070}
4071
Patrick Daly000a2f22017-02-13 22:18:12 -08004072static int arm_smmu_add_device_fixup(struct device *dev, void *data)
4073{
4074 struct iommu_ops *ops = data;
4075
4076 ops->add_device(dev);
4077 return 0;
4078}
4079
Patrick Daly1f8a2882016-09-12 17:32:05 -07004080static int qsmmuv500_tbu_register(struct device *dev, void *data);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004081static int arm_smmu_device_dt_probe(struct platform_device *pdev)
4082{
Robin Murphy67b65a32016-04-13 18:12:57 +01004083 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004084 struct resource *res;
4085 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004086 struct device *dev = &pdev->dev;
Robin Murphyd5b41782016-09-14 15:21:39 +01004087 int num_irqs, i, err;
Robin Murphy7e96c742016-09-14 15:26:46 +01004088 bool legacy_binding;
4089
4090 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
4091 if (legacy_binding && !using_generic_binding) {
4092 if (!using_legacy_binding)
4093 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
4094 using_legacy_binding = true;
4095 } else if (!legacy_binding && !using_legacy_binding) {
4096 using_generic_binding = true;
4097 } else {
4098 dev_err(dev, "not probing due to mismatched DT properties\n");
4099 return -ENODEV;
4100 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004101
4102 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
4103 if (!smmu) {
4104 dev_err(dev, "failed to allocate arm_smmu_device\n");
4105 return -ENOMEM;
4106 }
4107 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08004108 spin_lock_init(&smmu->atos_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07004109 idr_init(&smmu->asid_idr);
4110 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004111
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004112 data = of_device_get_match_data(dev);
Robin Murphy67b65a32016-04-13 18:12:57 +01004113 smmu->version = data->version;
4114 smmu->model = data->model;
Patrick Dalyd7476202016-09-08 18:23:28 -07004115 smmu->arch_ops = data->arch_ops;
Robin Murphy09360402014-08-28 17:51:59 +01004116
Will Deacon45ae7cf2013-06-24 18:31:25 +01004117 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01004118 smmu->base = devm_ioremap_resource(dev, res);
4119 if (IS_ERR(smmu->base))
4120 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004121 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004122
4123 if (of_property_read_u32(dev->of_node, "#global-interrupts",
4124 &smmu->num_global_irqs)) {
4125 dev_err(dev, "missing #global-interrupts property\n");
4126 return -ENODEV;
4127 }
4128
4129 num_irqs = 0;
4130 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
4131 num_irqs++;
4132 if (num_irqs > smmu->num_global_irqs)
4133 smmu->num_context_irqs++;
4134 }
4135
Andreas Herrmann44a08de2013-10-01 13:39:07 +01004136 if (!smmu->num_context_irqs) {
4137 dev_err(dev, "found %d interrupts but expected at least %d\n",
4138 num_irqs, smmu->num_global_irqs + 1);
4139 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004140 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004141
4142 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
4143 GFP_KERNEL);
4144 if (!smmu->irqs) {
4145 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
4146 return -ENOMEM;
4147 }
4148
4149 for (i = 0; i < num_irqs; ++i) {
4150 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07004151
Will Deacon45ae7cf2013-06-24 18:31:25 +01004152 if (irq < 0) {
4153 dev_err(dev, "failed to get irq index %d\n", i);
4154 return -ENODEV;
4155 }
4156 smmu->irqs[i] = irq;
4157 }
4158
Dhaval Patel031d7462015-05-09 14:47:29 -07004159 parse_driver_options(smmu);
4160
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004161 smmu->pwr = arm_smmu_init_power_resources(pdev);
4162 if (IS_ERR(smmu->pwr))
4163 return PTR_ERR(smmu->pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004164
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004165 err = arm_smmu_power_on(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004166 if (err)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004167 goto out_exit_power_resources;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004168
4169 err = arm_smmu_device_cfg_probe(smmu);
4170 if (err)
4171 goto out_power_off;
4172
Patrick Dalyda688822017-05-17 20:12:48 -07004173 err = arm_smmu_handoff_cbs(smmu);
4174 if (err)
4175 goto out_power_off;
4176
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004177 err = arm_smmu_parse_impl_def_registers(smmu);
4178 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004179 goto out_power_off;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004180
Robin Murphyb7862e32016-04-13 18:13:03 +01004181 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01004182 smmu->num_context_banks != smmu->num_context_irqs) {
4183 dev_err(dev,
Mitchel Humpherys73230ce2014-12-11 17:18:02 -08004184 "found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
4185 smmu->num_context_irqs, smmu->num_context_banks,
4186 smmu->num_context_banks);
4187 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004188 }
4189
Will Deacon45ae7cf2013-06-24 18:31:25 +01004190 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08004191 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
4192 NULL, arm_smmu_global_fault,
4193 IRQF_ONESHOT | IRQF_SHARED,
4194 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004195 if (err) {
4196 dev_err(dev, "failed to request global IRQ %d (%u)\n",
4197 i, smmu->irqs[i]);
Robin Murphyd5b41782016-09-14 15:21:39 +01004198 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004199 }
4200 }
4201
Patrick Dalyd7476202016-09-08 18:23:28 -07004202 err = arm_smmu_arch_init(smmu);
4203 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004204 goto out_power_off;
Patrick Dalyd7476202016-09-08 18:23:28 -07004205
Robin Murphy06e393e2016-09-12 17:13:55 +01004206 of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004207 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01004208 arm_smmu_device_reset(smmu);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004209 arm_smmu_power_off(smmu->pwr);
Patrick Dalyd7476202016-09-08 18:23:28 -07004210
Patrick Daly8e3371a2017-02-13 22:14:53 -08004211 INIT_LIST_HEAD(&smmu->list);
4212 spin_lock(&arm_smmu_devices_lock);
4213 list_add(&smmu->list, &arm_smmu_devices);
4214 spin_unlock(&arm_smmu_devices_lock);
4215
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004216 /* bus_set_iommu depends on this. */
4217 bus_for_each_dev(&platform_bus_type, NULL, NULL,
4218 arm_smmu_of_iommu_configure_fixup);
4219
Robin Murphy7e96c742016-09-14 15:26:46 +01004220 /* Oh, for a proper bus abstraction */
4221 if (!iommu_present(&platform_bus_type))
4222 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
Patrick Daly000a2f22017-02-13 22:18:12 -08004223 else
4224 bus_for_each_dev(&platform_bus_type, NULL, &arm_smmu_ops,
4225 arm_smmu_add_device_fixup);
Robin Murphy7e96c742016-09-14 15:26:46 +01004226#ifdef CONFIG_ARM_AMBA
4227 if (!iommu_present(&amba_bustype))
4228 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
4229#endif
4230#ifdef CONFIG_PCI
4231 if (!iommu_present(&pci_bus_type)) {
4232 pci_request_acs();
4233 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
4234 }
4235#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01004236 return 0;
4237
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004238out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004239 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004240
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004241out_exit_power_resources:
4242 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004243
Will Deacon45ae7cf2013-06-24 18:31:25 +01004244 return err;
4245}
4246
4247static int arm_smmu_device_remove(struct platform_device *pdev)
4248{
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004249 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004250
4251 if (!smmu)
4252 return -ENODEV;
4253
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004254 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07004255 return -EINVAL;
4256
Will Deaconecfadb62013-07-31 19:21:28 +01004257 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004258 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004259
Patrick Dalyc190d932016-08-30 17:23:28 -07004260 idr_destroy(&smmu->asid_idr);
4261
Will Deacon45ae7cf2013-06-24 18:31:25 +01004262 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07004263 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004264 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004265
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004266 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004267
Will Deacon45ae7cf2013-06-24 18:31:25 +01004268 return 0;
4269}
4270
Will Deacon45ae7cf2013-06-24 18:31:25 +01004271static struct platform_driver arm_smmu_driver = {
4272 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004273 .name = "arm-smmu",
4274 .of_match_table = of_match_ptr(arm_smmu_of_match),
4275 },
4276 .probe = arm_smmu_device_dt_probe,
4277 .remove = arm_smmu_device_remove,
4278};
4279
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004280static struct platform_driver qsmmuv500_tbu_driver;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004281static int __init arm_smmu_init(void)
4282{
Robin Murphy7e96c742016-09-14 15:26:46 +01004283 static bool registered;
4284 int ret = 0;
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004285 ktime_t cur;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004286
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004287 if (registered)
4288 return 0;
4289
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004290 cur = ktime_get();
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004291 ret = platform_driver_register(&qsmmuv500_tbu_driver);
4292 if (ret)
4293 return ret;
4294
4295 ret = platform_driver_register(&arm_smmu_driver);
4296 registered = !ret;
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004297 trace_smmu_init(ktime_us_delta(ktime_get(), cur));
4298
Robin Murphy7e96c742016-09-14 15:26:46 +01004299 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004300}
4301
4302static void __exit arm_smmu_exit(void)
4303{
4304 return platform_driver_unregister(&arm_smmu_driver);
4305}
4306
Andreas Herrmannb1950b22013-10-01 13:39:05 +01004307subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004308module_exit(arm_smmu_exit);
4309
Robin Murphy7e96c742016-09-14 15:26:46 +01004310static int __init arm_smmu_of_init(struct device_node *np)
4311{
4312 int ret = arm_smmu_init();
4313
4314 if (ret)
4315 return ret;
4316
4317 if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
4318 return -ENODEV;
4319
4320 return 0;
4321}
4322IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
4323IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
4324IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
4325IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
4326IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
4327IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
Robin Murphy7e96c742016-09-14 15:26:46 +01004328
Patrick Dalya0fddb62017-03-27 19:26:59 -07004329#define TCU_HW_VERSION_HLOS1 (0x18)
4330
Patrick Daly1f8a2882016-09-12 17:32:05 -07004331#define DEBUG_SID_HALT_REG 0x0
4332#define DEBUG_SID_HALT_VAL (0x1 << 16)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004333#define DEBUG_SID_HALT_SID_MASK 0x3ff
4334
4335#define DEBUG_VA_ADDR_REG 0x8
4336
4337#define DEBUG_TXN_TRIGG_REG 0x18
4338#define DEBUG_TXN_AXPROT_SHIFT 6
4339#define DEBUG_TXN_AXCACHE_SHIFT 2
4340#define DEBUG_TRX_WRITE (0x1 << 1)
4341#define DEBUG_TXN_READ (0x0 << 1)
4342#define DEBUG_TXN_TRIGGER 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004343
4344#define DEBUG_SR_HALT_ACK_REG 0x20
4345#define DEBUG_SR_HALT_ACK_VAL (0x1 << 1)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004346#define DEBUG_SR_ECATS_RUNNING_VAL (0x1 << 0)
4347
4348#define DEBUG_PAR_REG 0x28
4349#define DEBUG_PAR_PA_MASK ((0x1ULL << 36) - 1)
4350#define DEBUG_PAR_PA_SHIFT 12
4351#define DEBUG_PAR_FAULT_VAL 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004352
Patrick Daly8c1202b2017-05-10 15:42:30 -07004353#define TBU_DBG_TIMEOUT_US 100
Patrick Daly1f8a2882016-09-12 17:32:05 -07004354
Patrick Daly23301482017-10-12 16:18:25 -07004355#define QSMMUV500_ACTLR_DEEP_PREFETCH_MASK 0x3
4356#define QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT 0x8
4357
Patrick Daly03330cc2017-08-11 14:56:38 -07004358
4359struct actlr_setting {
4360 struct arm_smmu_smr smr;
4361 u32 actlr;
4362};
4363
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004364struct qsmmuv500_archdata {
4365 struct list_head tbus;
Patrick Dalya0fddb62017-03-27 19:26:59 -07004366 void __iomem *tcu_base;
4367 u32 version;
Patrick Dalyda765c62017-09-11 16:31:07 -07004368
4369 struct actlr_setting *actlrs;
4370 u32 actlr_tbl_size;
4371
4372 struct arm_smmu_smr *errata1_clients;
4373 u32 num_errata1_clients;
4374 remote_spinlock_t errata1_lock;
4375 ktime_t last_tlbi_ktime;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004376};
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004377#define get_qsmmuv500_archdata(smmu) \
4378 ((struct qsmmuv500_archdata *)(smmu->archdata))
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004379
Patrick Daly1f8a2882016-09-12 17:32:05 -07004380struct qsmmuv500_tbu_device {
4381 struct list_head list;
4382 struct device *dev;
4383 struct arm_smmu_device *smmu;
4384 void __iomem *base;
4385 void __iomem *status_reg;
4386
4387 struct arm_smmu_power_resources *pwr;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004388 u32 sid_start;
4389 u32 num_sids;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004390
4391 /* Protects halt count */
4392 spinlock_t halt_lock;
4393 u32 halt_count;
4394};
4395
Patrick Daly03330cc2017-08-11 14:56:38 -07004396struct qsmmuv500_group_iommudata {
4397 bool has_actlr;
4398 u32 actlr;
4399};
4400#define to_qsmmuv500_group_iommudata(group) \
4401 ((struct qsmmuv500_group_iommudata *) \
4402 (iommu_group_get_iommudata(group)))
4403
4404
4405static bool arm_smmu_fwspec_match_smr(struct iommu_fwspec *fwspec,
Patrick Dalyda765c62017-09-11 16:31:07 -07004406 struct arm_smmu_smr *smr)
4407{
4408 struct arm_smmu_smr *smr2;
Patrick Daly03330cc2017-08-11 14:56:38 -07004409 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Patrick Dalyda765c62017-09-11 16:31:07 -07004410 int i, idx;
4411
Patrick Daly03330cc2017-08-11 14:56:38 -07004412 for_each_cfg_sme(fwspec, i, idx) {
4413 smr2 = &smmu->smrs[idx];
Patrick Dalyda765c62017-09-11 16:31:07 -07004414 /* Continue if table entry does not match */
4415 if ((smr->id ^ smr2->id) & ~(smr->mask | smr2->mask))
4416 continue;
4417 return true;
4418 }
4419 return false;
4420}
4421
4422#define ERRATA1_REMOTE_SPINLOCK "S:6"
4423#define ERRATA1_TLBI_INTERVAL_US 10
4424static bool
4425qsmmuv500_errata1_required(struct arm_smmu_domain *smmu_domain,
4426 struct qsmmuv500_archdata *data)
4427{
4428 bool ret = false;
4429 int j;
4430 struct arm_smmu_smr *smr;
Patrick Daly03330cc2017-08-11 14:56:38 -07004431 struct iommu_fwspec *fwspec;
Patrick Dalyda765c62017-09-11 16:31:07 -07004432
4433 if (smmu_domain->qsmmuv500_errata1_init)
4434 return smmu_domain->qsmmuv500_errata1_client;
4435
Patrick Daly03330cc2017-08-11 14:56:38 -07004436 fwspec = smmu_domain->dev->iommu_fwspec;
Patrick Dalyda765c62017-09-11 16:31:07 -07004437 for (j = 0; j < data->num_errata1_clients; j++) {
4438 smr = &data->errata1_clients[j];
Patrick Daly03330cc2017-08-11 14:56:38 -07004439 if (arm_smmu_fwspec_match_smr(fwspec, smr)) {
Patrick Dalyda765c62017-09-11 16:31:07 -07004440 ret = true;
4441 break;
4442 }
4443 }
4444
4445 smmu_domain->qsmmuv500_errata1_init = true;
4446 smmu_domain->qsmmuv500_errata1_client = ret;
4447 return ret;
4448}
4449
Patrick Daly86960052017-12-04 18:53:13 -08004450#define SCM_CONFIG_ERRATA1_CLIENT_ALL 0x2
4451#define SCM_CONFIG_ERRATA1 0x3
Patrick Dalyda765c62017-09-11 16:31:07 -07004452static void __qsmmuv500_errata1_tlbiall(struct arm_smmu_domain *smmu_domain)
4453{
4454 struct arm_smmu_device *smmu = smmu_domain->smmu;
4455 struct device *dev = smmu_domain->dev;
4456 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
4457 void __iomem *base;
Patrick Daly86960052017-12-04 18:53:13 -08004458 int ret;
Patrick Dalyda765c62017-09-11 16:31:07 -07004459 ktime_t cur;
4460 u32 val;
Patrick Daly86960052017-12-04 18:53:13 -08004461 struct scm_desc desc = {
4462 .args[0] = SCM_CONFIG_ERRATA1_CLIENT_ALL,
4463 .args[1] = false,
4464 .arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
4465 };
Patrick Dalyda765c62017-09-11 16:31:07 -07004466
4467 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
4468 writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
4469 writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
Patrick Daly86960052017-12-04 18:53:13 -08004470 if (!readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
4471 !(val & TLBSTATUS_SACTIVE), 0, 100))
4472 return;
4473
4474 ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_SMMU_PROGRAM,
4475 SCM_CONFIG_ERRATA1),
4476 &desc);
4477 if (ret) {
4478 dev_err(smmu->dev, "Calling into TZ to disable ERRATA1 failed - IOMMU hardware in bad state\n");
4479 BUG();
4480 return;
4481 }
4482
4483 cur = ktime_get();
4484 trace_tlbi_throttle_start(dev, 0);
4485 msm_bus_noc_throttle_wa(true);
4486
Patrick Dalyda765c62017-09-11 16:31:07 -07004487 if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
Patrick Daly86960052017-12-04 18:53:13 -08004488 !(val & TLBSTATUS_SACTIVE), 0, 10000)) {
4489 dev_err(smmu->dev, "ERRATA1 TLBSYNC timeout - IOMMU hardware in bad state");
4490 trace_tlbsync_timeout(dev, 0);
4491 BUG();
4492 }
Patrick Dalyda765c62017-09-11 16:31:07 -07004493
Patrick Daly86960052017-12-04 18:53:13 -08004494 msm_bus_noc_throttle_wa(false);
4495 trace_tlbi_throttle_end(dev, ktime_us_delta(ktime_get(), cur));
Patrick Dalyda765c62017-09-11 16:31:07 -07004496
Patrick Daly86960052017-12-04 18:53:13 -08004497 desc.args[1] = true;
4498 ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_SMMU_PROGRAM,
4499 SCM_CONFIG_ERRATA1),
4500 &desc);
4501 if (ret) {
4502 dev_err(smmu->dev, "Calling into TZ to reenable ERRATA1 failed - IOMMU hardware in bad state\n");
4503 BUG();
Patrick Dalyda765c62017-09-11 16:31:07 -07004504 }
4505}
4506
4507/* Must be called with clocks/regulators enabled */
4508static void qsmmuv500_errata1_tlb_inv_context(void *cookie)
4509{
4510 struct arm_smmu_domain *smmu_domain = cookie;
4511 struct device *dev = smmu_domain->dev;
4512 struct qsmmuv500_archdata *data =
4513 get_qsmmuv500_archdata(smmu_domain->smmu);
4514 ktime_t cur;
Patrick Daly1faa3112017-10-31 16:40:40 -07004515 unsigned long flags;
Patrick Dalyda765c62017-09-11 16:31:07 -07004516 bool errata;
4517
4518 cur = ktime_get();
Prakash Gupta25f90512017-11-20 14:56:54 +05304519 trace_tlbi_start(dev, 0);
Patrick Dalyda765c62017-09-11 16:31:07 -07004520
4521 errata = qsmmuv500_errata1_required(smmu_domain, data);
Patrick Daly1faa3112017-10-31 16:40:40 -07004522 remote_spin_lock_irqsave(&data->errata1_lock, flags);
Patrick Dalyda765c62017-09-11 16:31:07 -07004523 if (errata) {
4524 s64 delta;
4525
4526 delta = ktime_us_delta(ktime_get(), data->last_tlbi_ktime);
4527 if (delta < ERRATA1_TLBI_INTERVAL_US)
4528 udelay(ERRATA1_TLBI_INTERVAL_US - delta);
4529
4530 __qsmmuv500_errata1_tlbiall(smmu_domain);
4531
4532 data->last_tlbi_ktime = ktime_get();
4533 } else {
4534 __qsmmuv500_errata1_tlbiall(smmu_domain);
4535 }
Patrick Daly1faa3112017-10-31 16:40:40 -07004536 remote_spin_unlock_irqrestore(&data->errata1_lock, flags);
Patrick Dalyda765c62017-09-11 16:31:07 -07004537
Prakash Gupta25f90512017-11-20 14:56:54 +05304538 trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
Patrick Dalyda765c62017-09-11 16:31:07 -07004539}
4540
4541static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops = {
4542 .tlb_flush_all = qsmmuv500_errata1_tlb_inv_context,
4543 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
4544 .free_pages_exact = arm_smmu_free_pages_exact,
4545};
4546
Patrick Daly8c1202b2017-05-10 15:42:30 -07004547static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu,
4548 struct arm_smmu_domain *smmu_domain)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004549{
4550 unsigned long flags;
Patrick Daly8c1202b2017-05-10 15:42:30 -07004551 u32 halt, fsr, sctlr_orig, sctlr, status;
4552 void __iomem *base, *cb_base;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004553
4554 spin_lock_irqsave(&tbu->halt_lock, flags);
4555 if (tbu->halt_count) {
4556 tbu->halt_count++;
4557 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4558 return 0;
4559 }
4560
Patrick Daly8c1202b2017-05-10 15:42:30 -07004561 cb_base = ARM_SMMU_CB_BASE(smmu_domain->smmu) +
4562 ARM_SMMU_CB(smmu_domain->smmu, smmu_domain->cfg.cbndx);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004563 base = tbu->base;
Patrick Daly8c1202b2017-05-10 15:42:30 -07004564 halt = readl_relaxed(base + DEBUG_SID_HALT_REG);
4565 halt |= DEBUG_SID_HALT_VAL;
4566 writel_relaxed(halt, base + DEBUG_SID_HALT_REG);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004567
Patrick Daly8c1202b2017-05-10 15:42:30 -07004568 if (!readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
4569 (status & DEBUG_SR_HALT_ACK_VAL),
4570 0, TBU_DBG_TIMEOUT_US))
4571 goto out;
4572
4573 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4574 if (!(fsr & FSR_FAULT)) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004575 dev_err(tbu->dev, "Couldn't halt TBU!\n");
4576 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4577 return -ETIMEDOUT;
4578 }
4579
Patrick Daly8c1202b2017-05-10 15:42:30 -07004580 /*
4581 * We are in a fault; Our request to halt the bus will not complete
4582 * until transactions in front of us (such as the fault itself) have
4583 * completed. Disable iommu faults and terminate any existing
4584 * transactions.
4585 */
4586 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
4587 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
4588 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
4589
4590 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
4591 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4592
4593 if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
4594 (status & DEBUG_SR_HALT_ACK_VAL),
4595 0, TBU_DBG_TIMEOUT_US)) {
4596 dev_err(tbu->dev, "Couldn't halt TBU from fault context!\n");
4597 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
4598 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4599 return -ETIMEDOUT;
4600 }
4601
4602 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
4603out:
Patrick Daly1f8a2882016-09-12 17:32:05 -07004604 tbu->halt_count = 1;
4605 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4606 return 0;
4607}
4608
4609static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
4610{
4611 unsigned long flags;
4612 u32 val;
4613 void __iomem *base;
4614
4615 spin_lock_irqsave(&tbu->halt_lock, flags);
4616 if (!tbu->halt_count) {
4617		WARN(1, "%s: bad tbu->halt_count\n", dev_name(tbu->dev));
4618 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4619 return;
4620
4621 } else if (tbu->halt_count > 1) {
4622 tbu->halt_count--;
4623 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4624 return;
4625 }
4626
4627 base = tbu->base;
4628 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
4629 val &= ~DEBUG_SID_HALT_VAL;
4630 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
4631
4632 tbu->halt_count = 0;
4633 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4634}
4635
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004636static struct qsmmuv500_tbu_device *qsmmuv500_find_tbu(
4637 struct arm_smmu_device *smmu, u32 sid)
4638{
4639 struct qsmmuv500_tbu_device *tbu = NULL;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004640 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004641
4642 list_for_each_entry(tbu, &data->tbus, list) {
4643 if (tbu->sid_start <= sid &&
4644 sid < tbu->sid_start + tbu->num_sids)
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07004645 return tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004646 }
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07004647 return NULL;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004648}
4649
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004650static int qsmmuv500_ecats_lock(struct arm_smmu_domain *smmu_domain,
4651 struct qsmmuv500_tbu_device *tbu,
4652 unsigned long *flags)
4653{
4654 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004655 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004656 u32 val;
4657
4658 spin_lock_irqsave(&smmu->atos_lock, *flags);
4659 /* The status register is not accessible on version 1.0 */
4660 if (data->version == 0x01000000)
4661 return 0;
4662
4663 if (readl_poll_timeout_atomic(tbu->status_reg,
4664 val, (val == 0x1), 0,
4665 TBU_DBG_TIMEOUT_US)) {
4666 dev_err(tbu->dev, "ECATS hw busy!\n");
4667 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4668 return -ETIMEDOUT;
4669 }
4670
4671 return 0;
4672}
4673
4674static void qsmmuv500_ecats_unlock(struct arm_smmu_domain *smmu_domain,
4675 struct qsmmuv500_tbu_device *tbu,
4676 unsigned long *flags)
4677{
4678 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004679 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004680
4681 /* The status register is not accessible on version 1.0 */
4682 if (data->version != 0x01000000)
4683 writel_relaxed(0, tbu->status_reg);
4684 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4685}
4686
4687/*
4688 * Zero means failure.
4689 */
4690static phys_addr_t qsmmuv500_iova_to_phys(
4691 struct iommu_domain *domain, dma_addr_t iova, u32 sid)
4692{
4693 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
4694 struct arm_smmu_device *smmu = smmu_domain->smmu;
4695 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
4696 struct qsmmuv500_tbu_device *tbu;
4697 int ret;
4698 phys_addr_t phys = 0;
4699 u64 val, fsr;
4700 unsigned long flags;
4701 void __iomem *cb_base;
4702 u32 sctlr_orig, sctlr;
4703 int needs_redo = 0;
Patrick Daly8c1202b2017-05-10 15:42:30 -07004704 ktime_t timeout;
4705
4706 /* only 36 bit iova is supported */
4707 if (iova >= (1ULL << 36)) {
4708 dev_err_ratelimited(smmu->dev, "ECATS: address too large: %pad\n",
4709 &iova);
4710 return 0;
4711 }
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004712
4713 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
4714 tbu = qsmmuv500_find_tbu(smmu, sid);
4715 if (!tbu)
4716 return 0;
4717
4718 ret = arm_smmu_power_on(tbu->pwr);
4719 if (ret)
4720 return 0;
4721
Patrick Daly8c1202b2017-05-10 15:42:30 -07004722 ret = qsmmuv500_tbu_halt(tbu, smmu_domain);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004723 if (ret)
4724 goto out_power_off;
4725
Patrick Daly8c1202b2017-05-10 15:42:30 -07004726 /*
4727 * ECATS can trigger the fault interrupt, so disable it temporarily
4728 * and check for an interrupt manually.
4729 */
4730 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
4731 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
4732 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
4733
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004734 /* Only one concurrent atos operation */
4735 ret = qsmmuv500_ecats_lock(smmu_domain, tbu, &flags);
4736 if (ret)
4737 goto out_resume;
4738
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004739redo:
4740 /* Set address and stream-id */
4741 val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
4742 val |= sid & DEBUG_SID_HALT_SID_MASK;
4743 writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
4744 writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
4745
4746 /*
4747 * Write-back Read and Write-Allocate
4748	 * Privileged, non-secure, data transaction
4749 * Read operation.
4750 */
4751 val = 0xF << DEBUG_TXN_AXCACHE_SHIFT;
4752 val |= 0x3 << DEBUG_TXN_AXPROT_SHIFT;
4753 val |= DEBUG_TXN_TRIGGER;
4754 writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);
4755
4756 ret = 0;
Patrick Daly8c1202b2017-05-10 15:42:30 -07004757	/* Open-coded poll loop based on readx_poll_timeout_atomic() */
4758 timeout = ktime_add_us(ktime_get(), TBU_DBG_TIMEOUT_US);
4759 for (;;) {
4760 val = readl_relaxed(tbu->base + DEBUG_SR_HALT_ACK_REG);
4761 if (!(val & DEBUG_SR_ECATS_RUNNING_VAL))
4762 break;
4763 val = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4764 if (val & FSR_FAULT)
4765 break;
4766 if (ktime_compare(ktime_get(), timeout) > 0) {
4767 dev_err(tbu->dev, "ECATS translation timed out!\n");
4768 ret = -ETIMEDOUT;
4769 break;
4770 }
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004771 }
4772
4773 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4774 if (fsr & FSR_FAULT) {
4775 dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx\n",
Patrick Daly8c1202b2017-05-10 15:42:30 -07004776 fsr);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004777 ret = -EINVAL;
4778
4779		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
4780 /*
4781 * Clear pending interrupts
4782 * Barrier required to ensure that the FSR is cleared
4783 * before resuming SMMU operation
4784 */
4785 wmb();
4786 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4787 }
4788
4789 val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
4790 if (val & DEBUG_PAR_FAULT_VAL) {
4791 dev_err(tbu->dev, "ECATS translation failed! PAR = %llx\n",
4792 val);
4793 ret = -EINVAL;
4794 }
4795
4796 phys = (val >> DEBUG_PAR_PA_SHIFT) & DEBUG_PAR_PA_MASK;
4797 if (ret < 0)
4798 phys = 0;
4799
4800 /* Reset hardware */
4801 writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
4802 writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
4803
4804 /*
4805 * After a failed translation, the next successful translation will
4806 * incorrectly be reported as a failure.
4807 */
4808 if (!phys && needs_redo++ < 2)
4809 goto redo;
4810
4811 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
4812 qsmmuv500_ecats_unlock(smmu_domain, tbu, &flags);
4813
4814out_resume:
4815 qsmmuv500_tbu_resume(tbu);
4816
4817out_power_off:
4818 arm_smmu_power_off(tbu->pwr);
4819
4820 return phys;
4821}
4822
4823static phys_addr_t qsmmuv500_iova_to_phys_hard(
4824 struct iommu_domain *domain, dma_addr_t iova)
4825{
4826 u16 sid;
4827 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
4828 struct iommu_fwspec *fwspec;
4829
4830 /* Select a sid */
4831 fwspec = smmu_domain->dev->iommu_fwspec;
4832 sid = (u16)fwspec->ids[0];
4833
4834 return qsmmuv500_iova_to_phys(domain, iova, sid);
4835}
4836
Patrick Daly03330cc2017-08-11 14:56:38 -07004837static void qsmmuv500_release_group_iommudata(void *data)
4838{
4839 kfree(data);
4840}
4841
4842/* If multiple devices in a group have ACTLR settings, they must all match */
4843static int qsmmuv500_device_group(struct device *dev,
4844 struct iommu_group *group)
4845{
4846 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
4847 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
4848 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
4849 struct qsmmuv500_group_iommudata *iommudata;
4850 u32 actlr, i;
4851 struct arm_smmu_smr *smr;
4852
4853 iommudata = to_qsmmuv500_group_iommudata(group);
4854 if (!iommudata) {
4855 iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
4856 if (!iommudata)
4857 return -ENOMEM;
4858
4859 iommu_group_set_iommudata(group, iommudata,
4860 qsmmuv500_release_group_iommudata);
4861 }
4862
4863 for (i = 0; i < data->actlr_tbl_size; i++) {
4864 smr = &data->actlrs[i].smr;
4865 actlr = data->actlrs[i].actlr;
4866
4867 if (!arm_smmu_fwspec_match_smr(fwspec, smr))
4868 continue;
4869
4870 if (!iommudata->has_actlr) {
4871 iommudata->actlr = actlr;
4872 iommudata->has_actlr = true;
4873 } else if (iommudata->actlr != actlr) {
4874 return -EINVAL;
4875 }
4876 }
4877
4878 return 0;
4879}
4880
4881static void qsmmuv500_init_cb(struct arm_smmu_domain *smmu_domain,
4882 struct device *dev)
4883{
4884 struct arm_smmu_device *smmu = smmu_domain->smmu;
4885 struct qsmmuv500_group_iommudata *iommudata =
4886 to_qsmmuv500_group_iommudata(dev->iommu_group);
4887 void __iomem *cb_base;
4888 const struct iommu_gather_ops *tlb;
4889
4890 if (!iommudata->has_actlr)
4891 return;
4892
4893 tlb = smmu_domain->pgtbl_cfg.tlb;
4894 cb_base = ARM_SMMU_CB_BASE(smmu) +
4895 ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
4896
4897 writel_relaxed(iommudata->actlr, cb_base + ARM_SMMU_CB_ACTLR);
4898
4899 /*
Patrick Daly23301482017-10-12 16:18:25 -07004900 * Prefetch only works properly if the start and end of all
4901	 * buffers in the page table are aligned to 16 KB.
4902 */
Patrick Daly27bd9292017-11-22 13:59:59 -08004903 if ((iommudata->actlr >> QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT) &
Patrick Daly23301482017-10-12 16:18:25 -07004904 QSMMUV500_ACTLR_DEEP_PREFETCH_MASK)
4905 smmu_domain->qsmmuv500_errata2_min_align = true;
4906
4907 /*
Patrick Daly03330cc2017-08-11 14:56:38 -07004908 * Flush the context bank after modifying ACTLR to ensure there
4909 * are no cache entries with stale state
4910 */
4911 tlb->tlb_flush_all(smmu_domain);
4912}
4913
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004914static int qsmmuv500_tbu_register(struct device *dev, void *cookie)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004915{
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004916 struct arm_smmu_device *smmu = cookie;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004917 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004918 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004919
4920 if (!dev->driver) {
4921 dev_err(dev, "TBU failed probe, QSMMUV500 cannot continue!\n");
4922 return -EINVAL;
4923 }
4924
4925 tbu = dev_get_drvdata(dev);
4926
4927 INIT_LIST_HEAD(&tbu->list);
4928 tbu->smmu = smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004929 list_add(&tbu->list, &data->tbus);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004930 return 0;
4931}
4932
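/*
 * qsmmuv500_parse_errata1() below reads "qcom,mmu500-errata-1" as a list of
 * <sid mask> pairs naming the stream IDs that need the errata-1 TLB
 * invalidation workaround. Hypothetical example (values invented purely for
 * illustration):
 *
 *	qcom,mmu500-errata-1 = <0x800 0x7f>;
 *
 * i.e. one client entry matching SID 0x800 with mask 0x7f.
 */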
Patrick Dalyda765c62017-09-11 16:31:07 -07004933static int qsmmuv500_parse_errata1(struct arm_smmu_device *smmu)
4934{
4935 int len, i;
4936 struct device *dev = smmu->dev;
4937 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
4938 struct arm_smmu_smr *smrs;
4939 const __be32 *cell;
4940
4941 cell = of_get_property(dev->of_node, "qcom,mmu500-errata-1", NULL);
4942 if (!cell)
4943 return 0;
4944
4945 remote_spin_lock_init(&data->errata1_lock, ERRATA1_REMOTE_SPINLOCK);
4946 len = of_property_count_elems_of_size(
4947 dev->of_node, "qcom,mmu500-errata-1", sizeof(u32) * 2);
4948 if (len < 0)
4949 return 0;
4950
4951 smrs = devm_kzalloc(dev, sizeof(*smrs) * len, GFP_KERNEL);
4952 if (!smrs)
4953 return -ENOMEM;
4954
4955 for (i = 0; i < len; i++) {
4956 smrs[i].id = of_read_number(cell++, 1);
4957 smrs[i].mask = of_read_number(cell++, 1);
4958 }
4959
4960 data->errata1_clients = smrs;
4961 data->num_errata1_clients = len;
4962 return 0;
4963}
4964
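/*
 * qsmmuv500_read_actlr_tbl() below parses "qcom,actlr" as <sid mask actlr>
 * triples; a stream ID matching an entry has that ACTLR value programmed into
 * its context bank by qsmmuv500_init_cb(). Example values here are assumptions
 * for illustration only:
 *
 *	qcom,actlr = <0x880 0x8 0x303>;
 */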
Patrick Daly03330cc2017-08-11 14:56:38 -07004965static int qsmmuv500_read_actlr_tbl(struct arm_smmu_device *smmu)
4966{
4967 int len, i;
4968 struct device *dev = smmu->dev;
4969 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
4970 struct actlr_setting *actlrs;
4971 const __be32 *cell;
4972
4973 cell = of_get_property(dev->of_node, "qcom,actlr", NULL);
4974 if (!cell)
4975 return 0;
4976
4977 len = of_property_count_elems_of_size(dev->of_node, "qcom,actlr",
4978 sizeof(u32) * 3);
4979 if (len < 0)
4980 return 0;
4981
4982 actlrs = devm_kzalloc(dev, sizeof(*actlrs) * len, GFP_KERNEL);
4983 if (!actlrs)
4984 return -ENOMEM;
4985
4986 for (i = 0; i < len; i++) {
4987 actlrs[i].smr.id = of_read_number(cell++, 1);
4988 actlrs[i].smr.mask = of_read_number(cell++, 1);
4989 actlrs[i].actlr = of_read_number(cell++, 1);
4990 }
4991
4992 data->actlrs = actlrs;
4993 data->actlr_tbl_size = len;
4994 return 0;
4995}
4996
Patrick Daly1f8a2882016-09-12 17:32:05 -07004997static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
4998{
Patrick Dalya0fddb62017-03-27 19:26:59 -07004999 struct resource *res;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005000 struct device *dev = smmu->dev;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005001 struct qsmmuv500_archdata *data;
Patrick Dalya0fddb62017-03-27 19:26:59 -07005002 struct platform_device *pdev;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005003 int ret;
Patrick Daly03330cc2017-08-11 14:56:38 -07005004 u32 val;
5005 void __iomem *reg;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005006
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005007 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
5008 if (!data)
Patrick Daly1f8a2882016-09-12 17:32:05 -07005009 return -ENOMEM;
5010
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005011 INIT_LIST_HEAD(&data->tbus);
Patrick Dalya0fddb62017-03-27 19:26:59 -07005012
5013 pdev = container_of(dev, struct platform_device, dev);
5014 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcu-base");
5015 data->tcu_base = devm_ioremap_resource(dev, res);
5016 if (IS_ERR(data->tcu_base))
5017 return PTR_ERR(data->tcu_base);
5018
5019 data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005020 smmu->archdata = data;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005021
Patrick Dalyda765c62017-09-11 16:31:07 -07005022 ret = qsmmuv500_parse_errata1(smmu);
5023 if (ret)
5024 return ret;
5025
Patrick Daly03330cc2017-08-11 14:56:38 -07005026 ret = qsmmuv500_read_actlr_tbl(smmu);
5027 if (ret)
5028 return ret;
5029
5030 reg = ARM_SMMU_GR0(smmu);
5031 val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
5032 val &= ~ARM_MMU500_ACR_CACHE_LOCK;
5033 writel_relaxed(val, reg + ARM_SMMU_GR0_sACR);
5034 val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
5035 /*
5036	 * Modifying the non-secure copy of the sACR register is only
5037 * allowed if permission is given in the secure sACR register.
5038 * Attempt to detect if we were able to update the value.
5039 */
5040 WARN_ON(val & ARM_MMU500_ACR_CACHE_LOCK);
5041
Patrick Daly1f8a2882016-09-12 17:32:05 -07005042 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
5043 if (ret)
5044 return ret;
5045
5046 /* Attempt to register child devices */
5047 ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
5048 if (ret)
Patrick Daly6ce54262017-04-12 21:24:06 -07005049 return -EPROBE_DEFER;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005050
5051 return 0;
5052}
5053
5054struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
5055 .init = qsmmuv500_arch_init,
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005056 .iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
Patrick Daly03330cc2017-08-11 14:56:38 -07005057 .init_context_bank = qsmmuv500_init_cb,
5058 .device_group = qsmmuv500_device_group,
Patrick Daly1f8a2882016-09-12 17:32:05 -07005059};
5060
5061static const struct of_device_id qsmmuv500_tbu_of_match[] = {
5062 {.compatible = "qcom,qsmmuv500-tbu"},
5063 {}
5064};
5065
5066static int qsmmuv500_tbu_probe(struct platform_device *pdev)
5067{
5068 struct resource *res;
5069 struct device *dev = &pdev->dev;
5070 struct qsmmuv500_tbu_device *tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005071 const __be32 *cell;
5072 int len;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005073
5074 tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
5075 if (!tbu)
5076 return -ENOMEM;
5077
5078 INIT_LIST_HEAD(&tbu->list);
5079 tbu->dev = dev;
5080 spin_lock_init(&tbu->halt_lock);
5081
5082 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
5083 tbu->base = devm_ioremap_resource(dev, res);
5084 if (IS_ERR(tbu->base))
5085 return PTR_ERR(tbu->base);
5086
5087 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "status-reg");
5088 tbu->status_reg = devm_ioremap_resource(dev, res);
5089 if (IS_ERR(tbu->status_reg))
5090 return PTR_ERR(tbu->status_reg);
5091
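	/*
	 * "qcom,stream-id-range" must hold a <start count> pair describing
	 * the SIDs routed through this TBU, e.g. (illustrative values only):
	 * qcom,stream-id-range = <0x800 0x400>;
	 */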
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005092 cell = of_get_property(dev->of_node, "qcom,stream-id-range", &len);
5093 if (!cell || len < 8)
5094 return -EINVAL;
5095
5096 tbu->sid_start = of_read_number(cell, 1);
5097 tbu->num_sids = of_read_number(cell + 1, 1);
5098
Patrick Daly1f8a2882016-09-12 17:32:05 -07005099 tbu->pwr = arm_smmu_init_power_resources(pdev);
5100 if (IS_ERR(tbu->pwr))
5101 return PTR_ERR(tbu->pwr);
5102
5103 dev_set_drvdata(dev, tbu);
5104 return 0;
5105}
5106
5107static struct platform_driver qsmmuv500_tbu_driver = {
5108 .driver = {
5109 .name = "qsmmuv500-tbu",
5110 .of_match_table = of_match_ptr(qsmmuv500_tbu_of_match),
5111 },
5112 .probe = qsmmuv500_tbu_probe,
5113};
5114
Will Deacon45ae7cf2013-06-24 18:31:25 +01005115MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
5116MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
5117MODULE_LICENSE("GPL v2");