/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/of_platform.h>
#include <linux/msm-bus.h>
#include <dt-bindings/msm/msm-bus-ids.h>
#include <linux/remote_spinlock.h>
#include <linux/ktime.h>
#include <trace/events/iommu.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3
#define sCR0_SHCFG_SHIFT		22
#define sCR0_SHCFG_MASK			0x3
#define sCR0_SHCFG_NSH			3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		500000	/* 500ms */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7FFF
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_SHCFG_SHIFT		8
#define S2CR_SHCFG_MASK			0x3
#define S2CR_SHCFG_NSH			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FSRRESTORE		0x5c
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIALL		0x618
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define TLBSTATUS_SACTIVE		(1 << 0)
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_SHCFG_SHIFT		22
#define SCTLR_SHCFG_MASK		0x3
#define SCTLR_SHCFG_NSH			0x3
#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_HUPCF			(1 << 8)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
	QCOM_SMMUV500,
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

/*
 * attach_count
 *	The SMR and S2CR registers are only programmed when the number of
 *	devices attached to the iommu using these registers is > 0. This
 *	is required for the "SID switch" use case for secure display.
 *	Protected by stream_map_mutex.
 */
struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	int				attach_count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
	bool				cb_handoff;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
	.cb_handoff = false,						\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

/*
 * Describes resources required for on/off power operation.
 * Separate reference count is provided for atomic/nonatomic
 * operations.
 */
struct arm_smmu_power_resources {
	struct platform_device		*pdev;
	struct device			*dev;

	struct clk			**clocks;
	int				num_clocks;

	struct regulator_bulk_data	*gdscs;
	int				num_gdscs;

	uint32_t			bus_client;
	struct msm_bus_scale_pdata	*bus_dt_data;

	/* Protects power_count */
	struct mutex			power_lock;
	int				power_count;

	/* Protects clock_refs_count */
	spinlock_t			clock_refs_lock;
	int				clock_refs_count;
	int				regulator_defer;
};

struct arm_smmu_arch_ops;
struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
#define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
#define ARM_SMMU_OPT_3LVL_TABLES	(1 << 4)
#define ARM_SMMU_OPT_NO_ASID_RETENTION	(1 << 5)
#define ARM_SMMU_OPT_DISABLE_ATOS	(1 << 6)
#define ARM_SMMU_OPT_MMU500_ERRATA1	(1 << 7)
#define ARM_SMMU_OPT_STATIC_CB		(1 << 8)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;

	u32				cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
	unsigned int			num_impl_def_attach_registers;

	struct arm_smmu_power_resources *pwr;

	spinlock_t			atos_lock;

	/* protects idr */
	struct mutex			idr_mutex;
	struct idr			asid_idr;

	struct arm_smmu_arch_ops	*arch_ops;
	void				*archdata;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	u32				procid;
	u16				asid;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff
#define INVALID_CBNDX			0xff
#define INVALID_ASID			0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_pte_info {
	void *virt_addr;
	size_t size;
	struct list_head entry;
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct device			*dev;
	struct io_pgtable_ops		*pgtbl_ops;
	struct io_pgtable_cfg		pgtbl_cfg;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	u32				attributes;
	u32				secure_vmid;
	struct list_head		pte_info_list;
	struct list_head		unassign_list;
	struct mutex			assign_lock;
	struct list_head		secure_pool_list;
	struct iommu_domain		domain;

	bool				qsmmuv500_errata1_init;
	bool				qsmmuv500_errata1_client;
	bool				qsmmuv500_errata2_min_align;
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
	{ ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
	{ ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" },
	{ ARM_SMMU_OPT_MMU500_ERRATA1, "qcom,mmu500-errata-1" },
	{ ARM_SMMU_OPT_STATIC_CB, "qcom,enable-static-cb"},
	{ 0, NULL},
};

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);

static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
				     dma_addr_t iova);

static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);

static int arm_smmu_alloc_cb(struct iommu_domain *domain,
			     struct arm_smmu_device *smmu,
			     struct device *dev);
static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops;

static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_dbg(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
{
	if (smmu_domain->attributes &
			(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT))
		return true;
	else if (smmu_domain->smmu && smmu_domain->smmu->dev)
		return smmu_domain->smmu->dev->archdata.dma_coherent;
	else
		return false;
}

static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu)
{
	return smmu->options & ARM_SMMU_OPT_STATIC_CB;
}

static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
{
	return (smmu_domain->secure_vmid != VMID_INVAL);
}

static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_lock(&smmu_domain->assign_lock);
}

static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_unlock(&smmu_domain->assign_lock);
}

/*
 * init()
 * Hook for additional device tree parsing at probe time.
 *
 * device_reset()
 * Hook for one-time architecture-specific register settings.
 *
 * iova_to_phys_hard()
 * Provides debug information. May be called from the context fault irq handler.
 *
 * init_context_bank()
 * Hook for architecture-specific settings which require knowledge of the
 * dynamically allocated context bank number.
 *
 * device_group()
 * Hook for checking whether a device is compatible with a given group.
 */
struct arm_smmu_arch_ops {
	int (*init)(struct arm_smmu_device *smmu);
	void (*device_reset)(struct arm_smmu_device *smmu);
	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
					 dma_addr_t iova);
	void (*init_context_bank)(struct arm_smmu_domain *smmu_domain,
				  struct device *dev);
	int (*device_group)(struct device *dev, struct iommu_group *group);
};

static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
{
	if (!smmu->arch_ops)
		return 0;
	if (!smmu->arch_ops->init)
		return 0;
	return smmu->arch_ops->init(smmu);
}

static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
{
	if (!smmu->arch_ops)
		return;
	if (!smmu->arch_ops->device_reset)
		return;
	return smmu->arch_ops->device_reset(smmu);
}

static void arm_smmu_arch_init_context_bank(
		struct arm_smmu_domain *smmu_domain, struct device *dev)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (!smmu->arch_ops)
		return;
	if (!smmu->arch_ops->init_context_bank)
		return;
	return smmu->arch_ops->init_context_bank(smmu_domain, dev);
}

static int arm_smmu_arch_device_group(struct device *dev,
					struct iommu_group *group)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);

	if (!smmu->arch_ops)
		return 0;
	if (!smmu->arch_ops->device_group)
		return 0;
	return smmu->arch_ops->device_group(dev, group);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

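/*
 * Callback for driver_for_each_device(): walk this SMMU's "mmu-masters"
 * phandle list and report a match when it names the master being probed.
 */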
static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

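/*
 * Legacy "mmu-masters" binding support: locate the SMMU that lists this
 * master, then initialise the device's iommu_fwspec with its stream IDs.
 */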
static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err = 0;

	memset(&it, 0, sizeof(it));
	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

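/* Atomically claim a free index in @map, or return -ENOSPC if none is left. */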
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_prepare(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
			while (i--)
				clk_unprepare(pwr->clocks[i]);
			break;
		}
	}
	return ret;
}

static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_unprepare(pwr->clocks[i - 1]);
}

static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_enable(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
			while (i--)
				clk_disable(pwr->clocks[i]);
			break;
		}
	}

	return ret;
}

static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_disable(pwr->clocks[i - 1]);
}

static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return 0;
	return msm_bus_scale_client_update_request(pwr->bus_client, 1);
}

static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return;
	WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
}

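/* Enable the GDSC regulators in order, rolling back on the first failure. */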
static int arm_smmu_enable_regulators(struct arm_smmu_power_resources *pwr)
{
	struct regulator_bulk_data *consumers;
	int num_consumers, ret;
	int i;

	num_consumers = pwr->num_gdscs;
	consumers = pwr->gdscs;
	for (i = 0; i < num_consumers; i++) {
		ret = regulator_enable(consumers[i].consumer);
		if (ret)
			goto out;
	}
	return 0;

out:
	i -= 1;
	for (; i >= 0; i--)
		regulator_disable(consumers[i].consumer);
	return ret;
}

static int arm_smmu_disable_regulators(struct arm_smmu_power_resources *pwr)
{
	struct regulator_bulk_data *consumers;
	int i;
	int num_consumers, ret, r;

	num_consumers = pwr->num_gdscs;
	consumers = pwr->gdscs;
	for (i = num_consumers - 1; i >= 0; --i) {
		ret = regulator_disable_deferred(consumers[i].consumer,
						 pwr->regulator_defer);
		if (ret != 0)
			goto err;
	}

	return 0;

err:
	pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret);
	for (++i; i < num_consumers; ++i) {
		r = regulator_enable(consumers[i].consumer);
		if (r != 0)
			pr_err("Failed to re-enable %s: %d\n",
			       consumers[i].supply, r);
	}

	return ret;
}

/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count > 0) {
		pwr->clock_refs_count++;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return 0;
	}

	ret = arm_smmu_enable_clocks(pwr);
	if (!ret)
		pwr->clock_refs_count = 1;

	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
	return ret;
}

/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
{
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count == 0) {
		WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;

	} else if (pwr->clock_refs_count > 1) {
		pwr->clock_refs_count--;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;
	}

	arm_smmu_disable_clocks(pwr);

	pwr->clock_refs_count = 0;
	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
}

static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
{
	int ret;

	mutex_lock(&pwr->power_lock);
	if (pwr->power_count > 0) {
		pwr->power_count += 1;
		mutex_unlock(&pwr->power_lock);
		return 0;
	}

	ret = arm_smmu_request_bus(pwr);
	if (ret)
		goto out_unlock;

	ret = arm_smmu_enable_regulators(pwr);
	if (ret)
		goto out_disable_bus;

	ret = arm_smmu_prepare_clocks(pwr);
	if (ret)
		goto out_disable_regulators;

	pwr->power_count = 1;
	mutex_unlock(&pwr->power_lock);
	return 0;

out_disable_regulators:
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
out_disable_bus:
	arm_smmu_unrequest_bus(pwr);
out_unlock:
	mutex_unlock(&pwr->power_lock);
	return ret;
}

static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
{
	mutex_lock(&pwr->power_lock);
	if (pwr->power_count == 0) {
		WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
		mutex_unlock(&pwr->power_lock);
		return;

	} else if (pwr->power_count > 1) {
		pwr->power_count--;
		mutex_unlock(&pwr->power_lock);
		return;
	}

	arm_smmu_unprepare_clocks(pwr);
	arm_smmu_disable_regulators(pwr);
	arm_smmu_unrequest_bus(pwr);
	pwr->power_count = 0;
	mutex_unlock(&pwr->power_lock);
}

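/* Full power-up: slow (bus/regulator/clock prepare) path first, then clocks. */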
static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
{
	int ret;

	ret = arm_smmu_power_on_slow(pwr);
	if (ret)
		return ret;

	ret = arm_smmu_power_on_atomic(pwr);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	arm_smmu_power_off_slow(pwr);
	return ret;
}

static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
{
	arm_smmu_power_off_atomic(pwr);
	arm_smmu_power_off_slow(pwr);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static int arm_smmu_domain_power_on(struct iommu_domain *domain,
				struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain)
		return arm_smmu_power_on_atomic(smmu->pwr);

	return arm_smmu_power_on(smmu->pwr);
}

/*
 * Must be used instead of arm_smmu_power_off if it may be called from
 * atomic context
 */
static void arm_smmu_domain_power_off(struct iommu_domain *domain,
				struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain) {
		arm_smmu_power_off_atomic(smmu->pwr);
		return;
	}

	arm_smmu_power_off(smmu->pwr);
}

/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
				int cbndx)
{
	void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
	u32 val;

	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
				      !(val & TLBSTATUS_SACTIVE),
				      0, TLB_LOOP_TIMEOUT)) {
		trace_tlbsync_timeout(smmu->dev, 0);
		dev_err(smmu->dev, "TLBSYNC timeout!\n");
	}
}

static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

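/* io-pgtable tlb_sync hook: wait on the domain's own context bank. */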
static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
}

/* Must be called with clocks/regulators enabled */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct device *dev = smmu_domain->dev;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;
	bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
	ktime_t cur = ktime_get();

	trace_tlbi_start(dev, 0);

	if (stage1 && !use_tlbiall) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else if (stage1 && use_tlbiall) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
		__arm_smmu_tlb_sync(smmu);
	}

	trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
}

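/*
 * Invalidate a VA/IPA range without waiting for completion; the register
 * used depends on the translation stage and on whether the no-ASID-retention
 * workaround forces a full TLBIALL instead.
 */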
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;
	bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;

	if (stage1 && !use_tlbiall) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (stage1 && use_tlbiall) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += ARM_SMMU_CB_S1_TLBIALL;
		writel_relaxed(0, reg);
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

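/*
 * Page-table memory for secure domains is kept on a per-domain pool so that
 * previously assigned allocations can be reused instead of being freed and
 * re-assigned on every table allocation.
 */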
struct arm_smmu_secure_pool_chunk {
	void *addr;
	size_t size;
	struct list_head list;
};

static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
					size_t size)
{
	struct arm_smmu_secure_pool_chunk *it;

	list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
		if (it->size == size) {
			void *addr = it->addr;

			list_del(&it->list);
			kfree(it);
			return addr;
		}
	}

	return NULL;
}

static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
				     void *addr, size_t size)
{
	struct arm_smmu_secure_pool_chunk *chunk;

	chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
	if (!chunk)
		return -ENOMEM;

	chunk->addr = addr;
	chunk->size = size;
	memset(addr, 0, size);
	list_add(&chunk->list, &smmu_domain->secure_pool_list);

	return 0;
}

static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_secure_pool_chunk *it, *i;

	list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
		arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
		/* pages will be freed later (after being unassigned) */
		list_del(&it->list);
		kfree(it);
	}
}

Patrick Dalyc11d1082016-09-01 15:52:44 -07001277static void *arm_smmu_alloc_pages_exact(void *cookie,
1278 size_t size, gfp_t gfp_mask)
1279{
1280 int ret;
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001281 void *page;
1282 struct arm_smmu_domain *smmu_domain = cookie;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001283
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001284 if (!arm_smmu_is_domain_secure(smmu_domain))
1285 return alloc_pages_exact(size, gfp_mask);
1286
1287 page = arm_smmu_secure_pool_remove(smmu_domain, size);
1288 if (page)
1289 return page;
1290
1291 page = alloc_pages_exact(size, gfp_mask);
1292 if (page) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07001293 ret = arm_smmu_prepare_pgtable(page, cookie);
1294 if (ret) {
1295 free_pages_exact(page, size);
1296 return NULL;
1297 }
1298 }
1299
1300 return page;
1301}
1302
1303static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
1304{
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001305 struct arm_smmu_domain *smmu_domain = cookie;
1306
1307 if (!arm_smmu_is_domain_secure(smmu_domain)) {
1308 free_pages_exact(virt, size);
1309 return;
1310 }
1311
1312 if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
1313 arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001314}
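/*
 * Reading aid for the secure-domain page-table memory flow above (this
 * describes only what the helpers already implement, no new behaviour):
 *
 *	alloc:	 secure pool hit -> reuse a previously assigned chunk
 *		 pool miss       -> alloc_pages_exact() followed by
 *				    arm_smmu_prepare_pgtable(), which queues the
 *				    page for hyp_assign in arm_smmu_assign_table()
 *	free:	 arm_smmu_secure_pool_add() caches the (zeroed) chunk for
 *		 reuse; if that fails, arm_smmu_unprepare_pgtable() queues the
 *		 page to be handed back to HLOS
 *	destroy: arm_smmu_secure_pool_destroy() unprepares every cached chunk
 *		 so the final unassign can return the pages
 */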
1315
Will Deacon518f7132014-11-14 17:17:54 +00001316static struct iommu_gather_ops arm_smmu_gather_ops = {
1317 .tlb_flush_all = arm_smmu_tlb_inv_context,
1318 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
1319 .tlb_sync = arm_smmu_tlb_sync,
Patrick Dalyc11d1082016-09-01 15:52:44 -07001320 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
1321 .free_pages_exact = arm_smmu_free_pages_exact,
Will Deacon518f7132014-11-14 17:17:54 +00001322};
1323
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001324static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
1325 dma_addr_t iova, u32 fsr)
1326{
1327 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001328 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyda765c62017-09-11 16:31:07 -07001329 const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001330 phys_addr_t phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07001331 phys_addr_t phys_post_tlbiall;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001332
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001333 phys = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyda765c62017-09-11 16:31:07 -07001334 tlb->tlb_flush_all(smmu_domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001335 phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001336
Patrick Dalyad441dd2016-09-15 15:50:46 -07001337 if (phys != phys_post_tlbiall) {
1338 dev_err(smmu->dev,
1339 "ATOS results differed across TLBIALL...\n"
1340 "Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
1341 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001342
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001343 return (phys == 0 ? phys_post_tlbiall : phys);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001344}
1345
Will Deacon45ae7cf2013-06-24 18:31:25 +01001346static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
1347{
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001348 int flags, ret, tmp;
Patrick Daly5ba28112016-08-30 19:18:52 -07001349 u32 fsr, fsynr, resume;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001350 unsigned long iova;
1351 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +01001352 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001353 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1354 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001355 void __iomem *cb_base;
Shalaj Jain04059c52015-03-03 13:34:59 -08001356 void __iomem *gr1_base;
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001357 bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001358 phys_addr_t phys_soft;
Shalaj Jain04059c52015-03-03 13:34:59 -08001359 u32 frsynra;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001360 bool non_fatal_fault = !!(smmu_domain->attributes &
Sudarshan Rajagopalanf4464e02017-08-10 14:30:39 -07001361 (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001362
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001363 static DEFINE_RATELIMIT_STATE(_rs,
1364 DEFAULT_RATELIMIT_INTERVAL,
1365 DEFAULT_RATELIMIT_BURST);
1366
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001367 ret = arm_smmu_power_on(smmu->pwr);
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001368 if (ret)
1369 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001370
Shalaj Jain04059c52015-03-03 13:34:59 -08001371 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001372 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001373 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
1374
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001375 if (!(fsr & FSR_FAULT)) {
1376 ret = IRQ_NONE;
1377 goto out_power_off;
1378 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001379
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001380 if (fatal_asf && (fsr & FSR_ASF)) {
1381 dev_err(smmu->dev,
1382 "Took an address size fault. Refusing to recover.\n");
1383 BUG();
1384 }
1385
Will Deacon45ae7cf2013-06-24 18:31:25 +01001386 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
Patrick Daly5ba28112016-08-30 19:18:52 -07001387 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001388 if (fsr & FSR_TF)
1389 flags |= IOMMU_FAULT_TRANSLATION;
1390 if (fsr & FSR_PF)
1391 flags |= IOMMU_FAULT_PERMISSION;
Mitchel Humpherysdd75e332015-08-19 11:02:33 -07001392 if (fsr & FSR_EF)
1393 flags |= IOMMU_FAULT_EXTERNAL;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001394 if (fsr & FSR_SS)
1395 flags |= IOMMU_FAULT_TRANSACTION_STALLED;
Patrick Daly5ba28112016-08-30 19:18:52 -07001396
Robin Murphyf9a05f02016-04-13 18:13:01 +01001397 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001398 phys_soft = arm_smmu_iova_to_phys(domain, iova);
Shalaj Jain04059c52015-03-03 13:34:59 -08001399 frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
1400 frsynra &= CBFRSYNRA_SID_MASK;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001401 tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
1402 if (!tmp || (tmp == -EBUSY)) {
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001403 dev_dbg(smmu->dev,
1404 "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1405 iova, fsr, fsynr, cfg->cbndx);
1406 dev_dbg(smmu->dev,
1407 "soft iova-to-phys=%pa\n", &phys_soft);
Patrick Daly5ba28112016-08-30 19:18:52 -07001408 ret = IRQ_HANDLED;
Shrenuj Bansald5083c02015-09-18 14:59:09 -07001409 resume = RESUME_TERMINATE;
Patrick Daly5ba28112016-08-30 19:18:52 -07001410 } else {
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001411 phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
1412 fsr);
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001413 if (__ratelimit(&_rs)) {
1414 dev_err(smmu->dev,
1415 "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1416 iova, fsr, fsynr, cfg->cbndx);
1417 dev_err(smmu->dev, "FAR = %016lx\n",
1418 (unsigned long)iova);
1419 dev_err(smmu->dev,
1420 "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
1421 fsr,
1422 (fsr & 0x02) ? "TF " : "",
1423 (fsr & 0x04) ? "AFF " : "",
1424 (fsr & 0x08) ? "PF " : "",
1425 (fsr & 0x10) ? "EF " : "",
1426 (fsr & 0x20) ? "TLBMCF " : "",
1427 (fsr & 0x40) ? "TLBLKF " : "",
1428 (fsr & 0x80) ? "MHF " : "",
1429 (fsr & 0x40000000) ? "SS " : "",
1430 (fsr & 0x80000000) ? "MULTI " : "");
1431 dev_err(smmu->dev,
1432 "soft iova-to-phys=%pa\n", &phys_soft);
Mitchel Humpherysd03b65d2015-11-05 11:50:29 -08001433 if (!phys_soft)
1434 dev_err(smmu->dev,
1435 "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
1436 dev_name(smmu->dev));
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001437 if (phys_atos)
1438 dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
1439 &phys_atos);
1440 else
1441 dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001442 dev_err(smmu->dev, "SID=0x%x\n", frsynra);
1443 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001444 ret = IRQ_NONE;
1445 resume = RESUME_TERMINATE;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001446 if (!non_fatal_fault) {
1447 dev_err(smmu->dev,
1448 "Unhandled arm-smmu context fault!\n");
1449 BUG();
1450 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001451 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001452
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001453 /*
1454 * If the client returns -EBUSY, do not clear FSR and do not RESUME
1455 * if stalled. This is required to keep the IOMMU client stalled on
1456 * the outstanding fault. This gives the client a chance to take any
1457 * debug action and then terminate the stalled transaction.
1458 * So, the sequence in case of stall on fault should be:
1459 * 1) Do not clear FSR or write to RESUME here
1460 * 2) Client takes any debug action
1461 * 3) Client terminates the stalled transaction and resumes the IOMMU
1462 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1463 * not before so that the fault remains outstanding. This ensures
1464 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1465 * need to be terminated.
1466 */
1467 if (tmp != -EBUSY) {
1468 /* Clear the faulting FSR */
1469 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001470
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001471 /*
1472 * Barrier required to ensure that the FSR is cleared
1473 * before resuming SMMU operation
1474 */
1475 wmb();
1476
1477 /* Retry or terminate any stalled transactions */
1478 if (fsr & FSR_SS)
1479 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1480 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001481
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001482out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001483 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001484
Patrick Daly5ba28112016-08-30 19:18:52 -07001485 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001486}
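/*
 * Illustrative sketch of a client-side fault handler that follows the
 * stall-on-fault sequence documented above. It is not part of this driver:
 * the example_client_* names and the structure they hang off are
 * hypothetical; only the contract that returning -EBUSY from the
 * report_iommu_fault() callback leaves FSR set and the transaction stalled
 * comes from the code above.
 *
 *	static int example_client_fault(struct iommu_domain *domain,
 *					struct device *dev, unsigned long iova,
 *					int flags, void *token)
 *	{
 *		struct example_client *client = token;
 *
 *		// 1) Leave FSR and RESUME alone by returning -EBUSY
 *		schedule_work(&client->fault_work);
 *		return -EBUSY;
 *	}
 *
 *	static void example_client_fault_work(struct work_struct *work)
 *	{
 *		struct example_client *client =
 *			container_of(work, struct example_client, fault_work);
 *
 *		// 2) Gather debug state while the fault is still outstanding
 *		example_client_dump_state(client);
 *		// 3) Terminate the stalled transaction and resume the IOMMU
 *		example_client_terminate_and_resume(client);
 *		// 4) Only now clear FSR, so SCTLR.HUPCF keeps terminating any
 *		//    further transactions that hit the same fault
 *		example_client_clear_fault_status(client);
 *	}
 */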
1487
1488static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1489{
1490 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1491 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001492 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001493
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001494 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001495 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001496
Will Deacon45ae7cf2013-06-24 18:31:25 +01001497 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1498 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1499 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1500 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1501
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001502 if (!gfsr) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001503 arm_smmu_power_off(smmu->pwr);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001504 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001505 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001506
Will Deacon45ae7cf2013-06-24 18:31:25 +01001507 dev_err_ratelimited(smmu->dev,
1508 "Unexpected global fault, this could be serious\n");
1509 dev_err_ratelimited(smmu->dev,
1510 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1511 gfsr, gfsynr0, gfsynr1, gfsynr2);
1512
1513 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001514 arm_smmu_power_off(smmu->pwr);
Will Deaconadaba322013-07-31 19:21:26 +01001515 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001516}
1517
Will Deacon518f7132014-11-14 17:17:54 +00001518static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1519 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001520{
Robin Murphyb94df6f2016-08-11 17:44:06 +01001521 u32 reg, reg2;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001522 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001523 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +01001524 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1525 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001526 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001527
Will Deacon45ae7cf2013-06-24 18:31:25 +01001528 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001529 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1530 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001531
Will Deacon4a1c93c2015-03-04 12:21:03 +00001532 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001533 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1534 reg = CBA2R_RW64_64BIT;
1535 else
1536 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001537 /* 16-bit VMIDs live in CBA2R */
1538 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001539 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001540
Will Deacon4a1c93c2015-03-04 12:21:03 +00001541 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
1542 }
1543
Will Deacon45ae7cf2013-06-24 18:31:25 +01001544 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001545 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001546 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001547 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001548
Will Deacon57ca90f2014-02-06 14:59:05 +00001549 /*
1550 * Use the weakest shareability/memory types, so they are
1551 * overridden by the ttbcr/pte.
1552 */
1553 if (stage1) {
1554 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1555 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001556 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1557 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001558 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001559 }
Will Deacon44680ee2014-06-25 11:29:12 +01001560 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001561
Will Deacon518f7132014-11-14 17:17:54 +00001562 /* TTBRs */
1563 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001564 u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001565
Robin Murphyb94df6f2016-08-11 17:44:06 +01001566 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1567 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
1568 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
1569 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
1570 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
1571 writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
1572 } else {
1573 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1574 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1575 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
1576 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
1577 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1578 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
1579 }
Will Deacon518f7132014-11-14 17:17:54 +00001580 } else {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001581 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001582 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +00001583 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001584
Will Deacon518f7132014-11-14 17:17:54 +00001585 /* TTBCR */
1586 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001587 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1588 reg = pgtbl_cfg->arm_v7s_cfg.tcr;
1589 reg2 = 0;
1590 } else {
1591 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1592 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
1593 reg2 |= TTBCR2_SEP_UPSTREAM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001594 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001595 if (smmu->version > ARM_SMMU_V1)
1596 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001597 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001598 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001599 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001600 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001601
Will Deacon518f7132014-11-14 17:17:54 +00001602 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001603 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001604 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1605 reg = pgtbl_cfg->arm_v7s_cfg.prrr;
1606 reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
1607 } else {
1608 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1609 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1610 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001611 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001612 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001613 }
1614
Will Deacon45ae7cf2013-06-24 18:31:25 +01001615 /* SCTLR */
Robin Murphyb94df6f2016-08-11 17:44:06 +01001616 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001617
Patrick Daly7f377fe2017-10-06 17:37:10 -07001618 /* Ensure bypass transactions are Non-shareable */
1619 reg |= SCTLR_SHCFG_NSH << SCTLR_SHCFG_SHIFT;
1620
Charan Teja Reddyc682e472017-04-20 19:11:20 +05301621 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_CB_STALL_DISABLE)) {
1622 reg &= ~SCTLR_CFCFG;
1623 reg |= SCTLR_HUPCF;
1624 }
1625
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001626 if ((!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
1627 !(smmu_domain->attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) ||
1628 !stage1)
Patrick Dalye62d3362016-03-15 18:58:28 -07001629 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001630 if (stage1)
1631 reg |= SCTLR_S1_ASIDPNE;
1632#ifdef __BIG_ENDIAN
1633 reg |= SCTLR_E;
1634#endif
Will Deacon25724842013-08-21 13:49:53 +01001635 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001636}
1637
Patrick Dalyc190d932016-08-30 17:23:28 -07001638static int arm_smmu_init_asid(struct iommu_domain *domain,
1639 struct arm_smmu_device *smmu)
1640{
1641 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1642 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1643 bool dynamic = is_dynamic_domain(domain);
1644 int ret;
1645
1646 if (!dynamic) {
1647 cfg->asid = cfg->cbndx + 1;
1648 } else {
1649 mutex_lock(&smmu->idr_mutex);
1650 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1651 smmu->num_context_banks + 2,
1652 MAX_ASID + 1, GFP_KERNEL);
1653
1654 mutex_unlock(&smmu->idr_mutex);
1655 if (ret < 0) {
1656 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1657 ret);
1658 return ret;
1659 }
1660 cfg->asid = ret;
1661 }
1662 return 0;
1663}
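/*
 * ASID numbering used above, noted for clarity: non-dynamic domains derive
 * their ASID from the context bank (cbndx + 1, i.e. 1..N for N context
 * banks), while dynamic domains draw IDs from the IDR in the range
 * [num_context_banks + 2, MAX_ASID], so the two allocators cannot collide.
 */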
1664
1665static void arm_smmu_free_asid(struct iommu_domain *domain)
1666{
1667 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1668 struct arm_smmu_device *smmu = smmu_domain->smmu;
1669 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1670 bool dynamic = is_dynamic_domain(domain);
1671
1672 if (cfg->asid == INVALID_ASID || !dynamic)
1673 return;
1674
1675 mutex_lock(&smmu->idr_mutex);
1676 idr_remove(&smmu->asid_idr, cfg->asid);
1677 mutex_unlock(&smmu->idr_mutex);
1678}
1679
Will Deacon45ae7cf2013-06-24 18:31:25 +01001680static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Patrick Dalyea63baa2017-02-13 17:11:33 -08001681 struct arm_smmu_device *smmu,
1682 struct device *dev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001683{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001684 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001685 unsigned long ias, oas;
1686 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001687 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001688 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001689 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001690 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyce6786f2016-11-09 14:19:23 -08001691 unsigned long quirks = 0;
Patrick Dalyc190d932016-08-30 17:23:28 -07001692 bool dynamic;
Patrick Dalyda765c62017-09-11 16:31:07 -07001693 const struct iommu_gather_ops *tlb;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001694
Will Deacon518f7132014-11-14 17:17:54 +00001695 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001696 if (smmu_domain->smmu)
1697 goto out_unlock;
1698
Patrick Dalyc190d932016-08-30 17:23:28 -07001699 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1700 smmu_domain->cfg.asid = INVALID_ASID;
1701
Patrick Dalyc190d932016-08-30 17:23:28 -07001702 dynamic = is_dynamic_domain(domain);
1703 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1704 dev_err(smmu->dev, "dynamic domains not supported\n");
1705 ret = -EPERM;
1706 goto out_unlock;
1707 }
1708
Will Deaconc752ce42014-06-25 22:46:31 +01001709 /*
1710 * Mapping the requested stage onto what we support is surprisingly
1711 * complicated, mainly because the spec allows S1+S2 SMMUs without
1712 * support for nested translation. That means we end up with the
1713 * following table:
1714 *
1715 * Requested Supported Actual
1716 * S1 N S1
1717 * S1 S1+S2 S1
1718 * S1 S2 S2
1719 * S1 S1 S1
1720 * N N N
1721 * N S1+S2 S2
1722 * N S2 S2
1723 * N S1 S1
1724 *
1725 * Note that you can't actually request stage-2 mappings.
1726 */
1727 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1728 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1729 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1730 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1731
Robin Murphy7602b872016-04-28 17:12:09 +01001732 /*
1733 * Choosing a suitable context format is even more fiddly. Until we
1734 * grow some way for the caller to express a preference, and/or move
1735 * the decision into the io-pgtable code where it arguably belongs,
1736 * just aim for the closest thing to the rest of the system, and hope
1737 * that the hardware isn't esoteric enough that we can't assume AArch64
1738 * support to be a superset of AArch32 support...
1739 */
1740 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1741 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001742 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
1743 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
1744 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
1745 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
1746 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +01001747 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1748 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1749 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1750 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1751 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1752
1753 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1754 ret = -EINVAL;
1755 goto out_unlock;
1756 }
1757
Will Deaconc752ce42014-06-25 22:46:31 +01001758 switch (smmu_domain->stage) {
1759 case ARM_SMMU_DOMAIN_S1:
1760 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1761 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001762 ias = smmu->va_size;
1763 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001764 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001765 fmt = ARM_64_LPAE_S1;
Patrick Daly4423d3e2017-05-04 18:17:51 -07001766 if (smmu->options & ARM_SMMU_OPT_3LVL_TABLES)
1767 ias = min(ias, 39UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001768 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +00001769 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001770 ias = min(ias, 32UL);
1771 oas = min(oas, 40UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001772 } else {
1773 fmt = ARM_V7S;
1774 ias = min(ias, 32UL);
1775 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +01001776 }
Will Deaconc752ce42014-06-25 22:46:31 +01001777 break;
1778 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001779 /*
1780 * We will likely want to change this if/when KVM gets
1781 * involved.
1782 */
Will Deaconc752ce42014-06-25 22:46:31 +01001783 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001784 cfg->cbar = CBAR_TYPE_S2_TRANS;
1785 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001786 ias = smmu->ipa_size;
1787 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001788 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001789 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001790 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001791 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001792 ias = min(ias, 40UL);
1793 oas = min(oas, 40UL);
1794 }
Will Deaconc752ce42014-06-25 22:46:31 +01001795 break;
1796 default:
1797 ret = -EINVAL;
1798 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001799 }
1800
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001801 if (is_fast)
1802 fmt = ARM_V8L_FAST;
1803
Patrick Dalyce6786f2016-11-09 14:19:23 -08001804 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
1805 quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
Liam Mark53cf2342016-12-20 11:36:07 -08001806 if (is_iommu_pt_coherent(smmu_domain))
1807 quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
Patrick Daly49ccf332017-09-27 15:10:29 -07001808 if ((quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT) &&
1809 (smmu->model == QCOM_SMMUV500))
1810 quirks |= IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001811
Patrick Dalyda765c62017-09-11 16:31:07 -07001812 tlb = &arm_smmu_gather_ops;
Patrick Daly83174c12017-10-26 12:31:15 -07001813 if (smmu->options & ARM_SMMU_OPT_MMU500_ERRATA1)
Patrick Dalyda765c62017-09-11 16:31:07 -07001814 tlb = &qsmmuv500_errata1_smmu_gather_ops;
1815
Patrick Dalyda688822017-05-17 20:12:48 -07001816 ret = arm_smmu_alloc_cb(domain, smmu, dev);
1817 if (ret < 0)
1818 goto out_unlock;
1819 cfg->cbndx = ret;
1820
Robin Murphyb7862e32016-04-13 18:13:03 +01001821 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001822 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1823 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001824 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001825 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001826 }
1827
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001828 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
Patrick Dalyce6786f2016-11-09 14:19:23 -08001829 .quirks = quirks,
Robin Murphyd5466352016-05-09 17:20:09 +01001830 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +00001831 .ias = ias,
1832 .oas = oas,
Patrick Dalyda765c62017-09-11 16:31:07 -07001833 .tlb = tlb,
Robin Murphy2df7a252015-07-29 19:46:06 +01001834 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +00001835 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001836
Will Deacon518f7132014-11-14 17:17:54 +00001837 smmu_domain->smmu = smmu;
Patrick Dalyea63baa2017-02-13 17:11:33 -08001838 smmu_domain->dev = dev;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001839 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1840 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001841 if (!pgtbl_ops) {
1842 ret = -ENOMEM;
1843 goto out_clear_smmu;
1844 }
1845
Patrick Dalyc11d1082016-09-01 15:52:44 -07001846 /*
1847 * assign any page table memory that might have been allocated
1848 * during alloc_io_pgtable_ops
1849 */
Patrick Dalye271f212016-10-04 13:24:49 -07001850 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001851 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001852 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001853
Robin Murphyd5466352016-05-09 17:20:09 +01001854 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001855 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Robin Murphyd7a8d042016-09-12 17:13:58 +01001856 domain->geometry.aperture_end = (1UL << ias) - 1;
1857 domain->geometry.force_aperture = true;
Will Deacon518f7132014-11-14 17:17:54 +00001858
Patrick Dalyc190d932016-08-30 17:23:28 -07001859 /* Assign an asid */
1860 ret = arm_smmu_init_asid(domain, smmu);
1861 if (ret)
1862 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001863
Patrick Dalyc190d932016-08-30 17:23:28 -07001864 if (!dynamic) {
1865 /* Initialise the context bank with our page table cfg */
1866 arm_smmu_init_context_bank(smmu_domain,
1867 &smmu_domain->pgtbl_cfg);
1868
Patrick Daly03330cc2017-08-11 14:56:38 -07001869 arm_smmu_arch_init_context_bank(smmu_domain, dev);
1870
Patrick Dalyc190d932016-08-30 17:23:28 -07001871 /*
1872 * Request context fault interrupt. Do this last to avoid the
1873 * handler seeing a half-initialised domain state.
1874 */
1875 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1876 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001877 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1878 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001879 if (ret < 0) {
1880 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1881 cfg->irptndx, irq);
1882 cfg->irptndx = INVALID_IRPTNDX;
1883 goto out_clear_smmu;
1884 }
1885 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001886 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001887 }
Will Deacon518f7132014-11-14 17:17:54 +00001888 mutex_unlock(&smmu_domain->init_mutex);
1889
1890 /* Publish page table ops for map/unmap */
1891 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001892 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001893
Will Deacon518f7132014-11-14 17:17:54 +00001894out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001895 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001896 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001897out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001898 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001899 return ret;
1900}
1901
Patrick Daly77db4f92016-10-14 15:34:10 -07001902static void arm_smmu_domain_reinit(struct arm_smmu_domain *smmu_domain)
1903{
1904 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1905 smmu_domain->cfg.cbndx = INVALID_CBNDX;
1906 smmu_domain->secure_vmid = VMID_INVAL;
1907}
1908
Will Deacon45ae7cf2013-06-24 18:31:25 +01001909static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1910{
Joerg Roedel1d672632015-03-26 13:43:10 +01001911 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001912 struct arm_smmu_device *smmu = smmu_domain->smmu;
1913 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01001914 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001915 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07001916 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001917 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001918
Robin Murphy7e96c742016-09-14 15:26:46 +01001919 if (!smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001920 return;
1921
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001922 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001923 if (ret) {
 1924		WARN_ONCE(ret, "Whoops, powering on smmu %p failed. Leaking context bank\n",
1925 smmu);
1926 return;
1927 }
1928
Patrick Dalyc190d932016-08-30 17:23:28 -07001929 dynamic = is_dynamic_domain(domain);
1930 if (dynamic) {
1931 arm_smmu_free_asid(domain);
1932 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001933 arm_smmu_power_off(smmu->pwr);
Patrick Dalye271f212016-10-04 13:24:49 -07001934 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001935 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001936 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001937 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly77db4f92016-10-14 15:34:10 -07001938 arm_smmu_domain_reinit(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001939 return;
1940 }
1941
Will Deacon518f7132014-11-14 17:17:54 +00001942 /*
1943 * Disable the context bank and free the page tables before freeing
1944 * it.
1945 */
Will Deacon44680ee2014-06-25 11:29:12 +01001946 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001947 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01001948
Will Deacon44680ee2014-06-25 11:29:12 +01001949 if (cfg->irptndx != INVALID_IRPTNDX) {
1950 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08001951 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001952 }
1953
Markus Elfring44830b02015-11-06 18:32:41 +01001954 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07001955 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001956 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001957 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001958 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001959 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001960
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001961 arm_smmu_power_off(smmu->pwr);
Patrick Daly77db4f92016-10-14 15:34:10 -07001962 arm_smmu_domain_reinit(smmu_domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001963}
1964
Joerg Roedel1d672632015-03-26 13:43:10 +01001965static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001966{
1967 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001968
Patrick Daly09801312016-08-29 17:02:52 -07001969 /* Do not support DOMAIN_DMA for now */
1970 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01001971 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001972 /*
1973 * Allocate the domain and initialise some of its data structures.
1974 * We can't really do anything meaningful until we've added a
1975 * master.
1976 */
1977 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1978 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001979 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001980
Robin Murphy7e96c742016-09-14 15:26:46 +01001981 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
1982 iommu_get_dma_cookie(&smmu_domain->domain))) {
Robin Murphy9adb9592016-01-26 18:06:36 +00001983 kfree(smmu_domain);
1984 return NULL;
1985 }
1986
Will Deacon518f7132014-11-14 17:17:54 +00001987 mutex_init(&smmu_domain->init_mutex);
1988 spin_lock_init(&smmu_domain->pgtbl_lock);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001989 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
1990 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07001991 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001992 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Patrick Daly77db4f92016-10-14 15:34:10 -07001993 arm_smmu_domain_reinit(smmu_domain);
Joerg Roedel1d672632015-03-26 13:43:10 +01001994
1995 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001996}
1997
Joerg Roedel1d672632015-03-26 13:43:10 +01001998static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001999{
Joerg Roedel1d672632015-03-26 13:43:10 +01002000 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01002001
2002 /*
2003 * Free the domain resources. We assume that all devices have
2004 * already been detached.
2005 */
Robin Murphy9adb9592016-01-26 18:06:36 +00002006 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002007 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002008 kfree(smmu_domain);
2009}
2010
Robin Murphy468f4942016-09-12 17:13:49 +01002011static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
2012{
2013 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphyd5b41782016-09-14 15:21:39 +01002014 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
Robin Murphy468f4942016-09-12 17:13:49 +01002015
2016 if (smr->valid)
2017 reg |= SMR_VALID;
2018 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
2019}
2020
Robin Murphya754fd12016-09-12 17:13:50 +01002021static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
2022{
2023 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
2024 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
2025 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
Patrick Daly7f377fe2017-10-06 17:37:10 -07002026 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT |
2027 S2CR_SHCFG_NSH << S2CR_SHCFG_SHIFT;
Robin Murphya754fd12016-09-12 17:13:50 +01002028
2029 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
2030}
2031
2032static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
2033{
2034 arm_smmu_write_s2cr(smmu, idx);
2035 if (smmu->smrs)
2036 arm_smmu_write_smr(smmu, idx);
2037}
2038
Robin Murphy6668f692016-09-12 17:13:54 +01002039static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy468f4942016-09-12 17:13:49 +01002040{
2041 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy6668f692016-09-12 17:13:54 +01002042 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002043
Robin Murphy6668f692016-09-12 17:13:54 +01002044 /* Stream indexing is blissfully easy */
2045 if (!smrs)
2046 return id;
Robin Murphy468f4942016-09-12 17:13:49 +01002047
Robin Murphy6668f692016-09-12 17:13:54 +01002048 /* Validating SMRs is... less so */
2049 for (i = 0; i < smmu->num_mapping_groups; ++i) {
2050 if (!smrs[i].valid) {
2051 /*
2052 * Note the first free entry we come across, which
2053 * we'll claim in the end if nothing else matches.
2054 */
2055 if (free_idx < 0)
2056 free_idx = i;
Robin Murphy468f4942016-09-12 17:13:49 +01002057 continue;
2058 }
Robin Murphy6668f692016-09-12 17:13:54 +01002059 /*
2060 * If the new entry is _entirely_ matched by an existing entry,
2061 * then reuse that, with the guarantee that there also cannot
2062 * be any subsequent conflicting entries. In normal use we'd
2063 * expect simply identical entries for this case, but there's
2064 * no harm in accommodating the generalisation.
2065 */
2066 if ((mask & smrs[i].mask) == mask &&
2067 !((id ^ smrs[i].id) & ~smrs[i].mask))
2068 return i;
2069 /*
2070 * If the new entry has any other overlap with an existing one,
2071 * though, then there always exists at least one stream ID
2072 * which would cause a conflict, and we can't allow that risk.
2073 */
2074 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
2075 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002076 }
2077
Robin Murphy6668f692016-09-12 17:13:54 +01002078 return free_idx;
2079}
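/*
 * Worked example of the matching rules above (illustrative values): an
 * existing SMR with id=0x400, mask=0x00f matches stream IDs 0x400-0x40f.
 * A new entry id=0x404, mask=0x003 (covering 0x404-0x407) satisfies both
 * (0x003 & 0x00f) == 0x003 and ((0x404 ^ 0x400) & ~0x00f) == 0, so the
 * existing index is reused. A new entry id=0x408, mask=0x030 (matching
 * 0x408/0x418/0x428/0x438) is not a subset, but
 * ((0x408 ^ 0x400) & ~(0x00f | 0x030)) == 0, i.e. stream ID 0x408 would
 * hit both entries, so the allocation fails with -EINVAL.
 */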
2080
2081static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
2082{
2083 if (--smmu->s2crs[idx].count)
2084 return false;
2085
2086 smmu->s2crs[idx] = s2cr_init_val;
2087 if (smmu->smrs)
2088 smmu->smrs[idx].valid = false;
2089
2090 return true;
2091}
2092
2093static int arm_smmu_master_alloc_smes(struct device *dev)
2094{
Robin Murphy06e393e2016-09-12 17:13:55 +01002095 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2096 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy6668f692016-09-12 17:13:54 +01002097 struct arm_smmu_device *smmu = cfg->smmu;
2098 struct arm_smmu_smr *smrs = smmu->smrs;
2099 struct iommu_group *group;
2100 int i, idx, ret;
2101
2102 mutex_lock(&smmu->stream_map_mutex);
2103 /* Figure out a viable stream map entry allocation */
Robin Murphy06e393e2016-09-12 17:13:55 +01002104 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002105 u16 sid = fwspec->ids[i];
2106 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
2107
Robin Murphy6668f692016-09-12 17:13:54 +01002108 if (idx != INVALID_SMENDX) {
2109 ret = -EEXIST;
2110 goto out_err;
2111 }
2112
Robin Murphy7e96c742016-09-14 15:26:46 +01002113 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy6668f692016-09-12 17:13:54 +01002114 if (ret < 0)
2115 goto out_err;
2116
2117 idx = ret;
2118 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002119 smrs[idx].id = sid;
2120 smrs[idx].mask = mask;
Robin Murphy6668f692016-09-12 17:13:54 +01002121 smrs[idx].valid = true;
2122 }
2123 smmu->s2crs[idx].count++;
2124 cfg->smendx[i] = (s16)idx;
2125 }
2126
2127 group = iommu_group_get_for_dev(dev);
2128 if (!group)
2129 group = ERR_PTR(-ENOMEM);
2130 if (IS_ERR(group)) {
2131 ret = PTR_ERR(group);
2132 goto out_err;
2133 }
2134 iommu_group_put(group);
Robin Murphy468f4942016-09-12 17:13:49 +01002135
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002136 /* It worked! Don't poke the actual hardware until we've attached */
2137 for_each_cfg_sme(fwspec, i, idx)
Robin Murphy6668f692016-09-12 17:13:54 +01002138 smmu->s2crs[idx].group = group;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002139
Robin Murphy6668f692016-09-12 17:13:54 +01002140 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002141 return 0;
2142
Robin Murphy6668f692016-09-12 17:13:54 +01002143out_err:
Robin Murphy468f4942016-09-12 17:13:49 +01002144 while (i--) {
Robin Murphy6668f692016-09-12 17:13:54 +01002145 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy468f4942016-09-12 17:13:49 +01002146 cfg->smendx[i] = INVALID_SMENDX;
2147 }
Robin Murphy6668f692016-09-12 17:13:54 +01002148 mutex_unlock(&smmu->stream_map_mutex);
2149 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002150}
2151
Robin Murphy06e393e2016-09-12 17:13:55 +01002152static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002153{
Robin Murphy06e393e2016-09-12 17:13:55 +01002154 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
2155 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy1fb519a2016-09-12 17:13:53 +01002156 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01002157
Robin Murphy6668f692016-09-12 17:13:54 +01002158 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002159 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002160 if (arm_smmu_free_sme(smmu, idx))
2161 arm_smmu_write_sme(smmu, idx);
Robin Murphy468f4942016-09-12 17:13:49 +01002162 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002163 }
Robin Murphy6668f692016-09-12 17:13:54 +01002164 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002165}
2166
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002167static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
2168 struct iommu_fwspec *fwspec)
2169{
2170 struct arm_smmu_device *smmu = smmu_domain->smmu;
2171 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2172 int i, idx;
2173 const struct iommu_gather_ops *tlb;
2174
2175 tlb = smmu_domain->pgtbl_cfg.tlb;
2176
2177 mutex_lock(&smmu->stream_map_mutex);
2178 for_each_cfg_sme(fwspec, i, idx) {
2179 WARN_ON(s2cr[idx].attach_count == 0);
2180 s2cr[idx].attach_count -= 1;
2181
2182 if (s2cr[idx].attach_count > 0)
2183 continue;
2184
2185 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
2186 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
2187 }
2188 mutex_unlock(&smmu->stream_map_mutex);
2189
2190 /* Ensure there are no stale mappings for this context bank */
2191 tlb->tlb_flush_all(smmu_domain);
2192}
2193
Will Deacon45ae7cf2013-06-24 18:31:25 +01002194static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphy06e393e2016-09-12 17:13:55 +01002195 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002196{
Will Deacon44680ee2014-06-25 11:29:12 +01002197 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002198 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2199 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
2200 u8 cbndx = smmu_domain->cfg.cbndx;
Robin Murphy6668f692016-09-12 17:13:54 +01002201 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002202
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002203 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002204 for_each_cfg_sme(fwspec, i, idx) {
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002205 if (s2cr[idx].attach_count++ > 0)
Robin Murphy6668f692016-09-12 17:13:54 +01002206 continue;
Robin Murphya754fd12016-09-12 17:13:50 +01002207
2208 s2cr[idx].type = type;
2209 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
2210 s2cr[idx].cbndx = cbndx;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002211 arm_smmu_write_sme(smmu, idx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002212 }
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002213 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002214
2215 return 0;
2216}
2217
Patrick Daly09801312016-08-29 17:02:52 -07002218static void arm_smmu_detach_dev(struct iommu_domain *domain,
2219 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002220{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002221 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07002222 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002223 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly09801312016-08-29 17:02:52 -07002224 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07002225 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07002226
2227 if (dynamic)
2228 return;
2229
Patrick Daly09801312016-08-29 17:02:52 -07002230 if (!smmu) {
2231 dev_err(dev, "Domain not attached; cannot detach!\n");
2232 return;
2233 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002234
Vijayanand Jitta25cd32c2017-11-16 15:14:36 +05302235 if (atomic_domain)
2236 arm_smmu_power_on_atomic(smmu->pwr);
2237 else
2238 arm_smmu_power_on(smmu->pwr);
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002239
Vijayanand Jitta25cd32c2017-11-16 15:14:36 +05302240 arm_smmu_domain_remove_master(smmu_domain, fwspec);
2241 arm_smmu_power_off(smmu->pwr);
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002242}
2243
Patrick Dalye271f212016-10-04 13:24:49 -07002244static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07002245{
Patrick Dalye271f212016-10-04 13:24:49 -07002246 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002247 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2248 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
2249 int source_vmid = VMID_HLOS;
2250 struct arm_smmu_pte_info *pte_info, *temp;
2251
Patrick Dalye271f212016-10-04 13:24:49 -07002252 if (!arm_smmu_is_domain_secure(smmu_domain))
2253 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002254
Patrick Dalye271f212016-10-04 13:24:49 -07002255 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002256 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2257 PAGE_SIZE, &source_vmid, 1,
2258 dest_vmids, dest_perms, 2);
2259 if (WARN_ON(ret))
2260 break;
2261 }
2262
2263 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2264 entry) {
2265 list_del(&pte_info->entry);
2266 kfree(pte_info);
2267 }
Patrick Dalye271f212016-10-04 13:24:49 -07002268 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002269}
2270
2271static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2272{
2273 int ret;
2274 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002275 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002276 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2277 struct arm_smmu_pte_info *pte_info, *temp;
2278
Patrick Dalye271f212016-10-04 13:24:49 -07002279 if (!arm_smmu_is_domain_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002280 return;
2281
2282 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2283 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2284 PAGE_SIZE, source_vmlist, 2,
2285 &dest_vmids, &dest_perms, 1);
2286 if (WARN_ON(ret))
2287 break;
2288 free_pages_exact(pte_info->virt_addr, pte_info->size);
2289 }
2290
2291 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2292 entry) {
2293 list_del(&pte_info->entry);
2294 kfree(pte_info);
2295 }
2296}
2297
2298static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2299{
2300 struct arm_smmu_domain *smmu_domain = cookie;
2301 struct arm_smmu_pte_info *pte_info;
2302
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002303 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002304
2305 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2306 if (!pte_info)
2307 return;
2308
2309 pte_info->virt_addr = addr;
2310 pte_info->size = size;
2311 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2312}
2313
2314static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2315{
2316 struct arm_smmu_domain *smmu_domain = cookie;
2317 struct arm_smmu_pte_info *pte_info;
2318
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002319 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002320
2321 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2322 if (!pte_info)
2323 return -ENOMEM;
2324 pte_info->virt_addr = addr;
2325 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2326 return 0;
2327}
2328
Will Deacon45ae7cf2013-06-24 18:31:25 +01002329static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2330{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002331 int ret;
Robin Murphy06e393e2016-09-12 17:13:55 +01002332 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Will Deacon518f7132014-11-14 17:17:54 +00002333 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +01002334 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly8befb662016-08-17 20:03:28 -07002335 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002336
Robin Murphy06e393e2016-09-12 17:13:55 +01002337 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002338 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2339 return -ENXIO;
2340 }
Robin Murphy06e393e2016-09-12 17:13:55 +01002341
Robin Murphy4f79b142016-10-17 12:06:21 +01002342 /*
2343 * FIXME: The arch/arm DMA API code tries to attach devices to its own
2344 * domains between of_xlate() and add_device() - we have no way to cope
2345 * with that, so until ARM gets converted to rely on groups and default
2346 * domains, just say no (but more politely than by dereferencing NULL).
2347 * This should be at least a WARN_ON once that's sorted.
2348 */
2349 if (!fwspec->iommu_priv)
2350 return -ENODEV;
2351
Robin Murphy06e393e2016-09-12 17:13:55 +01002352 smmu = fwspec_smmu(fwspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002353
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002354 /* Enable Clocks and Power */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002355 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002356 if (ret)
2357 return ret;
2358
Will Deacon518f7132014-11-14 17:17:54 +00002359 /* Ensure that the domain is finalised */
Patrick Dalyea63baa2017-02-13 17:11:33 -08002360 ret = arm_smmu_init_domain_context(domain, smmu, dev);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002361 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002362 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002363
Patrick Dalyc190d932016-08-30 17:23:28 -07002364 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002365 if (is_dynamic_domain(domain)) {
2366 ret = 0;
2367 goto out_power_off;
2368 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002369
Will Deacon45ae7cf2013-06-24 18:31:25 +01002370 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002371 * Sanity check the domain. We don't support domains across
2372 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002373 */
Robin Murphy06e393e2016-09-12 17:13:55 +01002374 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002375 dev_err(dev,
2376 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002377 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002378 ret = -EINVAL;
2379 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002380 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002381
2382 /* Looks ok, so add the device to the domain */
Robin Murphy06e393e2016-09-12 17:13:55 +01002383 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002384
2385out_power_off:
Patrick Daly466237d2016-10-14 15:59:14 -07002386 /*
2387 * Keep an additional vote for non-atomic power until domain is
2388 * detached
2389 */
2390 if (!ret && atomic_domain) {
2391 WARN_ON(arm_smmu_power_on(smmu->pwr));
2392 arm_smmu_power_off_atomic(smmu->pwr);
2393 }
2394
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002395 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002396
Will Deacon45ae7cf2013-06-24 18:31:25 +01002397 return ret;
2398}
2399
Will Deacon45ae7cf2013-06-24 18:31:25 +01002400static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002401 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002402{
Will Deacon518f7132014-11-14 17:17:54 +00002403 int ret;
2404 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002405 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002406	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002407
Will Deacon518f7132014-11-14 17:17:54 +00002408 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002409 return -ENODEV;
2410
Patrick Dalye271f212016-10-04 13:24:49 -07002411 arm_smmu_secure_domain_lock(smmu_domain);
2412
Will Deacon518f7132014-11-14 17:17:54 +00002413 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2414 ret = ops->map(ops, iova, paddr, size, prot);
2415 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002416
2417 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002418 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002419
Will Deacon518f7132014-11-14 17:17:54 +00002420 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002421}
2422
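/*
 * Return the raw page table entry backing @iova, rather than the translated
 * physical address, via the io-pgtable iova_to_pte() helper. Returns 0 if
 * the domain has no page tables yet.
 */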
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002423static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
2424 dma_addr_t iova)
2425{
2426 uint64_t ret;
2427 unsigned long flags;
2428 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2429 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2430
2431 if (!ops)
2432 return 0;
2433
2434 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2435 ret = ops->iova_to_pte(ops, iova);
2436 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2437 return ret;
2438}
2439
Will Deacon45ae7cf2013-06-24 18:31:25 +01002440static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2441 size_t size)
2442{
Will Deacon518f7132014-11-14 17:17:54 +00002443 size_t ret;
2444 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002445 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002446	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002447
Will Deacon518f7132014-11-14 17:17:54 +00002448 if (!ops)
2449 return 0;
2450
Patrick Daly8befb662016-08-17 20:03:28 -07002451 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002452 if (ret)
2453 return ret;
2454
Patrick Dalye271f212016-10-04 13:24:49 -07002455 arm_smmu_secure_domain_lock(smmu_domain);
2456
Will Deacon518f7132014-11-14 17:17:54 +00002457 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2458 ret = ops->unmap(ops, iova, size);
2459 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002460
Patrick Daly8befb662016-08-17 20:03:28 -07002461 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002462 /*
2463 * While splitting up block mappings, we might allocate page table
 2464	 * memory during unmap, so the vmids need to be assigned to the
2465 * memory here as well.
2466 */
2467 arm_smmu_assign_table(smmu_domain);
2468 /* Also unassign any pages that were free'd during unmap */
2469 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002470 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002471 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002472}
2473
Patrick Daly88d321d2017-02-09 18:02:13 -08002474#define MAX_MAP_SG_BATCH_SIZE (SZ_4M)
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002475static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2476 struct scatterlist *sg, unsigned int nents, int prot)
2477{
2478 int ret;
Patrick Daly88d321d2017-02-09 18:02:13 -08002479 size_t size, batch_size, size_to_unmap = 0;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002480 unsigned long flags;
2481 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2482 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Patrick Daly88d321d2017-02-09 18:02:13 -08002483 unsigned int idx_start, idx_end;
2484 struct scatterlist *sg_start, *sg_end;
2485 unsigned long __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002486
2487 if (!ops)
2488 return -ENODEV;
2489
Patrick Daly4b9a7ad2017-09-22 17:31:13 -07002490 arm_smmu_secure_domain_lock(smmu_domain);
2491
Patrick Daly88d321d2017-02-09 18:02:13 -08002492 __saved_iova_start = iova;
2493 idx_start = idx_end = 0;
2494 sg_start = sg_end = sg;
2495 while (idx_end < nents) {
2496 batch_size = sg_end->length;
2497 sg_end = sg_next(sg_end);
2498 idx_end++;
2499 while ((idx_end < nents) &&
2500 (batch_size + sg_end->length < MAX_MAP_SG_BATCH_SIZE)) {
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002501
Patrick Daly88d321d2017-02-09 18:02:13 -08002502 batch_size += sg_end->length;
2503 sg_end = sg_next(sg_end);
2504 idx_end++;
2505 }
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002506
Patrick Daly88d321d2017-02-09 18:02:13 -08002507 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2508 ret = ops->map_sg(ops, iova, sg_start, idx_end - idx_start,
2509 prot, &size);
2510 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2511 /* Returns 0 on error */
2512 if (!ret) {
2513 size_to_unmap = iova + size - __saved_iova_start;
2514 goto out;
2515 }
2516
2517 iova += batch_size;
2518 idx_start = idx_end;
2519 sg_start = sg_end;
2520 }
2521
2522out:
Patrick Dalyc11d1082016-09-01 15:52:44 -07002523 arm_smmu_assign_table(smmu_domain);
2524
Patrick Daly88d321d2017-02-09 18:02:13 -08002525 if (size_to_unmap) {
2526 arm_smmu_unmap(domain, __saved_iova_start, size_to_unmap);
2527 iova = __saved_iova_start;
2528 }
Patrick Daly4b9a7ad2017-09-22 17:31:13 -07002529 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly88d321d2017-02-09 18:02:13 -08002530 return iova - __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002531}
2532
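/*
 * Hardware address translation (ATOS): write the page-aligned IOVA to
 * ATS1PR, poll ATSR until the walk completes, then read the result from
 * PAR. On timeout, a software table walk is performed only to report the
 * expected address in the error message.
 */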
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002533static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyad441dd2016-09-15 15:50:46 -07002534 dma_addr_t iova)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002535{
Joerg Roedel1d672632015-03-26 13:43:10 +01002536 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002537 struct arm_smmu_device *smmu = smmu_domain->smmu;
2538 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 2539	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2540 struct device *dev = smmu->dev;
2541 void __iomem *cb_base;
2542 u32 tmp;
2543 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002544 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002545
2546 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2547
Robin Murphy661d9622015-05-27 17:09:34 +01002548 /* ATS1 registers can only be written atomically */
2549 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002550 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002551 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2552 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002553 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002554
2555 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2556 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002557 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002558 dev_err(dev,
2559 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2560 &iova, &phys);
2561 phys = 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002562 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002563 }
2564
Robin Murphyf9a05f02016-04-13 18:13:01 +01002565 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002566 if (phys & CB_PAR_F) {
2567 dev_err(dev, "translation fault!\n");
2568 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002569 phys = 0;
2570 } else {
2571 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002572 }
Patrick Dalyad441dd2016-09-15 15:50:46 -07002573
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002574 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002575}
2576
Will Deacon45ae7cf2013-06-24 18:31:25 +01002577static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002578 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002579{
Will Deacon518f7132014-11-14 17:17:54 +00002580 phys_addr_t ret;
2581 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002582 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002583	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002584
Will Deacon518f7132014-11-14 17:17:54 +00002585 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002586 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002587
Will Deacon518f7132014-11-14 17:17:54 +00002588 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002589 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002590 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002591
Will Deacon518f7132014-11-14 17:17:54 +00002592 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002593}
2594
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002595/*
 2596 * This function can sleep and therefore must not be called from atomic
 2597 * context; it will power on the SMMU register block if required. This
 2598 * restriction does not apply to the original iova_to_phys() op.
2599 */
2600static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2601 dma_addr_t iova)
2602{
2603 phys_addr_t ret = 0;
2604 unsigned long flags;
2605 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly62ba1922017-08-30 16:47:18 -07002606 struct arm_smmu_device *smmu = smmu_domain->smmu;
2607
2608 if (smmu->options & ARM_SMMU_OPT_DISABLE_ATOS)
2609 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002610
Patrick Dalyad441dd2016-09-15 15:50:46 -07002611 if (smmu_domain->smmu->arch_ops &&
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002612 smmu_domain->smmu->arch_ops->iova_to_phys_hard) {
2613 ret = smmu_domain->smmu->arch_ops->iova_to_phys_hard(
Patrick Dalyad441dd2016-09-15 15:50:46 -07002614 domain, iova);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002615 return ret;
2616 }
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002617
2618 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2619 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2620 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyad441dd2016-09-15 15:50:46 -07002621 ret = __arm_smmu_iova_to_phys_hard(domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002622
2623 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2624
2625 return ret;
2626}
2627
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002628static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002629{
Will Deacond0948942014-06-24 17:30:10 +01002630 switch (cap) {
2631 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002632 /*
2633 * Return true here as the SMMU can always send out coherent
2634 * requests.
2635 */
2636 return true;
Will Deacond0948942014-06-24 17:30:10 +01002637 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002638 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002639 case IOMMU_CAP_NOEXEC:
2640 return true;
Will Deacond0948942014-06-24 17:30:10 +01002641 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002642 return false;
Will Deacond0948942014-06-24 17:30:10 +01002643 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002644}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002645
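/*
 * Look up an SMMU instance by device_node on the driver's global list.
 * Used by arm_smmu_get_by_node() as a fallback when driver_find_device()
 * does not return a match.
 */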
Patrick Daly8e3371a2017-02-13 22:14:53 -08002646static struct arm_smmu_device *arm_smmu_get_by_list(struct device_node *np)
2647{
2648 struct arm_smmu_device *smmu;
2649 unsigned long flags;
2650
2651 spin_lock_irqsave(&arm_smmu_devices_lock, flags);
2652 list_for_each_entry(smmu, &arm_smmu_devices, list) {
2653 if (smmu->dev->of_node == np) {
2654 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2655 return smmu;
2656 }
2657 }
2658 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2659 return NULL;
2660}
2661
Robin Murphy7e96c742016-09-14 15:26:46 +01002662static int arm_smmu_match_node(struct device *dev, void *data)
2663{
2664 return dev->of_node == data;
2665}
2666
2667static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
2668{
2669 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
2670 np, arm_smmu_match_node);
2671 put_device(dev);
Patrick Daly8e3371a2017-02-13 22:14:53 -08002672 return dev ? dev_get_drvdata(dev) : arm_smmu_get_by_list(np);
Robin Murphy7e96c742016-09-14 15:26:46 +01002673}
2674
Will Deacon03edb222015-01-19 14:27:33 +00002675static int arm_smmu_add_device(struct device *dev)
2676{
Robin Murphy06e393e2016-09-12 17:13:55 +01002677 struct arm_smmu_device *smmu;
Robin Murphyd5b41782016-09-14 15:21:39 +01002678 struct arm_smmu_master_cfg *cfg;
Robin Murphy7e96c742016-09-14 15:26:46 +01002679 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyd5b41782016-09-14 15:21:39 +01002680 int i, ret;
2681
Robin Murphy7e96c742016-09-14 15:26:46 +01002682 if (using_legacy_binding) {
2683 ret = arm_smmu_register_legacy_master(dev, &smmu);
2684 fwspec = dev->iommu_fwspec;
2685 if (ret)
2686 goto out_free;
Robin Murphy22e6f6c2016-11-02 17:31:32 +00002687 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002688 smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
2689 if (!smmu)
2690 return -ENODEV;
2691 } else {
2692 return -ENODEV;
2693 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002694
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002695 ret = arm_smmu_power_on(smmu->pwr);
2696 if (ret)
2697 goto out_free;
2698
Robin Murphyd5b41782016-09-14 15:21:39 +01002699 ret = -EINVAL;
Robin Murphy06e393e2016-09-12 17:13:55 +01002700 for (i = 0; i < fwspec->num_ids; i++) {
2701 u16 sid = fwspec->ids[i];
Robin Murphy7e96c742016-09-14 15:26:46 +01002702 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyd5b41782016-09-14 15:21:39 +01002703
Robin Murphy06e393e2016-09-12 17:13:55 +01002704 if (sid & ~smmu->streamid_mask) {
Robin Murphyd5b41782016-09-14 15:21:39 +01002705 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002706 sid, smmu->streamid_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002707 goto out_pwr_off;
Robin Murphyd5b41782016-09-14 15:21:39 +01002708 }
Robin Murphy7e96c742016-09-14 15:26:46 +01002709 if (mask & ~smmu->smr_mask_mask) {
2710 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
 2711				mask, smmu->smr_mask_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002712 goto out_pwr_off;
Robin Murphy7e96c742016-09-14 15:26:46 +01002713 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002714 }
Will Deacon03edb222015-01-19 14:27:33 +00002715
Robin Murphy06e393e2016-09-12 17:13:55 +01002716 ret = -ENOMEM;
2717 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
2718 GFP_KERNEL);
2719 if (!cfg)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002720 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002721
2722 cfg->smmu = smmu;
2723 fwspec->iommu_priv = cfg;
2724 while (i--)
2725 cfg->smendx[i] = INVALID_SMENDX;
2726
Robin Murphy6668f692016-09-12 17:13:54 +01002727 ret = arm_smmu_master_alloc_smes(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002728 if (ret)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002729 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002730
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002731 arm_smmu_power_off(smmu->pwr);
Robin Murphy06e393e2016-09-12 17:13:55 +01002732 return 0;
Robin Murphyd5b41782016-09-14 15:21:39 +01002733
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002734out_pwr_off:
2735 arm_smmu_power_off(smmu->pwr);
Robin Murphyd5b41782016-09-14 15:21:39 +01002736out_free:
Robin Murphy06e393e2016-09-12 17:13:55 +01002737 if (fwspec)
2738 kfree(fwspec->iommu_priv);
2739 iommu_fwspec_free(dev);
Robin Murphyd5b41782016-09-14 15:21:39 +01002740 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00002741}
2742
Will Deacon45ae7cf2013-06-24 18:31:25 +01002743static void arm_smmu_remove_device(struct device *dev)
2744{
Robin Murphy06e393e2016-09-12 17:13:55 +01002745 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002746 struct arm_smmu_device *smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002747
Robin Murphy06e393e2016-09-12 17:13:55 +01002748 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyd5b41782016-09-14 15:21:39 +01002749 return;
Robin Murphya754fd12016-09-12 17:13:50 +01002750
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002751 smmu = fwspec_smmu(fwspec);
2752 if (arm_smmu_power_on(smmu->pwr)) {
2753 WARN_ON(1);
2754 return;
2755 }
2756
Robin Murphy06e393e2016-09-12 17:13:55 +01002757 arm_smmu_master_free_smes(fwspec);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002758 iommu_group_remove_device(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002759 kfree(fwspec->iommu_priv);
2760 iommu_fwspec_free(dev);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002761 arm_smmu_power_off(smmu->pwr);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002762}
2763
Joerg Roedelaf659932015-10-21 23:51:41 +02002764static struct iommu_group *arm_smmu_device_group(struct device *dev)
2765{
Robin Murphy06e393e2016-09-12 17:13:55 +01002766 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2767 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy6668f692016-09-12 17:13:54 +01002768 struct iommu_group *group = NULL;
2769 int i, idx;
2770
Robin Murphy06e393e2016-09-12 17:13:55 +01002771 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002772 if (group && smmu->s2crs[idx].group &&
2773 group != smmu->s2crs[idx].group)
2774 return ERR_PTR(-EINVAL);
2775
2776 group = smmu->s2crs[idx].group;
2777 }
2778
Patrick Daly03330cc2017-08-11 14:56:38 -07002779 if (!group) {
2780 if (dev_is_pci(dev))
2781 group = pci_device_group(dev);
2782 else
2783 group = generic_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02002784
Patrick Daly03330cc2017-08-11 14:56:38 -07002785 if (IS_ERR(group))
2786 return NULL;
2787 }
2788
2789 if (arm_smmu_arch_device_group(dev, group)) {
2790 iommu_group_put(group);
2791 return ERR_PTR(-EINVAL);
2792 }
Joerg Roedelaf659932015-10-21 23:51:41 +02002793
Joerg Roedelaf659932015-10-21 23:51:41 +02002794 return group;
2795}
2796
Will Deaconc752ce42014-06-25 22:46:31 +01002797static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
2798 enum iommu_attr attr, void *data)
2799{
Joerg Roedel1d672632015-03-26 13:43:10 +01002800 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002801 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01002802
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002803 mutex_lock(&smmu_domain->init_mutex);
Will Deaconc752ce42014-06-25 22:46:31 +01002804 switch (attr) {
2805 case DOMAIN_ATTR_NESTING:
2806 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002807 ret = 0;
2808 break;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08002809 case DOMAIN_ATTR_PT_BASE_ADDR:
2810 *((phys_addr_t *)data) =
2811 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002812 ret = 0;
2813 break;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002814 case DOMAIN_ATTR_CONTEXT_BANK:
2815 /* context bank index isn't valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002816 if (smmu_domain->smmu == NULL) {
2817 ret = -ENODEV;
2818 break;
2819 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002820 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
2821 ret = 0;
2822 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002823 case DOMAIN_ATTR_TTBR0: {
2824 u64 val;
2825 struct arm_smmu_device *smmu = smmu_domain->smmu;
2826 /* not valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002827 if (smmu == NULL) {
2828 ret = -ENODEV;
2829 break;
2830 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002831 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2832 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
2833 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
2834 << (TTBRn_ASID_SHIFT);
2835 *((u64 *)data) = val;
2836 ret = 0;
2837 break;
2838 }
2839 case DOMAIN_ATTR_CONTEXTIDR:
2840 /* not valid until attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002841 if (smmu_domain->smmu == NULL) {
2842 ret = -ENODEV;
2843 break;
2844 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002845 *((u32 *)data) = smmu_domain->cfg.procid;
2846 ret = 0;
2847 break;
2848 case DOMAIN_ATTR_PROCID:
2849 *((u32 *)data) = smmu_domain->cfg.procid;
2850 ret = 0;
2851 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002852 case DOMAIN_ATTR_DYNAMIC:
2853 *((int *)data) = !!(smmu_domain->attributes
2854 & (1 << DOMAIN_ATTR_DYNAMIC));
2855 ret = 0;
2856 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002857 case DOMAIN_ATTR_NON_FATAL_FAULTS:
2858 *((int *)data) = !!(smmu_domain->attributes
2859 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
2860 ret = 0;
2861 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07002862 case DOMAIN_ATTR_S1_BYPASS:
2863 *((int *)data) = !!(smmu_domain->attributes
2864 & (1 << DOMAIN_ATTR_S1_BYPASS));
2865 ret = 0;
2866 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002867 case DOMAIN_ATTR_SECURE_VMID:
2868 *((int *)data) = smmu_domain->secure_vmid;
2869 ret = 0;
2870 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08002871 case DOMAIN_ATTR_PGTBL_INFO: {
2872 struct iommu_pgtbl_info *info = data;
2873
2874 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
2875 ret = -ENODEV;
2876 break;
2877 }
2878 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
2879 ret = 0;
2880 break;
2881 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002882 case DOMAIN_ATTR_FAST:
2883 *((int *)data) = !!(smmu_domain->attributes
2884 & (1 << DOMAIN_ATTR_FAST));
2885 ret = 0;
2886 break;
Patrick Daly1e279922017-09-06 15:57:45 -07002887 case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
2888 *((int *)data) = !!(smmu_domain->attributes
2889 & (1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR));
2890 ret = 0;
2891 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002892 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2893 *((int *)data) = !!(smmu_domain->attributes &
2894 (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
2895 ret = 0;
2896 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08002897 case DOMAIN_ATTR_EARLY_MAP:
2898 *((int *)data) = !!(smmu_domain->attributes
2899 & (1 << DOMAIN_ATTR_EARLY_MAP));
2900 ret = 0;
2901 break;
Mitchel Humpherys05314f32016-06-07 16:04:40 -07002902 case DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002903 if (!smmu_domain->smmu) {
2904 ret = -ENODEV;
2905 break;
2906 }
Liam Mark53cf2342016-12-20 11:36:07 -08002907 *((int *)data) = is_iommu_pt_coherent(smmu_domain);
2908 ret = 0;
2909 break;
2910 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
2911 *((int *)data) = !!(smmu_domain->attributes
2912 & (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
Mitchel Humpherys05314f32016-06-07 16:04:40 -07002913 ret = 0;
2914 break;
Charan Teja Reddyc682e472017-04-20 19:11:20 +05302915 case DOMAIN_ATTR_CB_STALL_DISABLE:
2916 *((int *)data) = !!(smmu_domain->attributes
2917 & (1 << DOMAIN_ATTR_CB_STALL_DISABLE));
2918 ret = 0;
2919 break;
Patrick Daly83174c12017-10-26 12:31:15 -07002920 case DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN:
Patrick Daly23301482017-10-12 16:18:25 -07002921 *((int *)data) = smmu_domain->qsmmuv500_errata2_min_align;
2922 ret = 0;
2923 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002924 default:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002925 ret = -ENODEV;
2926 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002927 }
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002928 mutex_unlock(&smmu_domain->init_mutex);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002929 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002930}
2931
2932static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
2933 enum iommu_attr attr, void *data)
2934{
Will Deacon518f7132014-11-14 17:17:54 +00002935 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01002936 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01002937
Will Deacon518f7132014-11-14 17:17:54 +00002938 mutex_lock(&smmu_domain->init_mutex);
2939
Will Deaconc752ce42014-06-25 22:46:31 +01002940 switch (attr) {
2941 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00002942 if (smmu_domain->smmu) {
2943 ret = -EPERM;
2944 goto out_unlock;
2945 }
2946
Will Deaconc752ce42014-06-25 22:46:31 +01002947 if (*(int *)data)
2948 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2949 else
2950 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2951
Will Deacon518f7132014-11-14 17:17:54 +00002952 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002953 case DOMAIN_ATTR_PROCID:
2954 if (smmu_domain->smmu != NULL) {
2955 dev_err(smmu_domain->smmu->dev,
2956 "cannot change procid attribute while attached\n");
2957 ret = -EBUSY;
2958 break;
2959 }
2960 smmu_domain->cfg.procid = *((u32 *)data);
2961 ret = 0;
2962 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002963 case DOMAIN_ATTR_DYNAMIC: {
2964 int dynamic = *((int *)data);
2965
2966 if (smmu_domain->smmu != NULL) {
2967 dev_err(smmu_domain->smmu->dev,
2968 "cannot change dynamic attribute while attached\n");
2969 ret = -EBUSY;
2970 break;
2971 }
2972
2973 if (dynamic)
2974 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
2975 else
2976 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
2977 ret = 0;
2978 break;
2979 }
2980 case DOMAIN_ATTR_CONTEXT_BANK:
2981 /* context bank can't be set while attached */
2982 if (smmu_domain->smmu != NULL) {
2983 ret = -EBUSY;
2984 break;
2985 }
2986 /* ... and it can only be set for dynamic contexts. */
2987 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
2988 ret = -EINVAL;
2989 break;
2990 }
2991
2992 /* this will be validated during attach */
2993 smmu_domain->cfg.cbndx = *((unsigned int *)data);
2994 ret = 0;
2995 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002996 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
2997 u32 non_fatal_faults = *((int *)data);
2998
2999 if (non_fatal_faults)
3000 smmu_domain->attributes |=
3001 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
3002 else
3003 smmu_domain->attributes &=
3004 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
3005 ret = 0;
3006 break;
3007 }
Patrick Dalye62d3362016-03-15 18:58:28 -07003008 case DOMAIN_ATTR_S1_BYPASS: {
3009 int bypass = *((int *)data);
3010
3011 /* bypass can't be changed while attached */
3012 if (smmu_domain->smmu != NULL) {
3013 ret = -EBUSY;
3014 break;
3015 }
3016 if (bypass)
3017 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
3018 else
3019 smmu_domain->attributes &=
3020 ~(1 << DOMAIN_ATTR_S1_BYPASS);
3021
3022 ret = 0;
3023 break;
3024 }
Patrick Daly8befb662016-08-17 20:03:28 -07003025 case DOMAIN_ATTR_ATOMIC:
3026 {
3027 int atomic_ctx = *((int *)data);
3028
3029 /* can't be changed while attached */
3030 if (smmu_domain->smmu != NULL) {
3031 ret = -EBUSY;
3032 break;
3033 }
3034 if (atomic_ctx)
3035 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
3036 else
3037 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
3038 break;
3039 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07003040 case DOMAIN_ATTR_SECURE_VMID:
3041 if (smmu_domain->secure_vmid != VMID_INVAL) {
3042 ret = -ENODEV;
3043 WARN(1, "secure vmid already set!");
3044 break;
3045 }
3046 smmu_domain->secure_vmid = *((int *)data);
3047 break;
Patrick Daly1e279922017-09-06 15:57:45 -07003048 case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
3049 if (*((int *)data))
3050 smmu_domain->attributes |=
3051 1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR;
3052 ret = 0;
3053 break;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003054 /*
3055 * fast_smmu_unmap_page() and fast_smmu_alloc_iova() both
3056 * expect that the bus/clock/regulator are already on. Thus also
 3057	 * force DOMAIN_ATTR_ATOMIC to be set.
3058 */
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003059 case DOMAIN_ATTR_FAST:
Patrick Daly0df84ac2017-10-11 17:32:41 -07003060 {
3061 int fast = *((int *)data);
3062
3063 if (fast) {
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003064 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003065 smmu_domain->attributes |= 1 << DOMAIN_ATTR_ATOMIC;
3066 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07003067 ret = 0;
3068 break;
Patrick Daly0df84ac2017-10-11 17:32:41 -07003069 }
Patrick Dalyce6786f2016-11-09 14:19:23 -08003070 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
3071 /* can't be changed while attached */
3072 if (smmu_domain->smmu != NULL) {
3073 ret = -EBUSY;
3074 break;
3075 }
3076 if (*((int *)data))
3077 smmu_domain->attributes |=
3078 1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
3079 ret = 0;
3080 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003081 case DOMAIN_ATTR_EARLY_MAP: {
3082 int early_map = *((int *)data);
3083
3084 ret = 0;
3085 if (early_map) {
3086 smmu_domain->attributes |=
3087 1 << DOMAIN_ATTR_EARLY_MAP;
3088 } else {
3089 if (smmu_domain->smmu)
3090 ret = arm_smmu_enable_s1_translations(
3091 smmu_domain);
3092
3093 if (!ret)
3094 smmu_domain->attributes &=
3095 ~(1 << DOMAIN_ATTR_EARLY_MAP);
3096 }
3097 break;
3098 }
Liam Mark53cf2342016-12-20 11:36:07 -08003099 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT: {
3100 int force_coherent = *((int *)data);
3101
3102 if (smmu_domain->smmu != NULL) {
3103 dev_err(smmu_domain->smmu->dev,
3104 "cannot change force coherent attribute while attached\n");
3105 ret = -EBUSY;
3106 break;
3107 }
3108
3109 if (force_coherent)
3110 smmu_domain->attributes |=
3111 1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
3112 else
3113 smmu_domain->attributes &=
3114 ~(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT);
3115
3116 ret = 0;
3117 break;
3118 }
3119
Charan Teja Reddyc682e472017-04-20 19:11:20 +05303120 case DOMAIN_ATTR_CB_STALL_DISABLE:
3121 if (*((int *)data))
3122 smmu_domain->attributes |=
3123 1 << DOMAIN_ATTR_CB_STALL_DISABLE;
3124 ret = 0;
3125 break;
Will Deaconc752ce42014-06-25 22:46:31 +01003126 default:
Will Deacon518f7132014-11-14 17:17:54 +00003127 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01003128 }
Will Deacon518f7132014-11-14 17:17:54 +00003129
3130out_unlock:
3131 mutex_unlock(&smmu_domain->init_mutex);
3132 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01003133}
3134
Robin Murphy7e96c742016-09-14 15:26:46 +01003135static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
3136{
3137 u32 fwid = 0;
3138
3139 if (args->args_count > 0)
3140 fwid |= (u16)args->args[0];
3141
3142 if (args->args_count > 1)
3143 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
3144
3145 return iommu_fwspec_add_ids(dev, &fwid, 1);
3146}
3147
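/*
 * Set SCTLR.M in the domain's context bank so that stage 1 translation
 * takes effect. Called when DOMAIN_ATTR_EARLY_MAP is cleared on a domain
 * that is already attached.
 */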
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003148static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain)
3149{
3150 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3151 struct arm_smmu_device *smmu = smmu_domain->smmu;
3152 void __iomem *cb_base;
3153 u32 reg;
3154 int ret;
3155
3156 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3157 ret = arm_smmu_power_on(smmu->pwr);
3158 if (ret)
3159 return ret;
3160
3161 reg = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3162 reg |= SCTLR_M;
3163
3164 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
3165 arm_smmu_power_off(smmu->pwr);
3166 return ret;
3167}
3168
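/*
 * Report whether the page table entry covering @iova was installed with
 * coherent (cacheable) attributes, as recorded by the io-pgtable code.
 */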
Liam Mark3ba41cf2016-12-09 14:39:04 -08003169static bool arm_smmu_is_iova_coherent(struct iommu_domain *domain,
3170 dma_addr_t iova)
3171{
3172 bool ret;
3173 unsigned long flags;
3174 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3175 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
3176
3177 if (!ops)
3178 return false;
3179
3180 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
3181 ret = ops->is_iova_coherent(ops, iova);
3182 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
3183 return ret;
3184}
3185
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003186static void arm_smmu_trigger_fault(struct iommu_domain *domain,
3187 unsigned long flags)
3188{
3189 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3190 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3191 struct arm_smmu_device *smmu;
3192 void __iomem *cb_base;
3193
3194 if (!smmu_domain->smmu) {
3195 pr_err("Can't trigger faults on non-attached domains\n");
3196 return;
3197 }
3198
3199 smmu = smmu_domain->smmu;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003200 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003201 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003202
3203 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3204 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
3205 flags, cfg->cbndx);
3206 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07003207 /* give the interrupt time to fire... */
3208 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003209
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003210 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003211}
3212
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003213static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
3214{
Patrick Dalyda765c62017-09-11 16:31:07 -07003215 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3216 const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;
3217
3218 tlb->tlb_flush_all(smmu_domain);
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003219}
3220
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003221static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
3222{
3223 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3224
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003225 return arm_smmu_power_on(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003226}
3227
3228static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
3229{
3230 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3231
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003232 arm_smmu_power_off(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003233}
3234
Will Deacon518f7132014-11-14 17:17:54 +00003235static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01003236 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01003237 .domain_alloc = arm_smmu_domain_alloc,
3238 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01003239 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07003240 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01003241 .map = arm_smmu_map,
3242 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07003243 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01003244 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07003245 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01003246 .add_device = arm_smmu_add_device,
3247 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02003248 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01003249 .domain_get_attr = arm_smmu_domain_get_attr,
3250 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy7e96c742016-09-14 15:26:46 +01003251 .of_xlate = arm_smmu_of_xlate,
Will Deacon518f7132014-11-14 17:17:54 +00003252 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003253 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003254 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003255 .enable_config_clocks = arm_smmu_enable_config_clocks,
3256 .disable_config_clocks = arm_smmu_disable_config_clocks,
Liam Mark3ba41cf2016-12-09 14:39:04 -08003257 .is_iova_coherent = arm_smmu_is_iova_coherent,
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07003258 .iova_to_pte = arm_smmu_iova_to_pte,
Will Deacon45ae7cf2013-06-24 18:31:25 +01003259};
3260
Patrick Dalyad441dd2016-09-15 15:50:46 -07003261#define IMPL_DEF1_MICRO_MMU_CTRL 0
3262#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
3263#define MICRO_MMU_CTRL_IDLE (1 << 3)
3264
3265/* Definitions for implementation-defined registers */
3266#define ACTLR_QCOM_OSH_SHIFT 28
3267#define ACTLR_QCOM_OSH 1
3268
3269#define ACTLR_QCOM_ISH_SHIFT 29
3270#define ACTLR_QCOM_ISH 1
3271
3272#define ACTLR_QCOM_NSH_SHIFT 30
3273#define ACTLR_QCOM_NSH 1
3274
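/*
 * QSMMUv2 halt/resume protocol: set LOCAL_HALT_REQ in the implementation
 * defined MICRO_MMU_CTRL register and poll for IDLE before touching state
 * that must not race with in-flight translations; clearing the bit resumes
 * normal operation.
 */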
3275static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003276{
3277 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003278 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003279
3280 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
3281 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
3282 0, 30000)) {
3283 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
3284 return -EBUSY;
3285 }
3286
3287 return 0;
3288}
3289
Patrick Dalyad441dd2016-09-15 15:50:46 -07003290static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003291{
3292 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3293 u32 reg;
3294
3295 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3296 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
3297 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3298
Patrick Dalyad441dd2016-09-15 15:50:46 -07003299 return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003300}
3301
Patrick Dalyad441dd2016-09-15 15:50:46 -07003302static int qsmmuv2_halt(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003303{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003304 return __qsmmuv2_halt(smmu, true);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003305}
3306
Patrick Dalyad441dd2016-09-15 15:50:46 -07003307static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003308{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003309 return __qsmmuv2_halt(smmu, false);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003310}
3311
Patrick Dalyad441dd2016-09-15 15:50:46 -07003312static void qsmmuv2_resume(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003313{
3314 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3315 u32 reg;
3316
3317 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3318 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
3319 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3320}
3321
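/*
 * QSMMUv2 specific reset: program the QCOM shareability overrides into each
 * context bank's ACTLR, then write the devicetree-supplied implementation
 * defined registers while the SMMU is halted.
 */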
Patrick Dalyad441dd2016-09-15 15:50:46 -07003322static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003323{
3324 int i;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003325 u32 val;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003326 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003327 void __iomem *cb_base;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003328
Patrick Dalyad441dd2016-09-15 15:50:46 -07003329 /*
3330 * SCTLR.M must be disabled here per ARM SMMUv2 spec
3331 * to prevent table walks with an inconsistent state.
3332 */
3333 for (i = 0; i < smmu->num_context_banks; ++i) {
3334 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3335 val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
3336 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
3337 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
3338 writel_relaxed(val, cb_base + ARM_SMMU_CB_ACTLR);
3339 }
3340
3341 /* Program implementation defined registers */
3342 qsmmuv2_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003343 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
3344 writel_relaxed(regs[i].value,
3345 ARM_SMMU_GR0(smmu) + regs[i].offset);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003346 qsmmuv2_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003347}
3348
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003349static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
3350 dma_addr_t iova)
Patrick Dalyad441dd2016-09-15 15:50:46 -07003351{
3352 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3353 struct arm_smmu_device *smmu = smmu_domain->smmu;
3354 int ret;
3355 phys_addr_t phys = 0;
3356 unsigned long flags;
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003357 u32 sctlr, sctlr_orig, fsr;
3358 void __iomem *cb_base;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003359
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003360 ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003361 if (ret)
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003362 return ret;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003363
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003364 spin_lock_irqsave(&smmu->atos_lock, flags);
3365 cb_base = ARM_SMMU_CB_BASE(smmu) +
3366 ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003367
3368 qsmmuv2_halt_nowait(smmu);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003369 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003370 qsmmuv2_wait_for_halt(smmu);
3371
3372 /* clear FSR to allow ATOS to log any faults */
3373 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
3374 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
3375
3376 /* disable stall mode momentarily */
3377 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3378 sctlr = sctlr_orig & ~SCTLR_CFCFG;
3379 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
3380
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003381 phys = __arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003382
3383 /* restore SCTLR */
3384 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
3385
3386 qsmmuv2_resume(smmu);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003387 spin_unlock_irqrestore(&smmu->atos_lock, flags);
3388
3389 arm_smmu_power_off(smmu_domain->smmu->pwr);
3390 return phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003391}
3392
3393struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
3394 .device_reset = qsmmuv2_device_reset,
3395 .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
Patrick Dalyad441dd2016-09-15 15:50:46 -07003396};
3397
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003398static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003399{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003400 int i;
3401 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003402 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003403 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003404
Peng Fan3ca37122016-05-03 21:50:30 +08003405 /*
3406 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
3407 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
3408 * bit is only present in MMU-500r2 onwards.
3409 */
3410 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
3411 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
3412 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
3413 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
3414 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
3415 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
3416 }
3417
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003418 /* Make sure all context banks are disabled and clear CB_FSR */
3419 for (i = 0; i < smmu->num_context_banks; ++i) {
3420 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3421 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
3422 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003423 /*
3424 * Disable MMU-500's not-particularly-beneficial next-page
3425 * prefetcher for the sake of errata #841119 and #826419.
3426 */
3427 if (smmu->model == ARM_MMU500) {
3428 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
3429 reg &= ~ARM_MMU500_ACTLR_CPRE;
3430 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
3431 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003432 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003433}
3434
3435static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
3436{
3437 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy468f4942016-09-12 17:13:49 +01003438 int i;
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003439 u32 reg;
3440
3441 /* clear global FSR */
3442 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3443 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3444
Robin Murphy468f4942016-09-12 17:13:49 +01003445 /*
3446 * Reset stream mapping groups: Initial values mark all SMRn as
3447 * invalid and all S2CRn as bypass unless overridden.
3448 */
Patrick Daly59b6d202017-06-12 13:12:15 -07003449 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
3450 for (i = 0; i < smmu->num_mapping_groups; ++i)
3451 arm_smmu_write_sme(smmu, i);
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003452
Patrick Daly59b6d202017-06-12 13:12:15 -07003453 arm_smmu_context_bank_reset(smmu);
3454 }
Will Deacon1463fe42013-07-31 19:21:27 +01003455
Will Deacon45ae7cf2013-06-24 18:31:25 +01003456 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003457 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
3458 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
3459
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003460 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003461
Will Deacon45ae7cf2013-06-24 18:31:25 +01003462 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003463 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003464
3465 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003466 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003467
Robin Murphy25a1c962016-02-10 14:25:33 +00003468 /* Enable client access, handling unmatched streams as appropriate */
3469 reg &= ~sCR0_CLIENTPD;
3470 if (disable_bypass)
3471 reg |= sCR0_USFCFG;
3472 else
3473 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003474
3475 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003476 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003477
3478 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003479 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003480
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003481 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3482 reg |= sCR0_VMID16EN;
3483
Patrick Daly7f377fe2017-10-06 17:37:10 -07003484	/* Force bypass transactions to be Non-Shareable and not IO-coherent */
3485 reg &= ~(sCR0_SHCFG_MASK << sCR0_SHCFG_SHIFT);
Prakash Gupta673a79f2017-11-16 18:07:00 +05303486 reg |= sCR0_SHCFG_NSH << sCR0_SHCFG_SHIFT;
Patrick Daly7f377fe2017-10-06 17:37:10 -07003487
Will Deacon45ae7cf2013-06-24 18:31:25 +01003488 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003489 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003490 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Dalyd7476202016-09-08 18:23:28 -07003491
3492 /* Manage any implementation defined features */
3493 arm_smmu_arch_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003494}
3495
3496static int arm_smmu_id_size_to_bits(int size)
3497{
3498 switch (size) {
3499 case 0:
3500 return 32;
3501 case 1:
3502 return 36;
3503 case 2:
3504 return 40;
3505 case 3:
3506 return 42;
3507 case 4:
3508 return 44;
3509 case 5:
3510 default:
3511 return 48;
3512 }
3513}
3514
Patrick Dalyda688822017-05-17 20:12:48 -07003515
3516/*
 3517 * Some context banks need to be transferred from the bootloader to HLOS in a way
3518 * that allows ongoing traffic. The current expectation is that these context
3519 * banks operate in bypass mode.
3520 * Additionally, there must be exactly one device in devicetree with stream-ids
3521 * overlapping those used by the bootloader.
3522 */
3523static int arm_smmu_alloc_cb(struct iommu_domain *domain,
3524 struct arm_smmu_device *smmu,
3525 struct device *dev)
3526{
3527 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Dalye72526b2017-07-18 16:21:44 -07003528 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Dalyda688822017-05-17 20:12:48 -07003529 u32 i, idx;
3530 int cb = -EINVAL;
3531 bool dynamic;
3532
Patrick Dalye72526b2017-07-18 16:21:44 -07003533 /*
3534 * Dynamic domains have already set cbndx through domain attribute.
3535 * Verify that they picked a valid value.
3536 */
Patrick Dalyda688822017-05-17 20:12:48 -07003537 dynamic = is_dynamic_domain(domain);
Patrick Dalye72526b2017-07-18 16:21:44 -07003538 if (dynamic) {
3539 cb = smmu_domain->cfg.cbndx;
3540 if (cb < smmu->num_context_banks)
3541 return cb;
3542 else
3543 return -EINVAL;
3544 }
Patrick Dalyda688822017-05-17 20:12:48 -07003545
3546 mutex_lock(&smmu->stream_map_mutex);
3547 for_each_cfg_sme(fwspec, i, idx) {
3548 if (smmu->s2crs[idx].cb_handoff)
3549 cb = smmu->s2crs[idx].cbndx;
3550 }
3551
Charan Teja Reddyf0758df2017-09-04 18:52:07 +05303552 if (cb < 0 && !arm_smmu_is_static_cb(smmu)) {
Patrick Dalyda688822017-05-17 20:12:48 -07003553 mutex_unlock(&smmu->stream_map_mutex);
3554 return __arm_smmu_alloc_bitmap(smmu->context_map,
3555 smmu->num_s2_context_banks,
3556 smmu->num_context_banks);
3557 }
3558
3559 for (i = 0; i < smmu->num_mapping_groups; i++) {
Patrick Daly2eb31362017-06-14 18:29:36 -07003560 if (smmu->s2crs[i].cb_handoff && smmu->s2crs[i].cbndx == cb) {
Patrick Dalyda688822017-05-17 20:12:48 -07003561 smmu->s2crs[i].cb_handoff = false;
3562 smmu->s2crs[i].count -= 1;
3563 }
3564 }
3565 mutex_unlock(&smmu->stream_map_mutex);
3566
3567 return cb;
3568}
3569
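/*
 * Record the SMR/S2CR state programmed by the bootloader so that live,
 * translating stream mappings (S2CR_TYPE_TRANS) are preserved until HLOS
 * takes over their context banks via arm_smmu_alloc_cb().
 */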
3570static int arm_smmu_handoff_cbs(struct arm_smmu_device *smmu)
3571{
3572 u32 i, raw_smr, raw_s2cr;
3573 struct arm_smmu_smr smr;
3574 struct arm_smmu_s2cr s2cr;
3575
3576 for (i = 0; i < smmu->num_mapping_groups; i++) {
3577 raw_smr = readl_relaxed(ARM_SMMU_GR0(smmu) +
3578 ARM_SMMU_GR0_SMR(i));
3579 if (!(raw_smr & SMR_VALID))
3580 continue;
3581
3582 smr.mask = (raw_smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
3583 smr.id = (u16)raw_smr;
3584 smr.valid = true;
3585
3586 raw_s2cr = readl_relaxed(ARM_SMMU_GR0(smmu) +
3587 ARM_SMMU_GR0_S2CR(i));
Patrick Daly4ea4bdf2017-08-29 19:24:49 -07003588 memset(&s2cr, 0, sizeof(s2cr));
Patrick Dalyda688822017-05-17 20:12:48 -07003589 s2cr.group = NULL;
3590 s2cr.count = 1;
3591 s2cr.type = (raw_s2cr >> S2CR_TYPE_SHIFT) & S2CR_TYPE_MASK;
3592 s2cr.privcfg = (raw_s2cr >> S2CR_PRIVCFG_SHIFT) &
3593 S2CR_PRIVCFG_MASK;
3594 s2cr.cbndx = (u8)raw_s2cr;
3595 s2cr.cb_handoff = true;
3596
3597 if (s2cr.type != S2CR_TYPE_TRANS)
3598 continue;
3599
3600 smmu->smrs[i] = smr;
3601 smmu->s2crs[i] = s2cr;
3602 bitmap_set(smmu->context_map, s2cr.cbndx, 1);
3603 dev_dbg(smmu->dev, "Handoff smr: %x s2cr: %x cb: %d\n",
3604 raw_smr, raw_s2cr, s2cr.cbndx);
3605 }
3606
3607 return 0;
3608}
3609
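/*
 * The optional "attach-impl-defs" property is a flat list of (offset, value)
 * pairs; each value is written to the implementation defined register at the
 * given GR0 offset during device reset (see qsmmuv2_device_reset()).
 */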
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003610static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
3611{
3612 struct device *dev = smmu->dev;
3613 int i, ntuples, ret;
3614 u32 *tuples;
3615 struct arm_smmu_impl_def_reg *regs, *regit;
3616
3617 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
3618 return 0;
3619
3620 ntuples /= sizeof(u32);
3621 if (ntuples % 2) {
3622 dev_err(dev,
3623 "Invalid number of attach-impl-defs registers: %d\n",
3624 ntuples);
3625 return -EINVAL;
3626 }
3627
3628 regs = devm_kmalloc(
3629 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
3630 GFP_KERNEL);
3631 if (!regs)
3632 return -ENOMEM;
3633
3634 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
3635 if (!tuples)
3636 return -ENOMEM;
3637
3638 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
3639 tuples, ntuples);
3640 if (ret)
3641 return ret;
3642
3643 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
3644 regit->offset = tuples[i];
3645 regit->value = tuples[i + 1];
3646 }
3647
3648 devm_kfree(dev, tuples);
3649
3650 smmu->impl_def_attach_registers = regs;
3651 smmu->num_impl_def_attach_registers = ntuples / 2;
3652
3653 return 0;
3654}
3655
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003656
3657static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003658{
3659 const char *cname;
3660 struct property *prop;
3661 int i;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003662 struct device *dev = pwr->dev;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003663
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003664 pwr->num_clocks =
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003665 of_property_count_strings(dev->of_node, "clock-names");
3666
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003667 if (pwr->num_clocks < 1) {
3668 pwr->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003669 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07003670 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003671
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003672 pwr->clocks = devm_kzalloc(
3673 dev, sizeof(*pwr->clocks) * pwr->num_clocks,
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003674 GFP_KERNEL);
3675
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003676 if (!pwr->clocks)
3677 return -ENOMEM;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003678
3679 i = 0;
3680 of_property_for_each_string(dev->of_node, "clock-names",
3681 prop, cname) {
3682 struct clk *c = devm_clk_get(dev, cname);
3683
3684 if (IS_ERR(c)) {
3685 dev_err(dev, "Couldn't get clock: %s",
3686 cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07003687 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003688 }
3689
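		/*
		 * A clock that reads back a zero rate has never been
		 * configured; program a rate rounded from 1000 Hz so that a
		 * sane rate is in place before the clock is first enabled.
		 */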
3690 if (clk_get_rate(c) == 0) {
3691 long rate = clk_round_rate(c, 1000);
3692
3693 clk_set_rate(c, rate);
3694 }
3695
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003696 pwr->clocks[i] = c;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003697
3698 ++i;
3699 }
3700 return 0;
3701}
3702
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003703static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003704{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003705 const char *cname;
3706 struct property *prop;
3707 int i, ret = 0;
3708 struct device *dev = pwr->dev;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003709
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003710 pwr->num_gdscs =
3711 of_property_count_strings(dev->of_node, "qcom,regulator-names");
3712
3713 if (pwr->num_gdscs < 1) {
3714 pwr->num_gdscs = 0;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003715 return 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003716 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003717
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003718 pwr->gdscs = devm_kzalloc(
3719 dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
3720
3721 if (!pwr->gdscs)
3722 return -ENOMEM;
3723
Prakash Guptafad87ca2017-05-16 12:13:02 +05303724 if (!of_property_read_u32(dev->of_node,
3725 "qcom,deferred-regulator-disable-delay",
3726 &(pwr->regulator_defer)))
3727 dev_info(dev, "regulator defer delay %d\n",
3728 pwr->regulator_defer);
3729
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003730 i = 0;
3731 of_property_for_each_string(dev->of_node, "qcom,regulator-names",
3732 prop, cname)
Patrick Daly86396be2017-04-17 18:08:45 -07003733 pwr->gdscs[i++].supply = cname;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003734
3735 ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
3736 return ret;
3737}
3738
3739static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
3740{
3741 struct device *dev = pwr->dev;
3742
3743 /* We don't want the bus APIs to print an error message */
3744 if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
3745 dev_dbg(dev, "No bus scaling info\n");
3746 return 0;
3747 }
3748
3749 pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
3750 if (!pwr->bus_dt_data) {
3751 dev_err(dev, "Unable to read bus-scaling from devicetree\n");
3752 return -EINVAL;
3753 }
3754
3755 pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
3756 if (!pwr->bus_client) {
3757 dev_err(dev, "Bus client registration failed\n");
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003758 return -EINVAL;
3759 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003760
3761 return 0;
3762}
3763
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003764/*
3765 * Cleanup done by devm. Any non-devm resources must clean up themselves.
3766 */
3767static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
3768 struct platform_device *pdev)
Patrick Daly2764f952016-09-06 19:22:44 -07003769{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003770 struct arm_smmu_power_resources *pwr;
3771 int ret;
Patrick Daly2764f952016-09-06 19:22:44 -07003772
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003773 pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
3774 if (!pwr)
3775 return ERR_PTR(-ENOMEM);
Patrick Daly2764f952016-09-06 19:22:44 -07003776
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003777 pwr->dev = &pdev->dev;
3778 pwr->pdev = pdev;
3779 mutex_init(&pwr->power_lock);
3780 spin_lock_init(&pwr->clock_refs_lock);
Patrick Daly2764f952016-09-06 19:22:44 -07003781
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003782 ret = arm_smmu_init_clocks(pwr);
3783 if (ret)
3784 return ERR_PTR(ret);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003785
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003786 ret = arm_smmu_init_regulators(pwr);
3787 if (ret)
3788 return ERR_PTR(ret);
3789
3790 ret = arm_smmu_init_bus_scaling(pwr);
3791 if (ret)
3792 return ERR_PTR(ret);
3793
3794 return pwr;
Patrick Daly2764f952016-09-06 19:22:44 -07003795}
3796
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003797/*
Patrick Dalyabeee952017-04-13 18:14:59 -07003798 * Bus APIs are not devm-safe, so the bus client must be unregistered here.
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003799 */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003800static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003801{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003802 msm_bus_scale_unregister_client(pwr->bus_client);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003803}
3804
Will Deacon45ae7cf2013-06-24 18:31:25 +01003805static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
3806{
3807 unsigned long size;
3808 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
3809 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003810 bool cttw_dt, cttw_reg;
Robin Murphya754fd12016-09-12 17:13:50 +01003811 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003812
Mitchel Humpherysba822582015-10-20 11:37:41 -07003813 dev_dbg(smmu->dev, "probing hardware configuration...\n");
3814 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01003815 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003816
3817 /* ID0 */
3818 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01003819
3820 /* Restrict available stages based on module parameter */
3821 if (force_stage == 1)
3822 id &= ~(ID0_S2TS | ID0_NTS);
3823 else if (force_stage == 2)
3824 id &= ~(ID0_S1TS | ID0_NTS);
3825
Will Deacon45ae7cf2013-06-24 18:31:25 +01003826 if (id & ID0_S1TS) {
3827 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003828 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003829 }
3830
3831 if (id & ID0_S2TS) {
3832 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003833 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003834 }
3835
3836 if (id & ID0_NTS) {
3837 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003838 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003839 }
3840
3841 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01003842 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003843 dev_err(smmu->dev, "\tno translation support!\n");
3844 return -ENODEV;
3845 }
3846
Robin Murphyb7862e32016-04-13 18:13:03 +01003847 if ((id & ID0_S1TS) &&
3848 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003849 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003850 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003851 }
3852
Robin Murphybae2c2d2015-07-29 19:46:05 +01003853 /*
3854 * In order for DMA API calls to work properly, we must defer to what
3855 * the DT says about coherency, regardless of what the hardware claims.
3856 * Fortunately, this also opens up a workaround for systems where the
3857 * ID register value has ended up configured incorrectly.
3858 */
3859 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
3860 cttw_reg = !!(id & ID0_CTTW);
3861 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003862 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003863 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003864 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01003865 cttw_dt ? "" : "non-");
3866 if (cttw_dt != cttw_reg)
3867 dev_notice(smmu->dev,
3868 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003869
Robin Murphy53867802016-09-12 17:13:48 +01003870 /* Max. number of entries we have for stream matching/indexing */
3871 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
3872 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003873 if (id & ID0_SMS) {
Robin Murphy53867802016-09-12 17:13:48 +01003874 u32 smr;
Patrick Daly937de532016-12-12 18:44:09 -08003875 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003876
3877 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy53867802016-09-12 17:13:48 +01003878 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
3879 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003880 dev_err(smmu->dev,
3881 "stream-matching supported, but no SMRs present!\n");
3882 return -ENODEV;
3883 }
3884
Robin Murphy53867802016-09-12 17:13:48 +01003885 /*
3886 * SMR.ID bits may not be preserved if the corresponding MASK
3887 * bits are set, so check each one separately. We can reject
3888 * masters later if they try to claim IDs outside these masks.
3889 */
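		/*
		 * Find an SMR that is not currently valid (e.g. one not left
		 * programmed by the bootloader) to probe with, so that a live
		 * entry is not clobbered.
		 */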
Patrick Daly937de532016-12-12 18:44:09 -08003890 for (i = 0; i < size; i++) {
3891 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
3892 if (!(smr & SMR_VALID))
3893 break;
3894 }
3895 if (i == size) {
3896 dev_err(smmu->dev,
3897 "Unable to compute streamid_masks\n");
3898 return -ENODEV;
3899 }
3900
Robin Murphy53867802016-09-12 17:13:48 +01003901 smr = smmu->streamid_mask << SMR_ID_SHIFT;
Patrick Daly937de532016-12-12 18:44:09 -08003902 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
3903 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
Robin Murphy53867802016-09-12 17:13:48 +01003904 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003905
Robin Murphy53867802016-09-12 17:13:48 +01003906 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
Patrick Daly937de532016-12-12 18:44:09 -08003907 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
3908 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
Robin Murphy53867802016-09-12 17:13:48 +01003909 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
Dhaval Patel031d7462015-05-09 14:47:29 -07003910
Robin Murphy468f4942016-09-12 17:13:49 +01003911 /* Zero-initialised to mark as invalid */
3912 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
3913 GFP_KERNEL);
3914 if (!smmu->smrs)
3915 return -ENOMEM;
3916
Robin Murphy53867802016-09-12 17:13:48 +01003917 dev_notice(smmu->dev,
3918			   "\tstream matching with %lu register groups, mask 0x%x\n",
3919 size, smmu->smr_mask_mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003920 }
Robin Murphya754fd12016-09-12 17:13:50 +01003921 /* s2cr->type == 0 means translation, so initialise explicitly */
3922 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
3923 GFP_KERNEL);
3924 if (!smmu->s2crs)
3925 return -ENOMEM;
3926 for (i = 0; i < size; i++)
3927 smmu->s2crs[i] = s2cr_init_val;
3928
Robin Murphy53867802016-09-12 17:13:48 +01003929 smmu->num_mapping_groups = size;
Robin Murphy6668f692016-09-12 17:13:54 +01003930 mutex_init(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003931
Robin Murphy7602b872016-04-28 17:12:09 +01003932 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
3933 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
3934 if (!(id & ID0_PTFS_NO_AARCH32S))
3935 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
3936 }
3937
Will Deacon45ae7cf2013-06-24 18:31:25 +01003938 /* ID1 */
3939 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01003940 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003941
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003942 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00003943 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01003944 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003945 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07003946 dev_warn(smmu->dev,
3947 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
3948 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003949
Will Deacon518f7132014-11-14 17:17:54 +00003950 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003951 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
3952 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
3953 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
3954 return -ENODEV;
3955 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07003956 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01003957 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01003958 /*
3959 * Cavium CN88xx erratum #27704.
3960 * Ensure ASID and VMID allocation is unique across all SMMUs in
3961 * the system.
3962 */
3963 if (smmu->model == CAVIUM_SMMUV2) {
3964 smmu->cavium_id_base =
3965 atomic_add_return(smmu->num_context_banks,
3966 &cavium_smmu_context_count);
3967 smmu->cavium_id_base -= smmu->num_context_banks;
3968 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003969
3970 /* ID2 */
3971 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
3972 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003973 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003974
Will Deacon518f7132014-11-14 17:17:54 +00003975 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003976 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003977 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003978
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003979 if (id & ID2_VMID16)
3980 smmu->features |= ARM_SMMU_FEAT_VMID16;
3981
Robin Murphyf1d84542015-03-04 16:41:05 +00003982 /*
3983 * What the page table walker can address actually depends on which
3984 * descriptor format is in use, but since a) we don't know that yet,
3985 * and b) it can vary per context bank, this will have to do...
3986 */
3987 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
3988 dev_warn(smmu->dev,
3989 "failed to set DMA mask for table walker\n");
3990
Robin Murphyb7862e32016-04-13 18:13:03 +01003991 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00003992 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01003993 if (smmu->version == ARM_SMMU_V1_64K)
3994 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003995 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003996 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00003997 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00003998 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01003999 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00004000 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01004001 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00004002 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01004003 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004004 }
4005
Robin Murphy7602b872016-04-28 17:12:09 +01004006 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01004007 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01004008 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01004009 if (smmu->features &
4010 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01004011 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01004012 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01004013 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01004014 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01004015 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01004016
Robin Murphyd5466352016-05-09 17:20:09 +01004017 if (arm_smmu_ops.pgsize_bitmap == -1UL)
4018 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
4019 else
4020 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07004021 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01004022 smmu->pgsize_bitmap);
4023
Will Deacon518f7132014-11-14 17:17:54 +00004024
Will Deacon28d60072014-09-01 16:24:48 +01004025 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004026 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
4027 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01004028
4029 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004030 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
4031 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01004032
Will Deacon45ae7cf2013-06-24 18:31:25 +01004033 return 0;
4034}
4035
Robin Murphy67b65a32016-04-13 18:12:57 +01004036struct arm_smmu_match_data {
4037 enum arm_smmu_arch_version version;
4038 enum arm_smmu_implementation model;
Patrick Dalyd7476202016-09-08 18:23:28 -07004039 struct arm_smmu_arch_ops *arch_ops;
Robin Murphy67b65a32016-04-13 18:12:57 +01004040};
4041
Patrick Dalyd7476202016-09-08 18:23:28 -07004042#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops) \
4043static struct arm_smmu_match_data name = { \
4044.version = ver, \
4045.model = imp, \
4046.arch_ops = ops, \
4047}
Robin Murphy67b65a32016-04-13 18:12:57 +01004048
Patrick Daly1f8a2882016-09-12 17:32:05 -07004049struct arm_smmu_arch_ops qsmmuv500_arch_ops;
4050
Patrick Dalyd7476202016-09-08 18:23:28 -07004051ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
4052ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
4053ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
4054ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
4055ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
Patrick Dalyad441dd2016-09-15 15:50:46 -07004056ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004057ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
4058 &qsmmuv500_arch_ops);
Robin Murphy67b65a32016-04-13 18:12:57 +01004059
Joerg Roedel09b52692014-10-02 12:24:45 +02004060static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01004061 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
4062 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
4063 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01004064 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01004065 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01004066 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07004067 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Patrick Daly1f8a2882016-09-12 17:32:05 -07004068 { .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
Robin Murphy09360402014-08-28 17:51:59 +01004069 { },
4070};
4071MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
4072
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004073
4074static int arm_smmu_of_iommu_configure_fixup(struct device *dev, void *data)
4075{
4076 if (!dev->iommu_fwspec)
4077 of_iommu_configure(dev, dev->of_node);
4078 return 0;
4079}
4080
Patrick Daly000a2f22017-02-13 22:18:12 -08004081static int arm_smmu_add_device_fixup(struct device *dev, void *data)
4082{
4083 struct iommu_ops *ops = data;
4084
4085 ops->add_device(dev);
4086 return 0;
4087}
4088
Patrick Daly1f8a2882016-09-12 17:32:05 -07004089static int qsmmuv500_tbu_register(struct device *dev, void *data);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004090static int arm_smmu_device_dt_probe(struct platform_device *pdev)
4091{
Robin Murphy67b65a32016-04-13 18:12:57 +01004092 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004093 struct resource *res;
4094 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004095 struct device *dev = &pdev->dev;
Robin Murphyd5b41782016-09-14 15:21:39 +01004096 int num_irqs, i, err;
Robin Murphy7e96c742016-09-14 15:26:46 +01004097 bool legacy_binding;
4098
4099 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
4100 if (legacy_binding && !using_generic_binding) {
4101 if (!using_legacy_binding)
4102 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
4103 using_legacy_binding = true;
4104 } else if (!legacy_binding && !using_legacy_binding) {
4105 using_generic_binding = true;
4106 } else {
4107 dev_err(dev, "not probing due to mismatched DT properties\n");
4108 return -ENODEV;
4109 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004110
4111 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
4112 if (!smmu) {
4113 dev_err(dev, "failed to allocate arm_smmu_device\n");
4114 return -ENOMEM;
4115 }
4116 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08004117 spin_lock_init(&smmu->atos_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07004118 idr_init(&smmu->asid_idr);
4119 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004120
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004121 data = of_device_get_match_data(dev);
Robin Murphy67b65a32016-04-13 18:12:57 +01004122 smmu->version = data->version;
4123 smmu->model = data->model;
Patrick Dalyd7476202016-09-08 18:23:28 -07004124 smmu->arch_ops = data->arch_ops;
Robin Murphy09360402014-08-28 17:51:59 +01004125
Will Deacon45ae7cf2013-06-24 18:31:25 +01004126 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01004127 smmu->base = devm_ioremap_resource(dev, res);
4128 if (IS_ERR(smmu->base))
4129 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004130 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004131
4132 if (of_property_read_u32(dev->of_node, "#global-interrupts",
4133 &smmu->num_global_irqs)) {
4134 dev_err(dev, "missing #global-interrupts property\n");
4135 return -ENODEV;
4136 }
4137
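	/*
	 * Interrupts are listed with the global fault IRQs first; anything
	 * beyond #global-interrupts is treated as a context-bank interrupt.
	 */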
4138 num_irqs = 0;
4139 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
4140 num_irqs++;
4141 if (num_irqs > smmu->num_global_irqs)
4142 smmu->num_context_irqs++;
4143 }
4144
Andreas Herrmann44a08de2013-10-01 13:39:07 +01004145 if (!smmu->num_context_irqs) {
4146 dev_err(dev, "found %d interrupts but expected at least %d\n",
4147 num_irqs, smmu->num_global_irqs + 1);
4148 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004149 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004150
4151 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
4152 GFP_KERNEL);
4153 if (!smmu->irqs) {
4154 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
4155 return -ENOMEM;
4156 }
4157
4158 for (i = 0; i < num_irqs; ++i) {
4159 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07004160
Will Deacon45ae7cf2013-06-24 18:31:25 +01004161 if (irq < 0) {
4162 dev_err(dev, "failed to get irq index %d\n", i);
4163 return -ENODEV;
4164 }
4165 smmu->irqs[i] = irq;
4166 }
4167
Dhaval Patel031d7462015-05-09 14:47:29 -07004168 parse_driver_options(smmu);
4169
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004170 smmu->pwr = arm_smmu_init_power_resources(pdev);
4171 if (IS_ERR(smmu->pwr))
4172 return PTR_ERR(smmu->pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004173
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004174 err = arm_smmu_power_on(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004175 if (err)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004176 goto out_exit_power_resources;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004177
4178 err = arm_smmu_device_cfg_probe(smmu);
4179 if (err)
4180 goto out_power_off;
4181
Patrick Dalyda688822017-05-17 20:12:48 -07004182 err = arm_smmu_handoff_cbs(smmu);
4183 if (err)
4184 goto out_power_off;
4185
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004186 err = arm_smmu_parse_impl_def_registers(smmu);
4187 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004188 goto out_power_off;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004189
Robin Murphyb7862e32016-04-13 18:13:03 +01004190 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01004191 smmu->num_context_banks != smmu->num_context_irqs) {
4192 dev_err(dev,
Mitchel Humpherys73230ce2014-12-11 17:18:02 -08004193 "found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
4194 smmu->num_context_irqs, smmu->num_context_banks,
4195 smmu->num_context_banks);
4196 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004197 }
4198
Will Deacon45ae7cf2013-06-24 18:31:25 +01004199 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08004200 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
4201 NULL, arm_smmu_global_fault,
4202 IRQF_ONESHOT | IRQF_SHARED,
4203 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004204 if (err) {
4205 dev_err(dev, "failed to request global IRQ %d (%u)\n",
4206 i, smmu->irqs[i]);
Robin Murphyd5b41782016-09-14 15:21:39 +01004207 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004208 }
4209 }
4210
Patrick Dalyd7476202016-09-08 18:23:28 -07004211 err = arm_smmu_arch_init(smmu);
4212 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004213 goto out_power_off;
Patrick Dalyd7476202016-09-08 18:23:28 -07004214
Robin Murphy06e393e2016-09-12 17:13:55 +01004215 of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004216 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01004217 arm_smmu_device_reset(smmu);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004218 arm_smmu_power_off(smmu->pwr);
Patrick Dalyd7476202016-09-08 18:23:28 -07004219
Patrick Daly8e3371a2017-02-13 22:14:53 -08004220 INIT_LIST_HEAD(&smmu->list);
4221 spin_lock(&arm_smmu_devices_lock);
4222 list_add(&smmu->list, &arm_smmu_devices);
4223 spin_unlock(&arm_smmu_devices_lock);
4224
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004225 /* bus_set_iommu depends on this. */
4226 bus_for_each_dev(&platform_bus_type, NULL, NULL,
4227 arm_smmu_of_iommu_configure_fixup);
4228
Robin Murphy7e96c742016-09-14 15:26:46 +01004229 /* Oh, for a proper bus abstraction */
4230 if (!iommu_present(&platform_bus_type))
4231 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
Patrick Daly000a2f22017-02-13 22:18:12 -08004232 else
4233 bus_for_each_dev(&platform_bus_type, NULL, &arm_smmu_ops,
4234 arm_smmu_add_device_fixup);
Robin Murphy7e96c742016-09-14 15:26:46 +01004235#ifdef CONFIG_ARM_AMBA
4236 if (!iommu_present(&amba_bustype))
4237 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
4238#endif
4239#ifdef CONFIG_PCI
4240 if (!iommu_present(&pci_bus_type)) {
4241 pci_request_acs();
4242 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
4243 }
4244#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01004245 return 0;
4246
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004247out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004248 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004249
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004250out_exit_power_resources:
4251 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004252
Will Deacon45ae7cf2013-06-24 18:31:25 +01004253 return err;
4254}
4255
4256static int arm_smmu_device_remove(struct platform_device *pdev)
4257{
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004258 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004259
4260 if (!smmu)
4261 return -ENODEV;
4262
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004263 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07004264 return -EINVAL;
4265
Will Deaconecfadb62013-07-31 19:21:28 +01004266 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004267 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004268
Patrick Dalyc190d932016-08-30 17:23:28 -07004269 idr_destroy(&smmu->asid_idr);
4270
Will Deacon45ae7cf2013-06-24 18:31:25 +01004271 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07004272 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004273 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004274
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004275 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004276
Will Deacon45ae7cf2013-06-24 18:31:25 +01004277 return 0;
4278}
4279
Will Deacon45ae7cf2013-06-24 18:31:25 +01004280static struct platform_driver arm_smmu_driver = {
4281 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004282 .name = "arm-smmu",
4283 .of_match_table = of_match_ptr(arm_smmu_of_match),
4284 },
4285 .probe = arm_smmu_device_dt_probe,
4286 .remove = arm_smmu_device_remove,
4287};
4288
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004289static struct platform_driver qsmmuv500_tbu_driver;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004290static int __init arm_smmu_init(void)
4291{
Robin Murphy7e96c742016-09-14 15:26:46 +01004292 static bool registered;
4293 int ret = 0;
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004294 ktime_t cur;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004295
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004296 if (registered)
4297 return 0;
4298
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004299 cur = ktime_get();
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004300 ret = platform_driver_register(&qsmmuv500_tbu_driver);
4301 if (ret)
4302 return ret;
4303
4304 ret = platform_driver_register(&arm_smmu_driver);
4305 registered = !ret;
Sudarshan Rajagopalan35b7cdb2017-10-13 11:23:52 -07004306 trace_smmu_init(ktime_us_delta(ktime_get(), cur));
4307
Robin Murphy7e96c742016-09-14 15:26:46 +01004308 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004309}
4310
4311static void __exit arm_smmu_exit(void)
4312{
4313 return platform_driver_unregister(&arm_smmu_driver);
4314}
4315
Andreas Herrmannb1950b22013-10-01 13:39:05 +01004316subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004317module_exit(arm_smmu_exit);
4318
Robin Murphy7e96c742016-09-14 15:26:46 +01004319static int __init arm_smmu_of_init(struct device_node *np)
4320{
4321 int ret = arm_smmu_init();
4322
4323 if (ret)
4324 return ret;
4325
4326 if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
4327 return -ENODEV;
4328
4329 return 0;
4330}
4331IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
4332IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
4333IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
4334IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
4335IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
4336IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
Robin Murphy7e96c742016-09-14 15:26:46 +01004337
Patrick Dalya0fddb62017-03-27 19:26:59 -07004338#define TCU_HW_VERSION_HLOS1 (0x18)
4339
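/*
 * Per-TBU debug interface used to halt the TBU and to issue software ECATS
 * (address translation) requests.  Offsets are relative to the TBU "base"
 * register region described in the TBU device-tree node.
 */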
Patrick Daly1f8a2882016-09-12 17:32:05 -07004340#define DEBUG_SID_HALT_REG 0x0
4341#define DEBUG_SID_HALT_VAL (0x1 << 16)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004342#define DEBUG_SID_HALT_SID_MASK 0x3ff
4343
4344#define DEBUG_VA_ADDR_REG 0x8
4345
4346#define DEBUG_TXN_TRIGG_REG 0x18
4347#define DEBUG_TXN_AXPROT_SHIFT 6
4348#define DEBUG_TXN_AXCACHE_SHIFT 2
4349#define DEBUG_TRX_WRITE (0x1 << 1)
4350#define DEBUG_TXN_READ (0x0 << 1)
4351#define DEBUG_TXN_TRIGGER 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004352
4353#define DEBUG_SR_HALT_ACK_REG 0x20
4354#define DEBUG_SR_HALT_ACK_VAL (0x1 << 1)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004355#define DEBUG_SR_ECATS_RUNNING_VAL (0x1 << 0)
4356
4357#define DEBUG_PAR_REG 0x28
4358#define DEBUG_PAR_PA_MASK ((0x1ULL << 36) - 1)
4359#define DEBUG_PAR_PA_SHIFT 12
4360#define DEBUG_PAR_FAULT_VAL 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004361
Patrick Daly8c1202b2017-05-10 15:42:30 -07004362#define TBU_DBG_TIMEOUT_US 100
Patrick Daly1f8a2882016-09-12 17:32:05 -07004363
Patrick Daly23301482017-10-12 16:18:25 -07004364#define QSMMUV500_ACTLR_DEEP_PREFETCH_MASK 0x3
4365#define QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT 0x8
4366
Patrick Daly03330cc2017-08-11 14:56:38 -07004367
4368struct actlr_setting {
4369 struct arm_smmu_smr smr;
4370 u32 actlr;
4371};
4372
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004373struct qsmmuv500_archdata {
4374 struct list_head tbus;
Patrick Dalya0fddb62017-03-27 19:26:59 -07004375 void __iomem *tcu_base;
4376 u32 version;
Patrick Dalyda765c62017-09-11 16:31:07 -07004377
4378 struct actlr_setting *actlrs;
4379 u32 actlr_tbl_size;
4380
4381 struct arm_smmu_smr *errata1_clients;
4382 u32 num_errata1_clients;
4383 remote_spinlock_t errata1_lock;
4384 ktime_t last_tlbi_ktime;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004385};
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004386#define get_qsmmuv500_archdata(smmu) \
4387 ((struct qsmmuv500_archdata *)(smmu->archdata))
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004388
Patrick Daly1f8a2882016-09-12 17:32:05 -07004389struct qsmmuv500_tbu_device {
4390 struct list_head list;
4391 struct device *dev;
4392 struct arm_smmu_device *smmu;
4393 void __iomem *base;
4394 void __iomem *status_reg;
4395
4396 struct arm_smmu_power_resources *pwr;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004397 u32 sid_start;
4398 u32 num_sids;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004399
4400 /* Protects halt count */
4401 spinlock_t halt_lock;
4402 u32 halt_count;
4403};
4404
Patrick Daly03330cc2017-08-11 14:56:38 -07004405struct qsmmuv500_group_iommudata {
4406 bool has_actlr;
4407 u32 actlr;
4408};
4409#define to_qsmmuv500_group_iommudata(group) \
4410 ((struct qsmmuv500_group_iommudata *) \
4411 (iommu_group_get_iommudata(group)))
4412
4413
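/* Two SMRs overlap if their IDs agree on every bit that neither entry masks. */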
4414static bool arm_smmu_fwspec_match_smr(struct iommu_fwspec *fwspec,
Patrick Dalyda765c62017-09-11 16:31:07 -07004415 struct arm_smmu_smr *smr)
4416{
4417 struct arm_smmu_smr *smr2;
Patrick Daly03330cc2017-08-11 14:56:38 -07004418 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Patrick Dalyda765c62017-09-11 16:31:07 -07004419 int i, idx;
4420
Patrick Daly03330cc2017-08-11 14:56:38 -07004421 for_each_cfg_sme(fwspec, i, idx) {
4422 smr2 = &smmu->smrs[idx];
Patrick Dalyda765c62017-09-11 16:31:07 -07004423 /* Continue if table entry does not match */
4424 if ((smr->id ^ smr2->id) & ~(smr->mask | smr2->mask))
4425 continue;
4426 return true;
4427 }
4428 return false;
4429}
4430
4431#define ERRATA1_REMOTE_SPINLOCK "S:6"
4432#define ERRATA1_TLBI_INTERVAL_US 10
4433static bool
4434qsmmuv500_errata1_required(struct arm_smmu_domain *smmu_domain,
4435 struct qsmmuv500_archdata *data)
4436{
4437 bool ret = false;
4438 int j;
4439 struct arm_smmu_smr *smr;
Patrick Daly03330cc2017-08-11 14:56:38 -07004440 struct iommu_fwspec *fwspec;
Patrick Dalyda765c62017-09-11 16:31:07 -07004441
4442 if (smmu_domain->qsmmuv500_errata1_init)
4443 return smmu_domain->qsmmuv500_errata1_client;
4444
Patrick Daly03330cc2017-08-11 14:56:38 -07004445 fwspec = smmu_domain->dev->iommu_fwspec;
Patrick Dalyda765c62017-09-11 16:31:07 -07004446 for (j = 0; j < data->num_errata1_clients; j++) {
4447 smr = &data->errata1_clients[j];
Patrick Daly03330cc2017-08-11 14:56:38 -07004448 if (arm_smmu_fwspec_match_smr(fwspec, smr)) {
Patrick Dalyda765c62017-09-11 16:31:07 -07004449 ret = true;
4450 break;
4451 }
4452 }
4453
4454 smmu_domain->qsmmuv500_errata1_init = true;
4455 smmu_domain->qsmmuv500_errata1_client = ret;
4456 return ret;
4457}
4458
Patrick Daly86960052017-12-04 18:53:13 -08004459#define SCM_CONFIG_ERRATA1_CLIENT_ALL 0x2
4460#define SCM_CONFIG_ERRATA1 0x3
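/*
 * Issue TLBIALL and give the TLBSYNC a short window to complete.  If it does
 * not, call into TZ (SCM_CONFIG_ERRATA1) to disable the errata workaround for
 * all clients, throttle the NoC while waiting for the sync to drain, then
 * restore the workaround.  Failure along this path leaves the IOMMU in an
 * unrecoverable state, hence the BUG()s.
 */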
Patrick Dalyda765c62017-09-11 16:31:07 -07004461static void __qsmmuv500_errata1_tlbiall(struct arm_smmu_domain *smmu_domain)
4462{
4463 struct arm_smmu_device *smmu = smmu_domain->smmu;
4464 struct device *dev = smmu_domain->dev;
4465 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
4466 void __iomem *base;
Patrick Daly86960052017-12-04 18:53:13 -08004467 int ret;
Patrick Dalyda765c62017-09-11 16:31:07 -07004468 ktime_t cur;
4469 u32 val;
Patrick Daly86960052017-12-04 18:53:13 -08004470 struct scm_desc desc = {
4471 .args[0] = SCM_CONFIG_ERRATA1_CLIENT_ALL,
4472 .args[1] = false,
4473 .arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
4474 };
Patrick Dalyda765c62017-09-11 16:31:07 -07004475
4476 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
4477 writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
4478 writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
Patrick Daly86960052017-12-04 18:53:13 -08004479 if (!readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
4480 !(val & TLBSTATUS_SACTIVE), 0, 100))
4481 return;
4482
4483 ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_SMMU_PROGRAM,
4484 SCM_CONFIG_ERRATA1),
4485 &desc);
4486 if (ret) {
4487 dev_err(smmu->dev, "Calling into TZ to disable ERRATA1 failed - IOMMU hardware in bad state\n");
4488 BUG();
4489 return;
4490 }
4491
4492 cur = ktime_get();
4493 trace_tlbi_throttle_start(dev, 0);
4494 msm_bus_noc_throttle_wa(true);
4495
Patrick Dalyda765c62017-09-11 16:31:07 -07004496 if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
Patrick Daly86960052017-12-04 18:53:13 -08004497 !(val & TLBSTATUS_SACTIVE), 0, 10000)) {
4498		dev_err(smmu->dev, "ERRATA1 TLBSYNC timeout - IOMMU hardware in bad state\n");
4499 trace_tlbsync_timeout(dev, 0);
4500 BUG();
4501 }
Patrick Dalyda765c62017-09-11 16:31:07 -07004502
Patrick Daly86960052017-12-04 18:53:13 -08004503 msm_bus_noc_throttle_wa(false);
4504 trace_tlbi_throttle_end(dev, ktime_us_delta(ktime_get(), cur));
Patrick Dalyda765c62017-09-11 16:31:07 -07004505
Patrick Daly86960052017-12-04 18:53:13 -08004506 desc.args[1] = true;
4507 ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_SMMU_PROGRAM,
4508 SCM_CONFIG_ERRATA1),
4509 &desc);
4510 if (ret) {
4511 dev_err(smmu->dev, "Calling into TZ to reenable ERRATA1 failed - IOMMU hardware in bad state\n");
4512 BUG();
Patrick Dalyda765c62017-09-11 16:31:07 -07004513 }
4514}
4515
4516/* Must be called with clocks/regulators enabled */
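/*
 * Clients affected by errata 1 additionally space TLB invalidations at least
 * ERRATA1_TLBI_INTERVAL_US apart; the remote spinlock serialises the
 * invalidation window, presumably against other execution environments
 * sharing the SMMU.
 */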
4517static void qsmmuv500_errata1_tlb_inv_context(void *cookie)
4518{
4519 struct arm_smmu_domain *smmu_domain = cookie;
4520 struct device *dev = smmu_domain->dev;
4521 struct qsmmuv500_archdata *data =
4522 get_qsmmuv500_archdata(smmu_domain->smmu);
4523 ktime_t cur;
Patrick Daly1faa3112017-10-31 16:40:40 -07004524 unsigned long flags;
Patrick Dalyda765c62017-09-11 16:31:07 -07004525 bool errata;
4526
4527 cur = ktime_get();
Prakash Gupta25f90512017-11-20 14:56:54 +05304528 trace_tlbi_start(dev, 0);
Patrick Dalyda765c62017-09-11 16:31:07 -07004529
4530 errata = qsmmuv500_errata1_required(smmu_domain, data);
Patrick Daly1faa3112017-10-31 16:40:40 -07004531 remote_spin_lock_irqsave(&data->errata1_lock, flags);
Patrick Dalyda765c62017-09-11 16:31:07 -07004532 if (errata) {
4533 s64 delta;
4534
4535 delta = ktime_us_delta(ktime_get(), data->last_tlbi_ktime);
4536 if (delta < ERRATA1_TLBI_INTERVAL_US)
4537 udelay(ERRATA1_TLBI_INTERVAL_US - delta);
4538
4539 __qsmmuv500_errata1_tlbiall(smmu_domain);
4540
4541 data->last_tlbi_ktime = ktime_get();
4542 } else {
4543 __qsmmuv500_errata1_tlbiall(smmu_domain);
4544 }
Patrick Daly1faa3112017-10-31 16:40:40 -07004545 remote_spin_unlock_irqrestore(&data->errata1_lock, flags);
Patrick Dalyda765c62017-09-11 16:31:07 -07004546
Prakash Gupta25f90512017-11-20 14:56:54 +05304547 trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
Patrick Dalyda765c62017-09-11 16:31:07 -07004548}
4549
4550static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops = {
4551 .tlb_flush_all = qsmmuv500_errata1_tlb_inv_context,
4552 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
4553 .free_pages_exact = arm_smmu_free_pages_exact,
4554};
4555
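/*
 * Request that the TBU stop accepting new transactions and wait for the halt
 * to be acknowledged.  Halt requests are reference counted, so nested callers
 * are safe; qsmmuv500_tbu_resume() drops the count.
 */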
Patrick Daly8c1202b2017-05-10 15:42:30 -07004556static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu,
4557 struct arm_smmu_domain *smmu_domain)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004558{
4559 unsigned long flags;
Patrick Daly8c1202b2017-05-10 15:42:30 -07004560 u32 halt, fsr, sctlr_orig, sctlr, status;
4561 void __iomem *base, *cb_base;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004562
4563 spin_lock_irqsave(&tbu->halt_lock, flags);
4564 if (tbu->halt_count) {
4565 tbu->halt_count++;
4566 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4567 return 0;
4568 }
4569
Patrick Daly8c1202b2017-05-10 15:42:30 -07004570 cb_base = ARM_SMMU_CB_BASE(smmu_domain->smmu) +
4571 ARM_SMMU_CB(smmu_domain->smmu, smmu_domain->cfg.cbndx);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004572 base = tbu->base;
Patrick Daly8c1202b2017-05-10 15:42:30 -07004573 halt = readl_relaxed(base + DEBUG_SID_HALT_REG);
4574 halt |= DEBUG_SID_HALT_VAL;
4575 writel_relaxed(halt, base + DEBUG_SID_HALT_REG);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004576
Patrick Daly8c1202b2017-05-10 15:42:30 -07004577 if (!readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
4578 (status & DEBUG_SR_HALT_ACK_VAL),
4579 0, TBU_DBG_TIMEOUT_US))
4580 goto out;
4581
4582 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4583 if (!(fsr & FSR_FAULT)) {
Patrick Daly1f8a2882016-09-12 17:32:05 -07004584 dev_err(tbu->dev, "Couldn't halt TBU!\n");
4585 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4586 return -ETIMEDOUT;
4587 }
4588
Patrick Daly8c1202b2017-05-10 15:42:30 -07004589 /*
4590 * We are in a fault; Our request to halt the bus will not complete
4591 * until transactions in front of us (such as the fault itself) have
4592 * completed. Disable iommu faults and terminate any existing
4593 * transactions.
4594 */
4595 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
4596 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
4597 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
4598
4599 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
4600 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4601
4602 if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
4603 (status & DEBUG_SR_HALT_ACK_VAL),
4604 0, TBU_DBG_TIMEOUT_US)) {
4605 dev_err(tbu->dev, "Couldn't halt TBU from fault context!\n");
4606 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
4607 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4608 return -ETIMEDOUT;
4609 }
4610
4611 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
4612out:
Patrick Daly1f8a2882016-09-12 17:32:05 -07004613 tbu->halt_count = 1;
4614 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4615 return 0;
4616}
4617
4618static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
4619{
4620 unsigned long flags;
4621 u32 val;
4622 void __iomem *base;
4623
4624 spin_lock_irqsave(&tbu->halt_lock, flags);
4625 if (!tbu->halt_count) {
4626		WARN(1, "%s: bad tbu->halt_count\n", dev_name(tbu->dev));
4627 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4628 return;
4629
4630 } else if (tbu->halt_count > 1) {
4631 tbu->halt_count--;
4632 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4633 return;
4634 }
4635
4636 base = tbu->base;
4637 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
4638 val &= ~DEBUG_SID_HALT_VAL;
4639 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
4640
4641 tbu->halt_count = 0;
4642 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4643}
4644
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004645static struct qsmmuv500_tbu_device *qsmmuv500_find_tbu(
4646 struct arm_smmu_device *smmu, u32 sid)
4647{
4648 struct qsmmuv500_tbu_device *tbu = NULL;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004649 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004650
4651 list_for_each_entry(tbu, &data->tbus, list) {
4652 if (tbu->sid_start <= sid &&
4653 sid < tbu->sid_start + tbu->num_sids)
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07004654 return tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004655 }
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07004656 return NULL;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004657}
4658
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004659static int qsmmuv500_ecats_lock(struct arm_smmu_domain *smmu_domain,
4660 struct qsmmuv500_tbu_device *tbu,
4661 unsigned long *flags)
4662{
4663 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004664 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004665 u32 val;
4666
4667 spin_lock_irqsave(&smmu->atos_lock, *flags);
4668 /* The status register is not accessible on version 1.0 */
4669 if (data->version == 0x01000000)
4670 return 0;
4671
4672 if (readl_poll_timeout_atomic(tbu->status_reg,
4673 val, (val == 0x1), 0,
4674 TBU_DBG_TIMEOUT_US)) {
4675 dev_err(tbu->dev, "ECATS hw busy!\n");
4676 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4677 return -ETIMEDOUT;
4678 }
4679
4680 return 0;
4681}
4682
4683static void qsmmuv500_ecats_unlock(struct arm_smmu_domain *smmu_domain,
4684 struct qsmmuv500_tbu_device *tbu,
4685 unsigned long *flags)
4686{
4687 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004688 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004689
4690 /* The status register is not accessible on version 1.0 */
4691 if (data->version != 0x01000000)
4692 writel_relaxed(0, tbu->status_reg);
4693 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4694}
4695
4696/*
4697 * Returns the translated physical address, or zero on failure.
4698 */
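/*
 * Rough sequence: halt the TBU, take the ECATS lock, program the stream ID and
 * IOVA into the TBU debug registers, trigger a debug read transaction, poll
 * for completion (or a context fault), then read the translated address back
 * from DEBUG_PAR.  Failed translations are retried a couple of times to work
 * around the stale-failure behaviour noted further down.
 */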
4699static phys_addr_t qsmmuv500_iova_to_phys(
4700 struct iommu_domain *domain, dma_addr_t iova, u32 sid)
4701{
4702 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
4703 struct arm_smmu_device *smmu = smmu_domain->smmu;
4704 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
4705 struct qsmmuv500_tbu_device *tbu;
4706 int ret;
4707 phys_addr_t phys = 0;
4708 u64 val, fsr;
4709 unsigned long flags;
4710 void __iomem *cb_base;
4711 u32 sctlr_orig, sctlr;
4712 int needs_redo = 0;
Patrick Daly8c1202b2017-05-10 15:42:30 -07004713 ktime_t timeout;
4714
4715 /* only 36 bit iova is supported */
4716 if (iova >= (1ULL << 36)) {
4717 dev_err_ratelimited(smmu->dev, "ECATS: address too large: %pad\n",
4718 &iova);
4719 return 0;
4720 }
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004721
4722 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
4723 tbu = qsmmuv500_find_tbu(smmu, sid);
4724 if (!tbu)
4725 return 0;
4726
4727 ret = arm_smmu_power_on(tbu->pwr);
4728 if (ret)
4729 return 0;
4730
Patrick Daly8c1202b2017-05-10 15:42:30 -07004731 ret = qsmmuv500_tbu_halt(tbu, smmu_domain);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004732 if (ret)
4733 goto out_power_off;
4734
Patrick Daly8c1202b2017-05-10 15:42:30 -07004735 /*
4736 * ECATS can trigger the fault interrupt, so disable it temporarily
4737 * and check for an interrupt manually.
4738 */
4739 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
4740 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
4741 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
4742
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004743 /* Only one concurrent atos operation */
4744 ret = qsmmuv500_ecats_lock(smmu_domain, tbu, &flags);
4745 if (ret)
4746 goto out_resume;
4747
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004748redo:
4749 /* Set address and stream-id */
4750 val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
4751 val |= sid & DEBUG_SID_HALT_SID_MASK;
4752 writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
4753 writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
4754
4755 /*
4756 * Write-back Read and Write-Allocate
4757	 * Privileged, nonsecure, data transaction
4758 * Read operation.
4759 */
4760 val = 0xF << DEBUG_TXN_AXCACHE_SHIFT;
4761 val |= 0x3 << DEBUG_TXN_AXPROT_SHIFT;
4762 val |= DEBUG_TXN_TRIGGER;
4763 writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);
4764
4765 ret = 0;
Patrick Daly8c1202b2017-05-10 15:42:30 -07004766	/* Based on readx_poll_timeout_atomic() */
4767 timeout = ktime_add_us(ktime_get(), TBU_DBG_TIMEOUT_US);
4768 for (;;) {
4769 val = readl_relaxed(tbu->base + DEBUG_SR_HALT_ACK_REG);
4770 if (!(val & DEBUG_SR_ECATS_RUNNING_VAL))
4771 break;
4772 val = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4773 if (val & FSR_FAULT)
4774 break;
4775 if (ktime_compare(ktime_get(), timeout) > 0) {
4776 dev_err(tbu->dev, "ECATS translation timed out!\n");
4777 ret = -ETIMEDOUT;
4778 break;
4779 }
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004780 }
4781
4782 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4783 if (fsr & FSR_FAULT) {
4784 dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx\n",
Patrick Daly8c1202b2017-05-10 15:42:30 -07004785 fsr);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004786 ret = -EINVAL;
4787
4788		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
4789 /*
4790 * Clear pending interrupts
4791 * Barrier required to ensure that the FSR is cleared
4792 * before resuming SMMU operation
4793 */
4794 wmb();
4795 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4796 }
4797
4798 val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
4799 if (val & DEBUG_PAR_FAULT_VAL) {
4800 dev_err(tbu->dev, "ECATS translation failed! PAR = %llx\n",
4801 val);
4802 ret = -EINVAL;
4803 }
4804
4805 phys = (val >> DEBUG_PAR_PA_SHIFT) & DEBUG_PAR_PA_MASK;
4806 if (ret < 0)
4807 phys = 0;
4808
4809 /* Reset hardware */
4810 writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
4811 writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
4812
4813 /*
4814 * After a failed translation, the next successful translation will
4815 * incorrectly be reported as a failure.
4816 */
4817 if (!phys && needs_redo++ < 2)
4818 goto redo;
4819
4820 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
4821 qsmmuv500_ecats_unlock(smmu_domain, tbu, &flags);
4822
4823out_resume:
4824 qsmmuv500_tbu_resume(tbu);
4825
4826out_power_off:
4827 arm_smmu_power_off(tbu->pwr);
4828
4829 return phys;
4830}
4831
4832static phys_addr_t qsmmuv500_iova_to_phys_hard(
4833 struct iommu_domain *domain, dma_addr_t iova)
4834{
4835 u16 sid;
4836 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
4837 struct iommu_fwspec *fwspec;
4838
4839 /* Select a sid */
4840 fwspec = smmu_domain->dev->iommu_fwspec;
4841 sid = (u16)fwspec->ids[0];
4842
4843 return qsmmuv500_iova_to_phys(domain, iova, sid);
4844}
4845
Patrick Daly03330cc2017-08-11 14:56:38 -07004846static void qsmmuv500_release_group_iommudata(void *data)
4847{
4848 kfree(data);
4849}
4850
4851/* Every device in a group that has a valid ACTLR must use the same value */
4852static int qsmmuv500_device_group(struct device *dev,
4853 struct iommu_group *group)
4854{
4855 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
4856 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
4857 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
4858 struct qsmmuv500_group_iommudata *iommudata;
4859 u32 actlr, i;
4860 struct arm_smmu_smr *smr;
4861
4862 iommudata = to_qsmmuv500_group_iommudata(group);
4863 if (!iommudata) {
4864 iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
4865 if (!iommudata)
4866 return -ENOMEM;
4867
4868 iommu_group_set_iommudata(group, iommudata,
4869 qsmmuv500_release_group_iommudata);
4870 }
4871
4872 for (i = 0; i < data->actlr_tbl_size; i++) {
4873 smr = &data->actlrs[i].smr;
4874 actlr = data->actlrs[i].actlr;
4875
4876 if (!arm_smmu_fwspec_match_smr(fwspec, smr))
4877 continue;
4878
4879 if (!iommudata->has_actlr) {
4880 iommudata->actlr = actlr;
4881 iommudata->has_actlr = true;
4882 } else if (iommudata->actlr != actlr) {
4883 return -EINVAL;
4884 }
4885 }
4886
4887 return 0;
4888}
4889
4890static void qsmmuv500_init_cb(struct arm_smmu_domain *smmu_domain,
4891 struct device *dev)
4892{
4893 struct arm_smmu_device *smmu = smmu_domain->smmu;
4894 struct qsmmuv500_group_iommudata *iommudata =
4895 to_qsmmuv500_group_iommudata(dev->iommu_group);
4896 void __iomem *cb_base;
4897 const struct iommu_gather_ops *tlb;
4898
4899 if (!iommudata->has_actlr)
4900 return;
4901
4902 tlb = smmu_domain->pgtbl_cfg.tlb;
4903 cb_base = ARM_SMMU_CB_BASE(smmu) +
4904 ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
4905
4906 writel_relaxed(iommudata->actlr, cb_base + ARM_SMMU_CB_ACTLR);
4907
4908 /*
Patrick Daly23301482017-10-12 16:18:25 -07004909 * Prefetch only works properly if the start and end of all
4910	 * buffers in the page table are aligned to 16 KB.
4911 */
Patrick Daly27bd9292017-11-22 13:59:59 -08004912 if ((iommudata->actlr >> QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT) &
Patrick Daly23301482017-10-12 16:18:25 -07004913 QSMMUV500_ACTLR_DEEP_PREFETCH_MASK)
4914 smmu_domain->qsmmuv500_errata2_min_align = true;
4915
4916 /*
Patrick Daly03330cc2017-08-11 14:56:38 -07004917 * Flush the context bank after modifying ACTLR to ensure there
4918 * are no cache entries with stale state
4919 */
4920 tlb->tlb_flush_all(smmu_domain);
4921}
4922
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004923static int qsmmuv500_tbu_register(struct device *dev, void *cookie)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004924{
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004925 struct arm_smmu_device *smmu = cookie;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004926 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004927 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004928
4929 if (!dev->driver) {
4930 dev_err(dev, "TBU failed probe, QSMMUV500 cannot continue!\n");
4931 return -EINVAL;
4932 }
4933
4934 tbu = dev_get_drvdata(dev);
4935
4936 INIT_LIST_HEAD(&tbu->list);
4937 tbu->smmu = smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004938 list_add(&tbu->list, &data->tbus);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004939 return 0;
4940}
4941
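/*
 * The optional "qcom,mmu500-errata-1" property is a list of <SID mask> pairs
 * identifying clients that need the errata-1 TLBI workaround, e.g.
 * (hypothetical values):
 *
 *	qcom,mmu500-errata-1 = <0x800 0x3ff>;
 */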
Patrick Dalyda765c62017-09-11 16:31:07 -07004942static int qsmmuv500_parse_errata1(struct arm_smmu_device *smmu)
4943{
4944 int len, i;
4945 struct device *dev = smmu->dev;
4946 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
4947 struct arm_smmu_smr *smrs;
4948 const __be32 *cell;
4949
4950 cell = of_get_property(dev->of_node, "qcom,mmu500-errata-1", NULL);
4951 if (!cell)
4952 return 0;
4953
4954 remote_spin_lock_init(&data->errata1_lock, ERRATA1_REMOTE_SPINLOCK);
4955 len = of_property_count_elems_of_size(
4956 dev->of_node, "qcom,mmu500-errata-1", sizeof(u32) * 2);
4957 if (len < 0)
4958 return 0;
4959
4960 smrs = devm_kzalloc(dev, sizeof(*smrs) * len, GFP_KERNEL);
4961 if (!smrs)
4962 return -ENOMEM;
4963
4964 for (i = 0; i < len; i++) {
4965 smrs[i].id = of_read_number(cell++, 1);
4966 smrs[i].mask = of_read_number(cell++, 1);
4967 }
4968
4969 data->errata1_clients = smrs;
4970 data->num_errata1_clients = len;
4971 return 0;
4972}
4973
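/*
 * The optional "qcom,actlr" property is a list of <SID mask ACTLR> triplets,
 * e.g. (hypothetical values):
 *
 *	qcom,actlr = <0x880 0x8 0x303>;
 *
 * Masters whose stream ID matches <SID, mask> have the given ACTLR value
 * applied to their context bank in qsmmuv500_init_cb().
 */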
Patrick Daly03330cc2017-08-11 14:56:38 -07004974static int qsmmuv500_read_actlr_tbl(struct arm_smmu_device *smmu)
4975{
4976 int len, i;
4977 struct device *dev = smmu->dev;
4978 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
4979 struct actlr_setting *actlrs;
4980 const __be32 *cell;
4981
4982 cell = of_get_property(dev->of_node, "qcom,actlr", NULL);
4983 if (!cell)
4984 return 0;
4985
4986 len = of_property_count_elems_of_size(dev->of_node, "qcom,actlr",
4987 sizeof(u32) * 3);
4988 if (len < 0)
4989 return 0;
4990
4991 actlrs = devm_kzalloc(dev, sizeof(*actlrs) * len, GFP_KERNEL);
4992 if (!actlrs)
4993 return -ENOMEM;
4994
4995 for (i = 0; i < len; i++) {
4996 actlrs[i].smr.id = of_read_number(cell++, 1);
4997 actlrs[i].smr.mask = of_read_number(cell++, 1);
4998 actlrs[i].actlr = of_read_number(cell++, 1);
4999 }
5000
5001 data->actlrs = actlrs;
5002 data->actlr_tbl_size = len;
5003 return 0;
5004}
5005
Patrick Daly1f8a2882016-09-12 17:32:05 -07005006static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
5007{
Patrick Dalya0fddb62017-03-27 19:26:59 -07005008 struct resource *res;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005009 struct device *dev = smmu->dev;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005010 struct qsmmuv500_archdata *data;
Patrick Dalya0fddb62017-03-27 19:26:59 -07005011 struct platform_device *pdev;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005012 int ret;
Patrick Daly03330cc2017-08-11 14:56:38 -07005013 u32 val;
5014 void __iomem *reg;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005015
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005016 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
5017 if (!data)
Patrick Daly1f8a2882016-09-12 17:32:05 -07005018 return -ENOMEM;
5019
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005020 INIT_LIST_HEAD(&data->tbus);
Patrick Dalya0fddb62017-03-27 19:26:59 -07005021
5022 pdev = container_of(dev, struct platform_device, dev);
5023 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcu-base");
5024 data->tcu_base = devm_ioremap_resource(dev, res);
5025 if (IS_ERR(data->tcu_base))
5026 return PTR_ERR(data->tcu_base);
5027
5028 data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
Patrick Daly6b290f1e2017-03-27 19:26:59 -07005029 smmu->archdata = data;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005030
Patrick Dalyda765c62017-09-11 16:31:07 -07005031 ret = qsmmuv500_parse_errata1(smmu);
5032 if (ret)
5033 return ret;
5034
Patrick Daly03330cc2017-08-11 14:56:38 -07005035 ret = qsmmuv500_read_actlr_tbl(smmu);
5036 if (ret)
5037 return ret;
5038
5039 reg = ARM_SMMU_GR0(smmu);
5040 val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
5041 val &= ~ARM_MMU500_ACR_CACHE_LOCK;
5042 writel_relaxed(val, reg + ARM_SMMU_GR0_sACR);
5043 val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
5044 /*
5045	 * Modifying the nonsecure copy of the sACR register is only
5046 * allowed if permission is given in the secure sACR register.
5047 * Attempt to detect if we were able to update the value.
5048 */
5049 WARN_ON(val & ARM_MMU500_ACR_CACHE_LOCK);
5050
Patrick Daly1f8a2882016-09-12 17:32:05 -07005051 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
5052 if (ret)
5053 return ret;
5054
5055 /* Attempt to register child devices */
5056 ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
5057 if (ret)
Patrick Daly6ce54262017-04-12 21:24:06 -07005058 return -EPROBE_DEFER;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005059
5060 return 0;
5061}
5062
5063struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
5064 .init = qsmmuv500_arch_init,
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005065 .iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
Patrick Daly03330cc2017-08-11 14:56:38 -07005066 .init_context_bank = qsmmuv500_init_cb,
5067 .device_group = qsmmuv500_device_group,
Patrick Daly1f8a2882016-09-12 17:32:05 -07005068};
5069
5070static const struct of_device_id qsmmuv500_tbu_of_match[] = {
5071 {.compatible = "qcom,qsmmuv500-tbu"},
5072 {}
5073};
5074
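/*
 * Each TBU child node supplies "base" and "status-reg" register regions and a
 * "qcom,stream-id-range" = <start count> pair describing the stream IDs it
 * serves, e.g. (hypothetical values):
 *
 *	qcom,stream-id-range = <0x800 0x400>;
 */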
5075static int qsmmuv500_tbu_probe(struct platform_device *pdev)
5076{
5077 struct resource *res;
5078 struct device *dev = &pdev->dev;
5079 struct qsmmuv500_tbu_device *tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005080 const __be32 *cell;
5081 int len;
Patrick Daly1f8a2882016-09-12 17:32:05 -07005082
5083 tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
5084 if (!tbu)
5085 return -ENOMEM;
5086
5087 INIT_LIST_HEAD(&tbu->list);
5088 tbu->dev = dev;
5089 spin_lock_init(&tbu->halt_lock);
5090
5091 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
5092 tbu->base = devm_ioremap_resource(dev, res);
5093 if (IS_ERR(tbu->base))
5094 return PTR_ERR(tbu->base);
5095
5096 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "status-reg");
5097 tbu->status_reg = devm_ioremap_resource(dev, res);
5098 if (IS_ERR(tbu->status_reg))
5099 return PTR_ERR(tbu->status_reg);
5100
Patrick Daly63b0e2c2016-11-01 16:58:57 -07005101 cell = of_get_property(dev->of_node, "qcom,stream-id-range", &len);
5102 if (!cell || len < 8)
5103 return -EINVAL;
5104
5105 tbu->sid_start = of_read_number(cell, 1);
5106 tbu->num_sids = of_read_number(cell + 1, 1);
5107
Patrick Daly1f8a2882016-09-12 17:32:05 -07005108 tbu->pwr = arm_smmu_init_power_resources(pdev);
5109 if (IS_ERR(tbu->pwr))
5110 return PTR_ERR(tbu->pwr);
5111
5112 dev_set_drvdata(dev, tbu);
5113 return 0;
5114}
5115
5116static struct platform_driver qsmmuv500_tbu_driver = {
5117 .driver = {
5118 .name = "qsmmuv500-tbu",
5119 .of_match_table = of_match_ptr(qsmmuv500_tbu_of_match),
5120 },
5121 .probe = qsmmuv500_tbu_probe,
5122};
5123
Will Deacon45ae7cf2013-06-24 18:31:25 +01005124MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
5125MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
5126MODULE_LICENSE("GPL v2");