Will Deacon45ae7cf2013-06-24 18:31:25 +01001/*
2 * IOMMU API for ARM architected SMMU implementations.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
16 *
17 * Copyright (C) 2013 ARM Limited
18 *
19 * Author: Will Deacon <will.deacon@arm.com>
20 *
21 * This driver currently supports:
22 * - SMMUv1 and v2 implementations
23 * - Stream-matching and stream-indexing
24 * - v7/v8 long-descriptor format
25 * - Non-secure access to the SMMU
Will Deacon45ae7cf2013-06-24 18:31:25 +010026 * - Context fault reporting
27 */
28
29#define pr_fmt(fmt) "arm-smmu: " fmt
30
Robin Murphy468f4942016-09-12 17:13:49 +010031#include <linux/atomic.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010032#include <linux/delay.h>
Robin Murphy9adb9592016-01-26 18:06:36 +000033#include <linux/dma-iommu.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010034#include <linux/dma-mapping.h>
35#include <linux/err.h>
36#include <linux/interrupt.h>
37#include <linux/io.h>
Robin Murphyf9a05f02016-04-13 18:13:01 +010038#include <linux/io-64-nonatomic-hi-lo.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010039#include <linux/iommu.h>
Mitchel Humpherys859a7322014-10-29 21:13:40 +000040#include <linux/iopoll.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010041#include <linux/module.h>
42#include <linux/of.h>
Robin Murphybae2c2d2015-07-29 19:46:05 +010043#include <linux/of_address.h>
Robin Murphyfe52d4f2016-09-12 17:13:52 +010044#include <linux/of_device.h>
Robin Murphy06e393e2016-09-12 17:13:55 +010045#include <linux/of_iommu.h>
Will Deacona9a1b0b2014-05-01 18:05:08 +010046#include <linux/pci.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010047#include <linux/platform_device.h>
48#include <linux/slab.h>
49#include <linux/spinlock.h>
Patrick Dalyc11d1082016-09-01 15:52:44 -070050#include <soc/qcom/secure_buffer.h>
Patrick Daly1f8a2882016-09-12 17:32:05 -070051#include <linux/of_platform.h>
Patrick Daly2764f952016-09-06 19:22:44 -070052#include <linux/msm-bus.h>
53#include <dt-bindings/msm/msm-bus-ids.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010054
55#include <linux/amba/bus.h>
56
Will Deacon518f7132014-11-14 17:17:54 +000057#include "io-pgtable.h"
Will Deacon45ae7cf2013-06-24 18:31:25 +010058
Will Deacon45ae7cf2013-06-24 18:31:25 +010059/* Maximum number of context banks per SMMU */
60#define ARM_SMMU_MAX_CBS 128
61
Will Deacon45ae7cf2013-06-24 18:31:25 +010062/* SMMU global address space */
63#define ARM_SMMU_GR0(smmu) ((smmu)->base)
Will Deaconc757e852014-07-30 11:33:25 +010064#define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))
Will Deacon45ae7cf2013-06-24 18:31:25 +010065
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +000066/*
67 * SMMU global address space with conditional offset to access secure
68 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
69 * nsGFSYNR0: 0x450)
70 */
71#define ARM_SMMU_GR0_NS(smmu) \
72 ((smmu)->base + \
73 ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
74 ? 0x400 : 0))
75
Robin Murphyf9a05f02016-04-13 18:13:01 +010076/*
77 * Some 64-bit registers only make sense to write atomically, but in such
78 * cases all the data relevant to AArch32 formats lies within the lower word,
79 * therefore this actually makes more sense than it might first appear.
80 */
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010081#ifdef CONFIG_64BIT
Robin Murphyf9a05f02016-04-13 18:13:01 +010082#define smmu_write_atomic_lq writeq_relaxed
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010083#else
Robin Murphyf9a05f02016-04-13 18:13:01 +010084#define smmu_write_atomic_lq writel_relaxed
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010085#endif
86
Will Deacon45ae7cf2013-06-24 18:31:25 +010087/* Configuration registers */
88#define ARM_SMMU_GR0_sCR0 0x0
89#define sCR0_CLIENTPD (1 << 0)
90#define sCR0_GFRE (1 << 1)
91#define sCR0_GFIE (1 << 2)
92#define sCR0_GCFGFRE (1 << 4)
93#define sCR0_GCFGFIE (1 << 5)
94#define sCR0_USFCFG (1 << 10)
95#define sCR0_VMIDPNE (1 << 11)
96#define sCR0_PTM (1 << 12)
97#define sCR0_FB (1 << 13)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -080098#define sCR0_VMID16EN (1 << 31)
Will Deacon45ae7cf2013-06-24 18:31:25 +010099#define sCR0_BSU_SHIFT 14
100#define sCR0_BSU_MASK 0x3
101
Peng Fan3ca37122016-05-03 21:50:30 +0800102/* Auxiliary Configuration register */
103#define ARM_SMMU_GR0_sACR 0x10
104
Will Deacon45ae7cf2013-06-24 18:31:25 +0100105/* Identification registers */
106#define ARM_SMMU_GR0_ID0 0x20
107#define ARM_SMMU_GR0_ID1 0x24
108#define ARM_SMMU_GR0_ID2 0x28
109#define ARM_SMMU_GR0_ID3 0x2c
110#define ARM_SMMU_GR0_ID4 0x30
111#define ARM_SMMU_GR0_ID5 0x34
112#define ARM_SMMU_GR0_ID6 0x38
113#define ARM_SMMU_GR0_ID7 0x3c
114#define ARM_SMMU_GR0_sGFSR 0x48
115#define ARM_SMMU_GR0_sGFSYNR0 0x50
116#define ARM_SMMU_GR0_sGFSYNR1 0x54
117#define ARM_SMMU_GR0_sGFSYNR2 0x58
Will Deacon45ae7cf2013-06-24 18:31:25 +0100118
119#define ID0_S1TS (1 << 30)
120#define ID0_S2TS (1 << 29)
121#define ID0_NTS (1 << 28)
122#define ID0_SMS (1 << 27)
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000123#define ID0_ATOSNS (1 << 26)
Robin Murphy7602b872016-04-28 17:12:09 +0100124#define ID0_PTFS_NO_AARCH32 (1 << 25)
125#define ID0_PTFS_NO_AARCH32S (1 << 24)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100126#define ID0_CTTW (1 << 14)
127#define ID0_NUMIRPT_SHIFT 16
128#define ID0_NUMIRPT_MASK 0xff
Olav Haugan3c8766d2014-08-22 17:12:32 -0700129#define ID0_NUMSIDB_SHIFT 9
130#define ID0_NUMSIDB_MASK 0xf
Will Deacon45ae7cf2013-06-24 18:31:25 +0100131#define ID0_NUMSMRG_SHIFT 0
132#define ID0_NUMSMRG_MASK 0xff
133
134#define ID1_PAGESIZE (1 << 31)
135#define ID1_NUMPAGENDXB_SHIFT 28
136#define ID1_NUMPAGENDXB_MASK 7
137#define ID1_NUMS2CB_SHIFT 16
138#define ID1_NUMS2CB_MASK 0xff
139#define ID1_NUMCB_SHIFT 0
140#define ID1_NUMCB_MASK 0xff
141
142#define ID2_OAS_SHIFT 4
143#define ID2_OAS_MASK 0xf
144#define ID2_IAS_SHIFT 0
145#define ID2_IAS_MASK 0xf
146#define ID2_UBS_SHIFT 8
147#define ID2_UBS_MASK 0xf
148#define ID2_PTFS_4K (1 << 12)
149#define ID2_PTFS_16K (1 << 13)
150#define ID2_PTFS_64K (1 << 14)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800151#define ID2_VMID16 (1 << 15)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100152
Peng Fan3ca37122016-05-03 21:50:30 +0800153#define ID7_MAJOR_SHIFT 4
154#define ID7_MAJOR_MASK 0xf
Will Deacon45ae7cf2013-06-24 18:31:25 +0100155
Will Deacon45ae7cf2013-06-24 18:31:25 +0100156/* Global TLB invalidation */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100157#define ARM_SMMU_GR0_TLBIVMID 0x64
158#define ARM_SMMU_GR0_TLBIALLNSNH 0x68
159#define ARM_SMMU_GR0_TLBIALLH 0x6c
160#define ARM_SMMU_GR0_sTLBGSYNC 0x70
161#define ARM_SMMU_GR0_sTLBGSTATUS 0x74
162#define sTLBGSTATUS_GSACTIVE (1 << 0)
Mitchel Humpherys849aa502015-11-09 11:50:58 -0800163#define TLB_LOOP_TIMEOUT 500000 /* 500ms */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100164
165/* Stream mapping registers */
166#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
167#define SMR_VALID (1 << 31)
168#define SMR_MASK_SHIFT 16
Patrick Dalyda688822017-05-17 20:12:48 -0700169#define SMR_MASK_MASK 0x7FFF
Will Deacon45ae7cf2013-06-24 18:31:25 +0100170#define SMR_ID_SHIFT 0
Will Deacon45ae7cf2013-06-24 18:31:25 +0100171
172#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
173#define S2CR_CBNDX_SHIFT 0
174#define S2CR_CBNDX_MASK 0xff
175#define S2CR_TYPE_SHIFT 16
176#define S2CR_TYPE_MASK 0x3
Robin Murphya754fd12016-09-12 17:13:50 +0100177enum arm_smmu_s2cr_type {
178 S2CR_TYPE_TRANS,
179 S2CR_TYPE_BYPASS,
180 S2CR_TYPE_FAULT,
181};
182
183#define S2CR_PRIVCFG_SHIFT 24
184#define S2CR_PRIVCFG_MASK 0x3
185enum arm_smmu_s2cr_privcfg {
186 S2CR_PRIVCFG_DEFAULT,
187 S2CR_PRIVCFG_DIPAN,
188 S2CR_PRIVCFG_UNPRIV,
189 S2CR_PRIVCFG_PRIV,
190};
Will Deacon45ae7cf2013-06-24 18:31:25 +0100191
192/* Context bank attribute registers */
193#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
194#define CBAR_VMID_SHIFT 0
195#define CBAR_VMID_MASK 0xff
Will Deacon57ca90f2014-02-06 14:59:05 +0000196#define CBAR_S1_BPSHCFG_SHIFT 8
197#define CBAR_S1_BPSHCFG_MASK 3
198#define CBAR_S1_BPSHCFG_NSH 3
Will Deacon45ae7cf2013-06-24 18:31:25 +0100199#define CBAR_S1_MEMATTR_SHIFT 12
200#define CBAR_S1_MEMATTR_MASK 0xf
201#define CBAR_S1_MEMATTR_WB 0xf
202#define CBAR_TYPE_SHIFT 16
203#define CBAR_TYPE_MASK 0x3
204#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
205#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
206#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
207#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
208#define CBAR_IRPTNDX_SHIFT 24
209#define CBAR_IRPTNDX_MASK 0xff
210
Shalaj Jain04059c52015-03-03 13:34:59 -0800211#define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2))
212#define CBFRSYNRA_SID_MASK (0xffff)
213
Will Deacon45ae7cf2013-06-24 18:31:25 +0100214#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
215#define CBA2R_RW64_32BIT (0 << 0)
216#define CBA2R_RW64_64BIT (1 << 0)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800217#define CBA2R_VMID_SHIFT 16
218#define CBA2R_VMID_MASK 0xffff
Will Deacon45ae7cf2013-06-24 18:31:25 +0100219
220/* Translation context bank */
221#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
Will Deaconc757e852014-07-30 11:33:25 +0100222#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))
Will Deacon45ae7cf2013-06-24 18:31:25 +0100223
224#define ARM_SMMU_CB_SCTLR 0x0
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100225#define ARM_SMMU_CB_ACTLR 0x4
Will Deacon45ae7cf2013-06-24 18:31:25 +0100226#define ARM_SMMU_CB_RESUME 0x8
227#define ARM_SMMU_CB_TTBCR2 0x10
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +0100228#define ARM_SMMU_CB_TTBR0 0x20
229#define ARM_SMMU_CB_TTBR1 0x28
Will Deacon45ae7cf2013-06-24 18:31:25 +0100230#define ARM_SMMU_CB_TTBCR 0x30
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600231#define ARM_SMMU_CB_CONTEXTIDR 0x34
Will Deacon45ae7cf2013-06-24 18:31:25 +0100232#define ARM_SMMU_CB_S1_MAIR0 0x38
Will Deacon518f7132014-11-14 17:17:54 +0000233#define ARM_SMMU_CB_S1_MAIR1 0x3c
Robin Murphyf9a05f02016-04-13 18:13:01 +0100234#define ARM_SMMU_CB_PAR 0x50
Will Deacon45ae7cf2013-06-24 18:31:25 +0100235#define ARM_SMMU_CB_FSR 0x58
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -0700236#define ARM_SMMU_CB_FSRRESTORE 0x5c
Robin Murphyf9a05f02016-04-13 18:13:01 +0100237#define ARM_SMMU_CB_FAR 0x60
Will Deacon45ae7cf2013-06-24 18:31:25 +0100238#define ARM_SMMU_CB_FSYNR0 0x68
Will Deacon518f7132014-11-14 17:17:54 +0000239#define ARM_SMMU_CB_S1_TLBIVA 0x600
Will Deacon1463fe42013-07-31 19:21:27 +0100240#define ARM_SMMU_CB_S1_TLBIASID 0x610
Patrick Dalye7069342017-07-11 12:35:55 -0700241#define ARM_SMMU_CB_S1_TLBIALL 0x618
Will Deacon518f7132014-11-14 17:17:54 +0000242#define ARM_SMMU_CB_S1_TLBIVAL 0x620
243#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
244#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
Mitchel Humpherysf3007992015-06-19 15:00:14 -0700245#define ARM_SMMU_CB_TLBSYNC 0x7f0
246#define ARM_SMMU_CB_TLBSTATUS 0x7f4
247#define TLBSTATUS_SACTIVE (1 << 0)
Robin Murphy661d9622015-05-27 17:09:34 +0100248#define ARM_SMMU_CB_ATS1PR 0x800
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000249#define ARM_SMMU_CB_ATSR 0x8f0
Will Deacon45ae7cf2013-06-24 18:31:25 +0100250
251#define SCTLR_S1_ASIDPNE (1 << 12)
252#define SCTLR_CFCFG (1 << 7)
Charan Teja Reddyc682e472017-04-20 19:11:20 +0530253#define SCTLR_HUPCF (1 << 8)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100254#define SCTLR_CFIE (1 << 6)
255#define SCTLR_CFRE (1 << 5)
256#define SCTLR_E (1 << 4)
257#define SCTLR_AFE (1 << 2)
258#define SCTLR_TRE (1 << 1)
259#define SCTLR_M (1 << 0)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100260
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100261#define ARM_MMU500_ACTLR_CPRE (1 << 1)
262
Peng Fan3ca37122016-05-03 21:50:30 +0800263#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
264
Mitchel Humpherys952f40a2015-08-19 12:13:28 -0700265#define ARM_SMMU_IMPL_DEF0(smmu) \
266 ((smmu)->base + (2 * (1 << (smmu)->pgshift)))
267#define ARM_SMMU_IMPL_DEF1(smmu) \
268 ((smmu)->base + (6 * (1 << (smmu)->pgshift)))
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000269#define CB_PAR_F (1 << 0)
270
271#define ATSR_ACTIVE (1 << 0)
272
Will Deacon45ae7cf2013-06-24 18:31:25 +0100273#define RESUME_RETRY (0 << 0)
274#define RESUME_TERMINATE (1 << 0)
275
Will Deacon45ae7cf2013-06-24 18:31:25 +0100276#define TTBCR2_SEP_SHIFT 15
Will Deacon5dc56162015-05-08 17:44:22 +0100277#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100278
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +0100279#define TTBRn_ASID_SHIFT 48
Will Deacon45ae7cf2013-06-24 18:31:25 +0100280
281#define FSR_MULTI (1 << 31)
282#define FSR_SS (1 << 30)
283#define FSR_UUT (1 << 8)
284#define FSR_ASF (1 << 7)
285#define FSR_TLBLKF (1 << 6)
286#define FSR_TLBMCF (1 << 5)
287#define FSR_EF (1 << 4)
288#define FSR_PF (1 << 3)
289#define FSR_AFF (1 << 2)
290#define FSR_TF (1 << 1)
291
Mitchel Humpherys29073202014-07-08 09:52:18 -0700292#define FSR_IGN (FSR_AFF | FSR_ASF | \
293 FSR_TLBMCF | FSR_TLBLKF)
294#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
Will Deaconadaba322013-07-31 19:21:26 +0100295 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100296
297#define FSYNR0_WNR (1 << 4)
298
Will Deacon4cf740b2014-07-14 19:47:39 +0100299static int force_stage;
Robin Murphy25a1c962016-02-10 14:25:33 +0000300module_param(force_stage, int, S_IRUGO);
Will Deacon4cf740b2014-07-14 19:47:39 +0100301MODULE_PARM_DESC(force_stage,
302 "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
Patrick Dalya728cfd2016-11-15 17:49:29 -0800303static bool disable_bypass;
Robin Murphy25a1c962016-02-10 14:25:33 +0000304module_param(disable_bypass, bool, S_IRUGO);
305MODULE_PARM_DESC(disable_bypass,
306 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
Will Deacon4cf740b2014-07-14 19:47:39 +0100307
Robin Murphy09360402014-08-28 17:51:59 +0100308enum arm_smmu_arch_version {
Robin Murphyb7862e32016-04-13 18:13:03 +0100309 ARM_SMMU_V1,
310 ARM_SMMU_V1_64K,
Robin Murphy09360402014-08-28 17:51:59 +0100311 ARM_SMMU_V2,
312};
313
Robin Murphy67b65a32016-04-13 18:12:57 +0100314enum arm_smmu_implementation {
315 GENERIC_SMMU,
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100316 ARM_MMU500,
Robin Murphye086d912016-04-13 18:12:58 +0100317 CAVIUM_SMMUV2,
Patrick Dalyf0d4e212016-06-20 15:50:14 -0700318 QCOM_SMMUV2,
Patrick Daly1f8a2882016-09-12 17:32:05 -0700319 QCOM_SMMUV500,
Robin Murphy67b65a32016-04-13 18:12:57 +0100320};
321
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -0700322struct arm_smmu_impl_def_reg {
323 u32 offset;
324 u32 value;
325};
326
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -0700327/*
328 * attach_count
329 * The SMR and S2CR registers are only programmed when the number of
330 * devices attached to the iommu using these registers is > 0. This
331 * is required for the "SID switch" use case for secure display.
332 * Protected by stream_map_mutex.
333 */
Robin Murphya754fd12016-09-12 17:13:50 +0100334struct arm_smmu_s2cr {
Robin Murphy6668f692016-09-12 17:13:54 +0100335 struct iommu_group *group;
336 int count;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -0700337 int attach_count;
Robin Murphya754fd12016-09-12 17:13:50 +0100338 enum arm_smmu_s2cr_type type;
339 enum arm_smmu_s2cr_privcfg privcfg;
340 u8 cbndx;
Patrick Dalyda688822017-05-17 20:12:48 -0700341 bool cb_handoff;
Robin Murphya754fd12016-09-12 17:13:50 +0100342};
343
344#define s2cr_init_val (struct arm_smmu_s2cr){ \
345 .type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \
Patrick Dalyda688822017-05-17 20:12:48 -0700346 .cb_handoff = false, \
Robin Murphya754fd12016-09-12 17:13:50 +0100347}
348
Will Deacon45ae7cf2013-06-24 18:31:25 +0100349struct arm_smmu_smr {
Will Deacon45ae7cf2013-06-24 18:31:25 +0100350 u16 mask;
351 u16 id;
Robin Murphy468f4942016-09-12 17:13:49 +0100352 bool valid;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100353};
354
Will Deacona9a1b0b2014-05-01 18:05:08 +0100355struct arm_smmu_master_cfg {
Robin Murphyd5b41782016-09-14 15:21:39 +0100356 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +0100357 s16 smendx[];
Will Deacon45ae7cf2013-06-24 18:31:25 +0100358};
Robin Murphy468f4942016-09-12 17:13:49 +0100359#define INVALID_SMENDX -1
Robin Murphy06e393e2016-09-12 17:13:55 +0100360#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
361#define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu)
Robin Murphye7595e52016-11-07 18:25:09 +0000362#define fwspec_smendx(fw, i) \
363 (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
Robin Murphy06e393e2016-09-12 17:13:55 +0100364#define for_each_cfg_sme(fw, i, idx) \
Robin Murphye7595e52016-11-07 18:25:09 +0000365 for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
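
/*
 * Illustrative sketch only: arm_smmu_count_mapped_smes() is not part of the
 * driver and exists purely to show how the fwspec helpers above are meant to
 * be used. The add_device/attach paths later in this file walk stream-map
 * entries (SMEs) the same way, skipping stream IDs that have no entry yet.
 */
static inline int arm_smmu_count_mapped_smes(struct iommu_fwspec *fwspec)
{
	int i, idx, count = 0;

	for_each_cfg_sme(fwspec, i, idx) {
		if (idx != INVALID_SMENDX)
			count++;	/* this stream ID already owns an SME */
	}
	return count;
}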
Will Deacon45ae7cf2013-06-24 18:31:25 +0100366
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700367/*
368 * Describes resources required for on/off power operation.
 369 * Separate reference counts are provided for atomic and non-atomic
 370 * operations.
371 */
372struct arm_smmu_power_resources {
373 struct platform_device *pdev;
374 struct device *dev;
375
376 struct clk **clocks;
377 int num_clocks;
378
379 struct regulator_bulk_data *gdscs;
380 int num_gdscs;
381
382 uint32_t bus_client;
383 struct msm_bus_scale_pdata *bus_dt_data;
384
385 /* Protects power_count */
386 struct mutex power_lock;
387 int power_count;
388
389 /* Protects clock_refs_count */
390 spinlock_t clock_refs_lock;
391 int clock_refs_count;
Prakash Guptafad87ca2017-05-16 12:13:02 +0530392 int regulator_defer;
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700393};
394
Patrick Daly95895ba2017-08-11 14:56:38 -0700395struct arm_smmu_arch_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100396struct arm_smmu_device {
397 struct device *dev;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100398
399 void __iomem *base;
400 unsigned long size;
Will Deaconc757e852014-07-30 11:33:25 +0100401 unsigned long pgshift;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100402
403#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
404#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1)
405#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
406#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
407#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000408#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800409#define ARM_SMMU_FEAT_VMID16 (1 << 6)
Robin Murphy7602b872016-04-28 17:12:09 +0100410#define ARM_SMMU_FEAT_FMT_AARCH64_4K (1 << 7)
411#define ARM_SMMU_FEAT_FMT_AARCH64_16K (1 << 8)
412#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
413#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
414#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100415 u32 features;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000416
417#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -0800418#define ARM_SMMU_OPT_FATAL_ASF (1 << 1)
Patrick Daly59b6d202017-06-12 13:12:15 -0700419#define ARM_SMMU_OPT_SKIP_INIT (1 << 2)
Patrick Dalyc190d932016-08-30 17:23:28 -0700420#define ARM_SMMU_OPT_DYNAMIC (1 << 3)
Patrick Daly4423d3e2017-05-04 18:17:51 -0700421#define ARM_SMMU_OPT_3LVL_TABLES (1 << 4)
Patrick Dalye7069342017-07-11 12:35:55 -0700422#define ARM_SMMU_OPT_NO_ASID_RETENTION (1 << 5)
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000423 u32 options;
Robin Murphy09360402014-08-28 17:51:59 +0100424 enum arm_smmu_arch_version version;
Robin Murphy67b65a32016-04-13 18:12:57 +0100425 enum arm_smmu_implementation model;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100426
427 u32 num_context_banks;
428 u32 num_s2_context_banks;
429 DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
430 atomic_t irptndx;
431
432 u32 num_mapping_groups;
Robin Murphy53867802016-09-12 17:13:48 +0100433 u16 streamid_mask;
434 u16 smr_mask_mask;
Robin Murphy468f4942016-09-12 17:13:49 +0100435 struct arm_smmu_smr *smrs;
Robin Murphya754fd12016-09-12 17:13:50 +0100436 struct arm_smmu_s2cr *s2crs;
Robin Murphy6668f692016-09-12 17:13:54 +0100437 struct mutex stream_map_mutex;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100438
Will Deacon518f7132014-11-14 17:17:54 +0000439 unsigned long va_size;
440 unsigned long ipa_size;
441 unsigned long pa_size;
Robin Murphyd5466352016-05-09 17:20:09 +0100442 unsigned long pgsize_bitmap;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100443
444 u32 num_global_irqs;
445 u32 num_context_irqs;
446 unsigned int *irqs;
447
Patrick Daly8e3371a2017-02-13 22:14:53 -0800448 struct list_head list;
449
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800450 u32 cavium_id_base; /* Specific to Cavium */
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -0700451 /* Specific to QCOM */
452 struct arm_smmu_impl_def_reg *impl_def_attach_registers;
453 unsigned int num_impl_def_attach_registers;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -0800454
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700455 struct arm_smmu_power_resources *pwr;
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700456
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -0800457 spinlock_t atos_lock;
Patrick Dalyc190d932016-08-30 17:23:28 -0700458
459 /* protects idr */
460 struct mutex idr_mutex;
461 struct idr asid_idr;
Patrick Dalyd7476202016-09-08 18:23:28 -0700462
463 struct arm_smmu_arch_ops *arch_ops;
464 void *archdata;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100465};
466
Robin Murphy7602b872016-04-28 17:12:09 +0100467enum arm_smmu_context_fmt {
468 ARM_SMMU_CTX_FMT_NONE,
469 ARM_SMMU_CTX_FMT_AARCH64,
470 ARM_SMMU_CTX_FMT_AARCH32_L,
471 ARM_SMMU_CTX_FMT_AARCH32_S,
Will Deacon45ae7cf2013-06-24 18:31:25 +0100472};
473
474struct arm_smmu_cfg {
Will Deacon45ae7cf2013-06-24 18:31:25 +0100475 u8 cbndx;
476 u8 irptndx;
477 u32 cbar;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600478 u32 procid;
479 u16 asid;
Robin Murphy7602b872016-04-28 17:12:09 +0100480 enum arm_smmu_context_fmt fmt;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100481};
Dan Carpenterfaea13b72013-08-21 09:33:30 +0100482#define INVALID_IRPTNDX 0xff
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600483#define INVALID_CBNDX 0xff
484#define INVALID_ASID 0xffff
Patrick Dalyc190d932016-08-30 17:23:28 -0700485/*
486 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
487 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
488 */
489#define MAX_ASID 0xff
Will Deacon45ae7cf2013-06-24 18:31:25 +0100490
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600491#define ARM_SMMU_CB_ASID(smmu, cfg) ((cfg)->asid)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800492#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
Will Deaconecfadb62013-07-31 19:21:28 +0100493
Will Deaconc752ce42014-06-25 22:46:31 +0100494enum arm_smmu_domain_stage {
495 ARM_SMMU_DOMAIN_S1 = 0,
496 ARM_SMMU_DOMAIN_S2,
497 ARM_SMMU_DOMAIN_NESTED,
498};
499
Patrick Dalyc11d1082016-09-01 15:52:44 -0700500struct arm_smmu_pte_info {
501 void *virt_addr;
502 size_t size;
503 struct list_head entry;
504};
505
Will Deacon45ae7cf2013-06-24 18:31:25 +0100506struct arm_smmu_domain {
Will Deacon44680ee2014-06-25 11:29:12 +0100507 struct arm_smmu_device *smmu;
Patrick Dalyea63baa2017-02-13 17:11:33 -0800508 struct device *dev;
Will Deacon518f7132014-11-14 17:17:54 +0000509 struct io_pgtable_ops *pgtbl_ops;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -0700510 struct io_pgtable_cfg pgtbl_cfg;
Will Deacon518f7132014-11-14 17:17:54 +0000511 spinlock_t pgtbl_lock;
Will Deacon44680ee2014-06-25 11:29:12 +0100512 struct arm_smmu_cfg cfg;
Will Deaconc752ce42014-06-25 22:46:31 +0100513 enum arm_smmu_domain_stage stage;
Will Deacon518f7132014-11-14 17:17:54 +0000514 struct mutex init_mutex; /* Protects smmu pointer */
Patrick Dalyc190d932016-08-30 17:23:28 -0700515 u32 attributes;
Patrick Dalyc11d1082016-09-01 15:52:44 -0700516 u32 secure_vmid;
517 struct list_head pte_info_list;
518 struct list_head unassign_list;
Patrick Dalye271f212016-10-04 13:24:49 -0700519 struct mutex assign_lock;
Patrick Dalyb7dfda72016-10-04 14:42:58 -0700520 struct list_head secure_pool_list;
Joerg Roedel1d672632015-03-26 13:43:10 +0100521 struct iommu_domain domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100522};
523
Patrick Daly8e3371a2017-02-13 22:14:53 -0800524static DEFINE_SPINLOCK(arm_smmu_devices_lock);
525static LIST_HEAD(arm_smmu_devices);
526
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000527struct arm_smmu_option_prop {
528 u32 opt;
529 const char *prop;
530};
531
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800532static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
533
Robin Murphy7e96c742016-09-14 15:26:46 +0100534static bool using_legacy_binding, using_generic_binding;
535
Mitchel Humpherys29073202014-07-08 09:52:18 -0700536static struct arm_smmu_option_prop arm_smmu_options[] = {
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000537 { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -0800538 { ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
Patrick Daly59b6d202017-06-12 13:12:15 -0700539 { ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
Patrick Dalyc190d932016-08-30 17:23:28 -0700540 { ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
Patrick Daly4423d3e2017-05-04 18:17:51 -0700541 { ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
Patrick Dalye7069342017-07-11 12:35:55 -0700542 { ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000543 { 0, NULL},
544};
545
Mitchel Humpherysb8be4132015-02-06 14:25:10 -0800546static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
547 dma_addr_t iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -0700548static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
549 dma_addr_t iova);
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -0600550static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -0800551
Patrick Dalyc11d1082016-09-01 15:52:44 -0700552static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
553static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
Patrick Dalye271f212016-10-04 13:24:49 -0700554static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -0700555static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);
556
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -0700557static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
558 dma_addr_t iova);
559
Patrick Dalyef6c1dc2016-11-16 14:35:23 -0800560static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);
561
Patrick Dalyda688822017-05-17 20:12:48 -0700562static int arm_smmu_alloc_cb(struct iommu_domain *domain,
563 struct arm_smmu_device *smmu,
564 struct device *dev);
565
Joerg Roedel1d672632015-03-26 13:43:10 +0100566static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
567{
568 return container_of(dom, struct arm_smmu_domain, domain);
569}
570
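/*
 * Translate optional properties on the SMMU's DT node into the
 * ARM_SMMU_OPT_* flags stored in smmu->options.
 */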
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000571static void parse_driver_options(struct arm_smmu_device *smmu)
572{
573 int i = 0;
Mitchel Humpherys29073202014-07-08 09:52:18 -0700574
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000575 do {
576 if (of_property_read_bool(smmu->dev->of_node,
577 arm_smmu_options[i].prop)) {
578 smmu->options |= arm_smmu_options[i].opt;
Mitchel Humpherysba822582015-10-20 11:37:41 -0700579 dev_dbg(smmu->dev, "option %s\n",
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000580 arm_smmu_options[i].prop);
581 }
582 } while (arm_smmu_options[++i].opt);
583}
584
Patrick Dalyc190d932016-08-30 17:23:28 -0700585static bool is_dynamic_domain(struct iommu_domain *domain)
586{
587 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
588
589 return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
590}
591
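/*
 * Page-table walks are treated as cache coherent either when the domain
 * attribute forces it or when the SMMU device itself is DMA-coherent.
 */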
Liam Mark53cf2342016-12-20 11:36:07 -0800592static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
593{
594 if (smmu_domain->attributes &
595 (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT))
596 return true;
597 else if (smmu_domain->smmu && smmu_domain->smmu->dev)
598 return smmu_domain->smmu->dev->archdata.dma_coherent;
599 else
600 return false;
601}
602
Patrick Dalye271f212016-10-04 13:24:49 -0700603static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
604{
605 return (smmu_domain->secure_vmid != VMID_INVAL);
606}
607
608static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
609{
610 if (arm_smmu_is_domain_secure(smmu_domain))
611 mutex_lock(&smmu_domain->assign_lock);
612}
613
614static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
615{
616 if (arm_smmu_is_domain_secure(smmu_domain))
617 mutex_unlock(&smmu_domain->assign_lock);
618}
619
Patrick Daly95895ba2017-08-11 14:56:38 -0700620/*
621 * init()
622 * Hook for additional device tree parsing at probe time.
623 *
624 * device_reset()
625 * Hook for one-time architecture-specific register settings.
626 *
627 * iova_to_phys_hard()
628 * Provides debug information. May be called from the context fault irq handler.
629 *
630 * init_context_bank()
631 * Hook for architecture-specific settings which require knowledge of the
632 * dynamically allocated context bank number.
633 *
634 * device_group()
 635 * Hook for checking whether a device is compatible with a given group.
636 */
637struct arm_smmu_arch_ops {
638 int (*init)(struct arm_smmu_device *smmu);
639 void (*device_reset)(struct arm_smmu_device *smmu);
640 phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
641 dma_addr_t iova);
642 void (*init_context_bank)(struct arm_smmu_domain *smmu_domain,
643 struct device *dev);
644 int (*device_group)(struct device *dev, struct iommu_group *group);
645};
646
647static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
648{
649 if (!smmu->arch_ops)
650 return 0;
651 if (!smmu->arch_ops->init)
652 return 0;
653 return smmu->arch_ops->init(smmu);
654}
655
656static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
657{
658 if (!smmu->arch_ops)
659 return;
660 if (!smmu->arch_ops->device_reset)
661 return;
662 return smmu->arch_ops->device_reset(smmu);
663}
664
665static void arm_smmu_arch_init_context_bank(
666 struct arm_smmu_domain *smmu_domain, struct device *dev)
667{
668 struct arm_smmu_device *smmu = smmu_domain->smmu;
669
670 if (!smmu->arch_ops)
671 return;
672 if (!smmu->arch_ops->init_context_bank)
673 return;
674 return smmu->arch_ops->init_context_bank(smmu_domain, dev);
675}
676
677static int arm_smmu_arch_device_group(struct device *dev,
678 struct iommu_group *group)
679{
680 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
681 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
682
683 if (!smmu->arch_ops)
684 return 0;
685 if (!smmu->arch_ops->device_group)
686 return 0;
687 return smmu->arch_ops->device_group(dev, group);
688}
689
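/*
 * Resolve the DT node used by the legacy "mmu-masters" binding: for PCI
 * devices this walks up to the root bus bridge (whose node carries the
 * binding); for everything else it is the device's own node. The reference
 * taken with of_node_get() must be dropped by the caller.
 */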
Will Deacon8f68f8e2014-07-15 11:27:08 +0100690static struct device_node *dev_get_dev_node(struct device *dev)
Will Deacona9a1b0b2014-05-01 18:05:08 +0100691{
692 if (dev_is_pci(dev)) {
693 struct pci_bus *bus = to_pci_dev(dev)->bus;
Mitchel Humpherys29073202014-07-08 09:52:18 -0700694
Will Deacona9a1b0b2014-05-01 18:05:08 +0100695 while (!pci_is_root_bus(bus))
696 bus = bus->parent;
Robin Murphyd5b41782016-09-14 15:21:39 +0100697 return of_node_get(bus->bridge->parent->of_node);
Will Deacona9a1b0b2014-05-01 18:05:08 +0100698 }
699
Robin Murphyd5b41782016-09-14 15:21:39 +0100700 return of_node_get(dev->of_node);
Will Deacona9a1b0b2014-05-01 18:05:08 +0100701}
702
Robin Murphyd5b41782016-09-14 15:21:39 +0100703static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100704{
Robin Murphyd5b41782016-09-14 15:21:39 +0100705 *((__be32 *)data) = cpu_to_be32(alias);
706 return 0; /* Continue walking */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100707}
708
Robin Murphyd5b41782016-09-14 15:21:39 +0100709static int __find_legacy_master_phandle(struct device *dev, void *data)
Will Deacona9a1b0b2014-05-01 18:05:08 +0100710{
Robin Murphyd5b41782016-09-14 15:21:39 +0100711 struct of_phandle_iterator *it = *(void **)data;
712 struct device_node *np = it->node;
713 int err;
Will Deacona9a1b0b2014-05-01 18:05:08 +0100714
Robin Murphyd5b41782016-09-14 15:21:39 +0100715 of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
716 "#stream-id-cells", 0)
717 if (it->node == np) {
718 *(void **)data = dev;
719 return 1;
Olav Haugan3c8766d2014-08-22 17:12:32 -0700720 }
Robin Murphyd5b41782016-09-14 15:21:39 +0100721 it->node = np;
722 return err == -ENOENT ? 0 : err;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100723}
724
Robin Murphyfe52d4f2016-09-12 17:13:52 +0100725static struct platform_driver arm_smmu_driver;
Robin Murphy06e393e2016-09-12 17:13:55 +0100726static struct iommu_ops arm_smmu_ops;
Robin Murphyfe52d4f2016-09-12 17:13:52 +0100727
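/*
 * Handle the deprecated "mmu-masters"/"#stream-id-cells" binding: find the
 * SMMU whose node lists this master, derive its stream IDs (for PCI, the
 * requester-ID alias), and register them through the generic fwspec code.
 */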
Robin Murphy06e393e2016-09-12 17:13:55 +0100728static int arm_smmu_register_legacy_master(struct device *dev,
729 struct arm_smmu_device **smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100730{
Robin Murphy06e393e2016-09-12 17:13:55 +0100731 struct device *smmu_dev;
Robin Murphyd5b41782016-09-14 15:21:39 +0100732 struct device_node *np;
733 struct of_phandle_iterator it;
734 void *data = &it;
Robin Murphy06e393e2016-09-12 17:13:55 +0100735 u32 *sids;
Robin Murphyd5b41782016-09-14 15:21:39 +0100736 __be32 pci_sid;
737 int err = 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100738
Stephen Boydfecdeef2017-03-01 16:53:19 -0800739 memset(&it, 0, sizeof(it));
Robin Murphyd5b41782016-09-14 15:21:39 +0100740 np = dev_get_dev_node(dev);
741 if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
742 of_node_put(np);
743 return -ENODEV;
744 }
745
746 it.node = np;
Robin Murphyfe52d4f2016-09-12 17:13:52 +0100747 err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
748 __find_legacy_master_phandle);
Robin Murphy06e393e2016-09-12 17:13:55 +0100749 smmu_dev = data;
Robin Murphyd5b41782016-09-14 15:21:39 +0100750 of_node_put(np);
751 if (err == 0)
752 return -ENODEV;
753 if (err < 0)
754 return err;
Will Deacon44680ee2014-06-25 11:29:12 +0100755
Robin Murphyd5b41782016-09-14 15:21:39 +0100756 if (dev_is_pci(dev)) {
757 /* "mmu-masters" assumes Stream ID == Requester ID */
758 pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
759 &pci_sid);
760 it.cur = &pci_sid;
761 it.cur_count = 1;
762 }
763
Robin Murphy06e393e2016-09-12 17:13:55 +0100764 err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
765 &arm_smmu_ops);
766 if (err)
767 return err;
768
769 sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
770 if (!sids)
Robin Murphyd5b41782016-09-14 15:21:39 +0100771 return -ENOMEM;
772
Robin Murphy06e393e2016-09-12 17:13:55 +0100773 *smmu = dev_get_drvdata(smmu_dev);
774 of_phandle_iterator_args(&it, sids, it.cur_count);
775 err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
776 kfree(sids);
777 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100778}
779
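/*
 * Claim the first free bit in [start, end) of an allocation bitmap (context
 * banks, SMR groups); returns the bit index or -ENOSPC. test_and_set_bit()
 * makes the claim safe against concurrent allocators.
 */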
780static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
781{
782 int idx;
783
784 do {
785 idx = find_next_zero_bit(map, end, start);
786 if (idx == end)
787 return -ENOSPC;
788 } while (test_and_set_bit(idx, map));
789
790 return idx;
791}
792
793static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
794{
795 clear_bit(idx, map);
796}
797
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700798static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700799{
800 int i, ret = 0;
801
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700802 for (i = 0; i < pwr->num_clocks; ++i) {
803 ret = clk_prepare(pwr->clocks[i]);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700804 if (ret) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700805 dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700806 while (i--)
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700807 clk_unprepare(pwr->clocks[i]);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700808 break;
809 }
810 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700811 return ret;
812}
813
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700814static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700815{
816 int i;
817
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700818 for (i = pwr->num_clocks; i; --i)
819 clk_unprepare(pwr->clocks[i - 1]);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700820}
821
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700822static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -0700823{
824 int i, ret = 0;
Patrick Daly8befb662016-08-17 20:03:28 -0700825
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700826 for (i = 0; i < pwr->num_clocks; ++i) {
827 ret = clk_enable(pwr->clocks[i]);
Patrick Daly8befb662016-08-17 20:03:28 -0700828 if (ret) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700829 dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
Patrick Daly8befb662016-08-17 20:03:28 -0700830 while (i--)
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700831 clk_disable(pwr->clocks[i]);
Patrick Daly8befb662016-08-17 20:03:28 -0700832 break;
833 }
834 }
835
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700836 return ret;
837}
Patrick Daly8befb662016-08-17 20:03:28 -0700838
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700839static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
840{
841 int i;
842
843 for (i = pwr->num_clocks; i; --i)
844 clk_disable(pwr->clocks[i - 1]);
845}
846
847static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
848{
849 if (!pwr->bus_client)
850 return 0;
851 return msm_bus_scale_client_update_request(pwr->bus_client, 1);
852}
853
854static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
855{
856 if (!pwr->bus_client)
857 return;
858 WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
859}
860
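/*
 * Power up every GDSC supplying the SMMU, unwinding the regulators already
 * enabled if any regulator_enable() call fails.
 */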
Patrick Dalyb26f97c2017-08-11 15:24:20 -0700861static int arm_smmu_enable_regulators(struct arm_smmu_power_resources *pwr)
862{
863 struct regulator_bulk_data *consumers;
864 int num_consumers, ret;
865 int i;
866
867 num_consumers = pwr->num_gdscs;
868 consumers = pwr->gdscs;
869 for (i = 0; i < num_consumers; i++) {
870 ret = regulator_enable(consumers[i].consumer);
871 if (ret)
872 goto out;
873 }
874 return 0;
875
876out:
877 i -= 1;
878 for (; i >= 0; i--)
879 regulator_disable(consumers[i].consumer);
880 return ret;
881}
882
Prakash Guptafad87ca2017-05-16 12:13:02 +0530883static int arm_smmu_disable_regulators(struct arm_smmu_power_resources *pwr)
884{
885 struct regulator_bulk_data *consumers;
886 int i;
887 int num_consumers, ret, r;
888
889 num_consumers = pwr->num_gdscs;
890 consumers = pwr->gdscs;
891 for (i = num_consumers - 1; i >= 0; --i) {
892 ret = regulator_disable_deferred(consumers[i].consumer,
893 pwr->regulator_defer);
894 if (ret != 0)
895 goto err;
896 }
897
898 return 0;
899
900err:
901 pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret);
902 for (++i; i < num_consumers; ++i) {
903 r = regulator_enable(consumers[i].consumer);
904 if (r != 0)
905 pr_err("Failed to reename %s: %d\n",
906 consumers[i].supply, r);
907 }
908
909 return ret;
910}
911
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700912/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
913static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
914{
915 int ret = 0;
916 unsigned long flags;
917
918 spin_lock_irqsave(&pwr->clock_refs_lock, flags);
919 if (pwr->clock_refs_count > 0) {
920 pwr->clock_refs_count++;
921 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
922 return 0;
923 }
924
925 ret = arm_smmu_enable_clocks(pwr);
926 if (!ret)
927 pwr->clock_refs_count = 1;
928
929 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
Patrick Daly8befb662016-08-17 20:03:28 -0700930 return ret;
931}
932
933/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700934static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -0700935{
Patrick Daly8befb662016-08-17 20:03:28 -0700936 unsigned long flags;
937
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700938 spin_lock_irqsave(&pwr->clock_refs_lock, flags);
939 if (pwr->clock_refs_count == 0) {
940 WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
941 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
942 return;
943
944 } else if (pwr->clock_refs_count > 1) {
945 pwr->clock_refs_count--;
946 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
Patrick Daly8befb662016-08-17 20:03:28 -0700947 return;
948 }
949
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700950 arm_smmu_disable_clocks(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -0700951
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700952 pwr->clock_refs_count = 0;
953 spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
Patrick Daly8befb662016-08-17 20:03:28 -0700954}
955
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700956static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700957{
958 int ret;
959
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700960 mutex_lock(&pwr->power_lock);
961 if (pwr->power_count > 0) {
962 pwr->power_count += 1;
963 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700964 return 0;
965 }
966
Patrick Daly8e2aa1a2017-04-13 17:09:43 -0700967 ret = arm_smmu_request_bus(pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -0700968 if (ret)
969 goto out_unlock;
970
Patrick Dalyb26f97c2017-08-11 15:24:20 -0700971 ret = arm_smmu_enable_regulators(pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -0700972 if (ret)
Patrick Daly8e2aa1a2017-04-13 17:09:43 -0700973 goto out_disable_bus;
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700974
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700975 ret = arm_smmu_prepare_clocks(pwr);
Patrick Daly2764f952016-09-06 19:22:44 -0700976 if (ret)
Patrick Daly8e2aa1a2017-04-13 17:09:43 -0700977 goto out_disable_regulators;
Patrick Daly2764f952016-09-06 19:22:44 -0700978
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700979 pwr->power_count = 1;
980 mutex_unlock(&pwr->power_lock);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -0700981 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700982
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -0700983out_disable_regulators:
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700984 regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
Patrick Daly8e2aa1a2017-04-13 17:09:43 -0700985out_disable_bus:
986 arm_smmu_unrequest_bus(pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -0700987out_unlock:
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700988 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700989 return ret;
990}
991
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700992static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700993{
Patrick Daly5b3d8c62016-11-01 15:34:11 -0700994 mutex_lock(&pwr->power_lock);
995 if (pwr->power_count == 0) {
996 WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
997 mutex_unlock(&pwr->power_lock);
998 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700999
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001000 } else if (pwr->power_count > 1) {
1001 pwr->power_count--;
1002 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001003 return;
1004 }
1005
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001006 arm_smmu_unprepare_clocks(pwr);
Prakash Guptafad87ca2017-05-16 12:13:02 +05301007 arm_smmu_disable_regulators(pwr);
Patrick Daly8e2aa1a2017-04-13 17:09:43 -07001008 arm_smmu_unrequest_bus(pwr);
Patrick Daly2e3471e2017-04-13 16:24:33 -07001009 pwr->power_count = 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001010 mutex_unlock(&pwr->power_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001011}
1012
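/*
 * Full (sleepable) power-on: take the bus vote, regulators and clock
 * prepares via the "slow" path, then enable the clocks via the atomic path.
 * Both paths keep their own reference counts, so nested calls are cheap.
 */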
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001013static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -07001014{
1015 int ret;
1016
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001017 ret = arm_smmu_power_on_slow(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001018 if (ret)
1019 return ret;
1020
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001021 ret = arm_smmu_power_on_atomic(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001022 if (ret)
1023 goto out_disable;
1024
1025 return 0;
1026
1027out_disable:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001028 arm_smmu_power_off_slow(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001029 return ret;
1030}
1031
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001032static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
Patrick Daly8befb662016-08-17 20:03:28 -07001033{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001034 arm_smmu_power_off_atomic(pwr);
1035 arm_smmu_power_off_slow(pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001036}
1037
1038/*
1039 * Must be used instead of arm_smmu_power_on if it may be called from
1040 * atomic context
1041 */
1042static int arm_smmu_domain_power_on(struct iommu_domain *domain,
1043 struct arm_smmu_device *smmu)
1044{
1045 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1046 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
1047
1048 if (atomic_domain)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001049 return arm_smmu_power_on_atomic(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001050
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001051 return arm_smmu_power_on(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001052}
1053
1054/*
 1055 * Must be used instead of arm_smmu_power_off if it may be called from
1056 * atomic context
1057 */
1058static void arm_smmu_domain_power_off(struct iommu_domain *domain,
1059 struct arm_smmu_device *smmu)
1060{
1061 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1062 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
1063
1064 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001065 arm_smmu_power_off_atomic(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001066 return;
1067 }
1068
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001069 arm_smmu_power_off(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001070}
1071
Will Deacon45ae7cf2013-06-24 18:31:25 +01001072/* Wait for any pending TLB invalidations to complete */
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001073static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
1074 int cbndx)
1075{
1076 void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
1077 u32 val;
1078
1079 writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
1080 if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
1081 !(val & TLBSTATUS_SACTIVE),
Mitchel Humpherys9b1b8942015-06-25 18:17:15 -07001082 0, TLB_LOOP_TIMEOUT))
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001083 dev_err(smmu->dev, "TLBSYNC timeout!\n");
1084}
1085
Will Deacon518f7132014-11-14 17:17:54 +00001086static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001087{
1088 int count = 0;
1089 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1090
1091 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
1092 while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
1093 & sTLBGSTATUS_GSACTIVE) {
1094 cpu_relax();
1095 if (++count == TLB_LOOP_TIMEOUT) {
1096 dev_err_ratelimited(smmu->dev,
1097 "TLB sync timed out -- SMMU may be deadlocked\n");
1098 return;
1099 }
1100 udelay(1);
1101 }
1102}
1103
Will Deacon518f7132014-11-14 17:17:54 +00001104static void arm_smmu_tlb_sync(void *cookie)
Will Deacon1463fe42013-07-31 19:21:27 +01001105{
Will Deacon518f7132014-11-14 17:17:54 +00001106 struct arm_smmu_domain *smmu_domain = cookie;
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001107 arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
Will Deacon518f7132014-11-14 17:17:54 +00001108}
1109
Patrick Daly8befb662016-08-17 20:03:28 -07001110/* Must be called with clocks/regulators enabled */
Will Deacon518f7132014-11-14 17:17:54 +00001111static void arm_smmu_tlb_inv_context(void *cookie)
1112{
1113 struct arm_smmu_domain *smmu_domain = cookie;
Will Deacon44680ee2014-06-25 11:29:12 +01001114 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1115 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon1463fe42013-07-31 19:21:27 +01001116 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
Will Deacon518f7132014-11-14 17:17:54 +00001117 void __iomem *base;
Patrick Dalye7069342017-07-11 12:35:55 -07001118 bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
Will Deacon1463fe42013-07-31 19:21:27 +01001119
Patrick Dalye7069342017-07-11 12:35:55 -07001120 if (stage1 && !use_tlbiall) {
Will Deacon1463fe42013-07-31 19:21:27 +01001121 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001122 writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +01001123 base + ARM_SMMU_CB_S1_TLBIASID);
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001124 arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
Patrick Dalye7069342017-07-11 12:35:55 -07001125 } else if (stage1 && use_tlbiall) {
1126 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1127 writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
1128 arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001129 } else {
1130 base = ARM_SMMU_GR0(smmu);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001131 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +01001132 base + ARM_SMMU_GR0_TLBIVMID);
Mitchel Humpherysf3007992015-06-19 15:00:14 -07001133 __arm_smmu_tlb_sync(smmu);
Will Deacon1463fe42013-07-31 19:21:27 +01001134 }
Will Deacon1463fe42013-07-31 19:21:27 +01001135}
1136
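/*
 * Queue TLB invalidations for an IOVA range without waiting for completion:
 * by VA+ASID for stage 1 (or TLBIALL when ASID retention is disabled), by
 * IPA for stage 2 on SMMUv2, and by VMID otherwise. The matching sync is
 * issued separately through the io-pgtable tlb_sync callback.
 */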
Will Deacon518f7132014-11-14 17:17:54 +00001137static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
Robin Murphy06c610e2015-12-07 18:18:53 +00001138 size_t granule, bool leaf, void *cookie)
Will Deacon518f7132014-11-14 17:17:54 +00001139{
1140 struct arm_smmu_domain *smmu_domain = cookie;
1141 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1142 struct arm_smmu_device *smmu = smmu_domain->smmu;
1143 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1144 void __iomem *reg;
Patrick Dalye7069342017-07-11 12:35:55 -07001145 bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
Will Deacon518f7132014-11-14 17:17:54 +00001146
Patrick Dalye7069342017-07-11 12:35:55 -07001147 if (stage1 && !use_tlbiall) {
Will Deacon518f7132014-11-14 17:17:54 +00001148 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1149 reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
1150
Robin Murphy7602b872016-04-28 17:12:09 +01001151 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001152 iova &= ~12UL;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001153 iova |= ARM_SMMU_CB_ASID(smmu, cfg);
Robin Murphy75df1382015-12-07 18:18:52 +00001154 do {
1155 writel_relaxed(iova, reg);
1156 iova += granule;
1157 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001158 } else {
1159 iova >>= 12;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001160 iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
Robin Murphy75df1382015-12-07 18:18:52 +00001161 do {
1162 writeq_relaxed(iova, reg);
1163 iova += granule >> 12;
1164 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001165 }
Patrick Dalye7069342017-07-11 12:35:55 -07001166 } else if (stage1 && use_tlbiall) {
1167 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1168 reg += ARM_SMMU_CB_S1_TLBIALL;
1169 writel_relaxed(0, reg);
Will Deacon518f7132014-11-14 17:17:54 +00001170 } else if (smmu->version == ARM_SMMU_V2) {
1171 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1172 reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
1173 ARM_SMMU_CB_S2_TLBIIPAS2;
Robin Murphy75df1382015-12-07 18:18:52 +00001174 iova >>= 12;
1175 do {
Robin Murphyf9a05f02016-04-13 18:13:01 +01001176 smmu_write_atomic_lq(iova, reg);
Robin Murphy75df1382015-12-07 18:18:52 +00001177 iova += granule >> 12;
1178 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +00001179 } else {
1180 reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001181 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
Will Deacon518f7132014-11-14 17:17:54 +00001182 }
1183}
1184
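/*
 * Secure domains hand their page-table pages over to a secure VM. Rather
 * than unassigning and reassigning pages on every table free/alloc, freed
 * pages are parked in a per-domain pool and reused; the pool is drained
 * when the domain is destroyed.
 */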
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001185struct arm_smmu_secure_pool_chunk {
1186 void *addr;
1187 size_t size;
1188 struct list_head list;
1189};
1190
1191static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
1192 size_t size)
1193{
1194 struct arm_smmu_secure_pool_chunk *it;
1195
1196 list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
1197 if (it->size == size) {
1198 void *addr = it->addr;
1199
1200 list_del(&it->list);
1201 kfree(it);
1202 return addr;
1203 }
1204 }
1205
1206 return NULL;
1207}
1208
1209static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
1210 void *addr, size_t size)
1211{
1212 struct arm_smmu_secure_pool_chunk *chunk;
1213
1214 chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
1215 if (!chunk)
1216 return -ENOMEM;
1217
1218 chunk->addr = addr;
1219 chunk->size = size;
1220 memset(addr, 0, size);
1221 list_add(&chunk->list, &smmu_domain->secure_pool_list);
1222
1223 return 0;
1224}
1225
1226static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
1227{
1228 struct arm_smmu_secure_pool_chunk *it, *i;
1229
1230 list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
1231 arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
1232 /* pages will be freed later (after being unassigned) */
1233 kfree(it);
1234 }
1235}
1236
Patrick Dalyc11d1082016-09-01 15:52:44 -07001237static void *arm_smmu_alloc_pages_exact(void *cookie,
1238 size_t size, gfp_t gfp_mask)
1239{
1240 int ret;
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001241 void *page;
1242 struct arm_smmu_domain *smmu_domain = cookie;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001243
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001244 if (!arm_smmu_is_domain_secure(smmu_domain))
1245 return alloc_pages_exact(size, gfp_mask);
1246
1247 page = arm_smmu_secure_pool_remove(smmu_domain, size);
1248 if (page)
1249 return page;
1250
1251 page = alloc_pages_exact(size, gfp_mask);
1252 if (page) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07001253 ret = arm_smmu_prepare_pgtable(page, cookie);
1254 if (ret) {
1255 free_pages_exact(page, size);
1256 return NULL;
1257 }
1258 }
1259
1260 return page;
1261}
1262
1263static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
1264{
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001265 struct arm_smmu_domain *smmu_domain = cookie;
1266
1267 if (!arm_smmu_is_domain_secure(smmu_domain)) {
1268 free_pages_exact(virt, size);
1269 return;
1270 }
1271
1272 if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
1273 arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001274}
1275
Will Deacon518f7132014-11-14 17:17:54 +00001276static struct iommu_gather_ops arm_smmu_gather_ops = {
1277 .tlb_flush_all = arm_smmu_tlb_inv_context,
1278 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
1279 .tlb_sync = arm_smmu_tlb_sync,
Patrick Dalyc11d1082016-09-01 15:52:44 -07001280 .alloc_pages_exact = arm_smmu_alloc_pages_exact,
1281 .free_pages_exact = arm_smmu_free_pages_exact,
Will Deacon518f7132014-11-14 17:17:54 +00001282};
1283
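/*
 * Fault-time diagnostic: translate the faulting IOVA through the hardware
 * ATOS path, invalidate the context's TLB entries, and translate again. A
 * mismatch points at a stale TLB entry rather than a bad page table.
 */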
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001284static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
1285 dma_addr_t iova, u32 fsr)
1286{
1287 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001288 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001289 phys_addr_t phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07001290 phys_addr_t phys_post_tlbiall;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001291
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001292 phys = arm_smmu_iova_to_phys_hard(domain, iova);
1293 arm_smmu_tlb_inv_context(smmu_domain);
1294 phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001295
Patrick Dalyad441dd2016-09-15 15:50:46 -07001296 if (phys != phys_post_tlbiall) {
1297 dev_err(smmu->dev,
1298 "ATOS results differed across TLBIALL...\n"
1299 "Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
1300 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001301
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001302 return (phys == 0 ? phys_post_tlbiall : phys);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001303}
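/*
 * The double translation above appears to be purely diagnostic: performing a
 * hardware ATOS lookup of the faulting IOVA both before and after TLBIALL
 * distinguishes a stale TLB entry (the two results differ) from an address
 * that is genuinely unmapped or mis-mapped (both lookups agree). The non-zero
 * result, preferring the pre-TLBIALL one, is what the context-fault handler
 * reports as the "hard iova-to-phys".
 */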
1304
Will Deacon45ae7cf2013-06-24 18:31:25 +01001305static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
1306{
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001307 int flags, ret, tmp;
Patrick Daly5ba28112016-08-30 19:18:52 -07001308 u32 fsr, fsynr, resume;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001309 unsigned long iova;
1310 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +01001311 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001312 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1313 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001314 void __iomem *cb_base;
Shalaj Jain04059c52015-03-03 13:34:59 -08001315 void __iomem *gr1_base;
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001316 bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001317 phys_addr_t phys_soft;
Shalaj Jain04059c52015-03-03 13:34:59 -08001318 u32 frsynra;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001319 bool non_fatal_fault = !!(smmu_domain->attributes &
Sudarshan Rajagopalanf4464e02017-08-10 14:30:39 -07001320 (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001321
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001322 static DEFINE_RATELIMIT_STATE(_rs,
1323 DEFAULT_RATELIMIT_INTERVAL,
1324 DEFAULT_RATELIMIT_BURST);
1325
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001326 ret = arm_smmu_power_on(smmu->pwr);
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001327 if (ret)
1328 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001329
Shalaj Jain04059c52015-03-03 13:34:59 -08001330 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001331 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001332 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
1333
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001334 if (!(fsr & FSR_FAULT)) {
1335 ret = IRQ_NONE;
1336 goto out_power_off;
1337 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001338
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001339 if (fatal_asf && (fsr & FSR_ASF)) {
1340 dev_err(smmu->dev,
1341 "Took an address size fault. Refusing to recover.\n");
1342 BUG();
1343 }
1344
Will Deacon45ae7cf2013-06-24 18:31:25 +01001345 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
Patrick Daly5ba28112016-08-30 19:18:52 -07001346 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001347 if (fsr & FSR_TF)
1348 flags |= IOMMU_FAULT_TRANSLATION;
1349 if (fsr & FSR_PF)
1350 flags |= IOMMU_FAULT_PERMISSION;
Mitchel Humpherysdd75e332015-08-19 11:02:33 -07001351 if (fsr & FSR_EF)
1352 flags |= IOMMU_FAULT_EXTERNAL;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001353 if (fsr & FSR_SS)
1354 flags |= IOMMU_FAULT_TRANSACTION_STALLED;
Patrick Daly5ba28112016-08-30 19:18:52 -07001355
Robin Murphyf9a05f02016-04-13 18:13:01 +01001356 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001357 phys_soft = arm_smmu_iova_to_phys(domain, iova);
Shalaj Jain04059c52015-03-03 13:34:59 -08001358 frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
1359 frsynra &= CBFRSYNRA_SID_MASK;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001360 tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
1361 if (!tmp || (tmp == -EBUSY)) {
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001362 dev_dbg(smmu->dev,
1363 "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1364 iova, fsr, fsynr, cfg->cbndx);
1365 dev_dbg(smmu->dev,
1366 "soft iova-to-phys=%pa\n", &phys_soft);
Patrick Daly5ba28112016-08-30 19:18:52 -07001367 ret = IRQ_HANDLED;
Shrenuj Bansald5083c02015-09-18 14:59:09 -07001368 resume = RESUME_TERMINATE;
Patrick Daly5ba28112016-08-30 19:18:52 -07001369 } else {
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001370 phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
1371 fsr);
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001372 if (__ratelimit(&_rs)) {
1373 dev_err(smmu->dev,
1374 "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1375 iova, fsr, fsynr, cfg->cbndx);
1376 dev_err(smmu->dev, "FAR = %016lx\n",
1377 (unsigned long)iova);
1378 dev_err(smmu->dev,
1379 "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
1380 fsr,
1381 (fsr & 0x02) ? "TF " : "",
1382 (fsr & 0x04) ? "AFF " : "",
1383 (fsr & 0x08) ? "PF " : "",
1384 (fsr & 0x10) ? "EF " : "",
1385 (fsr & 0x20) ? "TLBMCF " : "",
1386 (fsr & 0x40) ? "TLBLKF " : "",
1387 (fsr & 0x80) ? "MHF " : "",
1388 (fsr & 0x40000000) ? "SS " : "",
1389 (fsr & 0x80000000) ? "MULTI " : "");
1390 dev_err(smmu->dev,
1391 "soft iova-to-phys=%pa\n", &phys_soft);
Mitchel Humpherysd03b65d2015-11-05 11:50:29 -08001392 if (!phys_soft)
1393 dev_err(smmu->dev,
1394 "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
1395 dev_name(smmu->dev));
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08001396 if (phys_atos)
1397 dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
1398 &phys_atos);
1399 else
1400 dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001401 dev_err(smmu->dev, "SID=0x%x\n", frsynra);
1402 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001403 ret = IRQ_NONE;
1404 resume = RESUME_TERMINATE;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001405 if (!non_fatal_fault) {
1406 dev_err(smmu->dev,
1407 "Unhandled arm-smmu context fault!\n");
1408 BUG();
1409 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001410 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001411
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001412 /*
1413 * If the client returns -EBUSY, do not clear FSR and do not RESUME
1414 * if stalled. This is required to keep the IOMMU client stalled on
1415 * the outstanding fault. This gives the client a chance to take any
1416 * debug action and then terminate the stalled transaction.
1417 * So, the sequence in case of stall on fault should be:
1418 * 1) Do not clear FSR or write to RESUME here
1419 * 2) Client takes any debug action
1420 * 3) Client terminates the stalled transaction and resumes the IOMMU
1421 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1422 * not before so that the fault remains outstanding. This ensures
1423 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1424 * need to be terminated.
1425 */
1426 if (tmp != -EBUSY) {
1427 /* Clear the faulting FSR */
1428 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001429
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001430 /*
1431 * Barrier required to ensure that the FSR is cleared
1432 * before resuming SMMU operation
1433 */
1434 wmb();
1435
1436 /* Retry or terminate any stalled transactions */
1437 if (fsr & FSR_SS)
1438 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1439 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001440
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001441out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001442 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001443
Patrick Daly5ba28112016-08-30 19:18:52 -07001444 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001445}
1446
1447static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1448{
1449 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1450 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001451 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001452
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001453 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001454 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001455
Will Deacon45ae7cf2013-06-24 18:31:25 +01001456 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1457 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1458 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1459 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1460
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001461 if (!gfsr) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001462 arm_smmu_power_off(smmu->pwr);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001463 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001464 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001465
Will Deacon45ae7cf2013-06-24 18:31:25 +01001466 dev_err_ratelimited(smmu->dev,
1467 "Unexpected global fault, this could be serious\n");
1468 dev_err_ratelimited(smmu->dev,
1469 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1470 gfsr, gfsynr0, gfsynr1, gfsynr2);
1471
1472 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001473 arm_smmu_power_off(smmu->pwr);
Will Deaconadaba322013-07-31 19:21:26 +01001474 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001475}
1476
Will Deacon518f7132014-11-14 17:17:54 +00001477static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1478 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001479{
Robin Murphyb94df6f2016-08-11 17:44:06 +01001480 u32 reg, reg2;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001481 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001482 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +01001483 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1484 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001485 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001486
Will Deacon45ae7cf2013-06-24 18:31:25 +01001487 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001488 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1489 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001490
Will Deacon4a1c93c2015-03-04 12:21:03 +00001491 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001492 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1493 reg = CBA2R_RW64_64BIT;
1494 else
1495 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001496 /* 16-bit VMIDs live in CBA2R */
1497 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001498 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001499
Will Deacon4a1c93c2015-03-04 12:21:03 +00001500 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
1501 }
1502
Will Deacon45ae7cf2013-06-24 18:31:25 +01001503 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001504 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001505 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001506 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001507
Will Deacon57ca90f2014-02-06 14:59:05 +00001508 /*
1509 * Use the weakest shareability/memory types, so they are
1510 * overridden by the ttbcr/pte.
1511 */
1512 if (stage1) {
1513 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1514 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001515 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1516 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001517 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001518 }
Will Deacon44680ee2014-06-25 11:29:12 +01001519 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001520
Will Deacon518f7132014-11-14 17:17:54 +00001521 /* TTBRs */
1522 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001523 u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001524
Robin Murphyb94df6f2016-08-11 17:44:06 +01001525 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1526 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
1527 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
1528 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
1529 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
1530 writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
1531 } else {
1532 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1533 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1534 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
1535 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
1536 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1537 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
1538 }
Will Deacon518f7132014-11-14 17:17:54 +00001539 } else {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001540 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001541 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +00001542 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001543
Will Deacon518f7132014-11-14 17:17:54 +00001544 /* TTBCR */
1545 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001546 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1547 reg = pgtbl_cfg->arm_v7s_cfg.tcr;
1548 reg2 = 0;
1549 } else {
1550 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1551 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
1552 reg2 |= TTBCR2_SEP_UPSTREAM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001553 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001554 if (smmu->version > ARM_SMMU_V1)
1555 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001556 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001557 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001558 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001559 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001560
Will Deacon518f7132014-11-14 17:17:54 +00001561 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001562 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001563 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1564 reg = pgtbl_cfg->arm_v7s_cfg.prrr;
1565 reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
1566 } else {
1567 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1568 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1569 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001570 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001571 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001572 }
1573
Will Deacon45ae7cf2013-06-24 18:31:25 +01001574 /* SCTLR */
Robin Murphyb94df6f2016-08-11 17:44:06 +01001575 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001576
Charan Teja Reddyc682e472017-04-20 19:11:20 +05301577 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_CB_STALL_DISABLE)) {
1578 reg &= ~SCTLR_CFCFG;
1579 reg |= SCTLR_HUPCF;
1580 }
1581
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08001582 if ((!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
1583 !(smmu_domain->attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) ||
1584 !stage1)
Patrick Dalye62d3362016-03-15 18:58:28 -07001585 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001586 if (stage1)
1587 reg |= SCTLR_S1_ASIDPNE;
1588#ifdef __BIG_ENDIAN
1589 reg |= SCTLR_E;
1590#endif
Will Deacon25724842013-08-21 13:49:53 +01001591 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001592}
1593
Patrick Dalyc190d932016-08-30 17:23:28 -07001594static int arm_smmu_init_asid(struct iommu_domain *domain,
1595 struct arm_smmu_device *smmu)
1596{
1597 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1598 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1599 bool dynamic = is_dynamic_domain(domain);
1600 int ret;
1601
1602 if (!dynamic) {
1603 cfg->asid = cfg->cbndx + 1;
1604 } else {
1605 mutex_lock(&smmu->idr_mutex);
1606 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1607 smmu->num_context_banks + 2,
1608 MAX_ASID + 1, GFP_KERNEL);
1609
1610 mutex_unlock(&smmu->idr_mutex);
1611 if (ret < 0) {
1612 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1613 ret);
1614 return ret;
1615 }
1616 cfg->asid = ret;
1617 }
1618 return 0;
1619}
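/*
 * ASID numbering: a regular domain simply derives its ASID from the context
 * bank index (cbndx + 1), while a dynamic domain (which is expected to reuse
 * another domain's context bank rather than own one) draws its ASID from the
 * per-SMMU IDR. The IDR range starts at num_context_banks + 2, so dynamic
 * ASIDs cannot collide with the values handed out by the non-dynamic path.
 */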
1620
1621static void arm_smmu_free_asid(struct iommu_domain *domain)
1622{
1623 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1624 struct arm_smmu_device *smmu = smmu_domain->smmu;
1625 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1626 bool dynamic = is_dynamic_domain(domain);
1627
1628 if (cfg->asid == INVALID_ASID || !dynamic)
1629 return;
1630
1631 mutex_lock(&smmu->idr_mutex);
1632 idr_remove(&smmu->asid_idr, cfg->asid);
1633 mutex_unlock(&smmu->idr_mutex);
1634}
1635
Will Deacon45ae7cf2013-06-24 18:31:25 +01001636static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Patrick Dalyea63baa2017-02-13 17:11:33 -08001637 struct arm_smmu_device *smmu,
1638 struct device *dev)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001639{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001640 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001641 unsigned long ias, oas;
1642 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001643 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001644 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001645 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001646 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyce6786f2016-11-09 14:19:23 -08001647 unsigned long quirks = 0;
Patrick Dalyc190d932016-08-30 17:23:28 -07001648 bool dynamic;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001649
Will Deacon518f7132014-11-14 17:17:54 +00001650 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001651 if (smmu_domain->smmu)
1652 goto out_unlock;
1653
Patrick Dalyc190d932016-08-30 17:23:28 -07001654 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1655 smmu_domain->cfg.asid = INVALID_ASID;
1656
Patrick Dalyc190d932016-08-30 17:23:28 -07001657 dynamic = is_dynamic_domain(domain);
1658 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1659 dev_err(smmu->dev, "dynamic domains not supported\n");
1660 ret = -EPERM;
1661 goto out_unlock;
1662 }
1663
Will Deaconc752ce42014-06-25 22:46:31 +01001664 /*
1665 * Mapping the requested stage onto what we support is surprisingly
1666 * complicated, mainly because the spec allows S1+S2 SMMUs without
1667 * support for nested translation. That means we end up with the
1668 * following table:
1669 *
1670 * Requested Supported Actual
1671 * S1 N S1
1672 * S1 S1+S2 S1
1673 * S1 S2 S2
1674 * S1 S1 S1
1675 * N N N
1676 * N S1+S2 S2
1677 * N S2 S2
1678 * N S1 S1
1679 *
1680 * Note that you can't actually request stage-2 mappings.
1681 */
1682 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1683 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1684 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1685 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1686
Robin Murphy7602b872016-04-28 17:12:09 +01001687 /*
1688 * Choosing a suitable context format is even more fiddly. Until we
1689 * grow some way for the caller to express a preference, and/or move
1690 * the decision into the io-pgtable code where it arguably belongs,
1691 * just aim for the closest thing to the rest of the system, and hope
1692 * that the hardware isn't esoteric enough that we can't assume AArch64
1693 * support to be a superset of AArch32 support...
1694 */
1695 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1696 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001697 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
1698 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
1699 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
1700 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
1701 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +01001702 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1703 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1704 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1705 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1706 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1707
1708 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1709 ret = -EINVAL;
1710 goto out_unlock;
1711 }
1712
Will Deaconc752ce42014-06-25 22:46:31 +01001713 switch (smmu_domain->stage) {
1714 case ARM_SMMU_DOMAIN_S1:
1715 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1716 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001717 ias = smmu->va_size;
1718 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001719 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001720 fmt = ARM_64_LPAE_S1;
Patrick Daly4423d3e2017-05-04 18:17:51 -07001721 if (smmu->options & ARM_SMMU_OPT_3LVL_TABLES)
1722 ias = min(ias, 39UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001723 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +00001724 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001725 ias = min(ias, 32UL);
1726 oas = min(oas, 40UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001727 } else {
1728 fmt = ARM_V7S;
1729 ias = min(ias, 32UL);
1730 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +01001731 }
Will Deaconc752ce42014-06-25 22:46:31 +01001732 break;
1733 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001734 /*
1735 * We will likely want to change this if/when KVM gets
1736 * involved.
1737 */
Will Deaconc752ce42014-06-25 22:46:31 +01001738 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001739 cfg->cbar = CBAR_TYPE_S2_TRANS;
1740 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001741 ias = smmu->ipa_size;
1742 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001743 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001744 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001745 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001746 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001747 ias = min(ias, 40UL);
1748 oas = min(oas, 40UL);
1749 }
Will Deaconc752ce42014-06-25 22:46:31 +01001750 break;
1751 default:
1752 ret = -EINVAL;
1753 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001754 }
1755
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001756 if (is_fast)
1757 fmt = ARM_V8L_FAST;
1758
Patrick Dalyce6786f2016-11-09 14:19:23 -08001759 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
1760 quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
Liam Mark53cf2342016-12-20 11:36:07 -08001761 if (is_iommu_pt_coherent(smmu_domain))
1762 quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001763
Patrick Dalyda688822017-05-17 20:12:48 -07001764 ret = arm_smmu_alloc_cb(domain, smmu, dev);
1765 if (ret < 0)
1766 goto out_unlock;
1767 cfg->cbndx = ret;
1768
Robin Murphyb7862e32016-04-13 18:13:03 +01001769 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001770 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1771 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001772 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001773 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001774 }
1775
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001776 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
Patrick Dalyce6786f2016-11-09 14:19:23 -08001777 .quirks = quirks,
Robin Murphyd5466352016-05-09 17:20:09 +01001778 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +00001779 .ias = ias,
1780 .oas = oas,
1781 .tlb = &arm_smmu_gather_ops,
Robin Murphy2df7a252015-07-29 19:46:06 +01001782 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +00001783 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001784
Will Deacon518f7132014-11-14 17:17:54 +00001785 smmu_domain->smmu = smmu;
Patrick Dalyea63baa2017-02-13 17:11:33 -08001786 smmu_domain->dev = dev;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001787 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1788 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001789 if (!pgtbl_ops) {
1790 ret = -ENOMEM;
1791 goto out_clear_smmu;
1792 }
1793
Patrick Dalyc11d1082016-09-01 15:52:44 -07001794 /*
1795 * assign any page table memory that might have been allocated
1796 * during alloc_io_pgtable_ops
1797 */
Patrick Dalye271f212016-10-04 13:24:49 -07001798 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001799 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001800 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001801
Robin Murphyd5466352016-05-09 17:20:09 +01001802 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001803 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Robin Murphyd7a8d042016-09-12 17:13:58 +01001804 domain->geometry.aperture_end = (1UL << ias) - 1;
1805 domain->geometry.force_aperture = true;
Will Deacon518f7132014-11-14 17:17:54 +00001806
Patrick Dalyc190d932016-08-30 17:23:28 -07001807 /* Assign an asid */
1808 ret = arm_smmu_init_asid(domain, smmu);
1809 if (ret)
1810 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001811
Patrick Dalyc190d932016-08-30 17:23:28 -07001812 if (!dynamic) {
1813 /* Initialise the context bank with our page table cfg */
1814 arm_smmu_init_context_bank(smmu_domain,
1815 &smmu_domain->pgtbl_cfg);
1816
Patrick Daly95895ba2017-08-11 14:56:38 -07001817 arm_smmu_arch_init_context_bank(smmu_domain, dev);
1818
Patrick Dalyc190d932016-08-30 17:23:28 -07001819 /*
1820 * Request context fault interrupt. Do this last to avoid the
1821 * handler seeing a half-initialised domain state.
1822 */
1823 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1824 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001825 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1826 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001827 if (ret < 0) {
1828 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1829 cfg->irptndx, irq);
1830 cfg->irptndx = INVALID_IRPTNDX;
1831 goto out_clear_smmu;
1832 }
1833 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001834 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001835 }
Will Deacon518f7132014-11-14 17:17:54 +00001836 mutex_unlock(&smmu_domain->init_mutex);
1837
1838 /* Publish page table ops for map/unmap */
1839 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001840 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001841
Will Deacon518f7132014-11-14 17:17:54 +00001842out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001843 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001844 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001845out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001846 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001847 return ret;
1848}
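/*
 * Rough initialisation order for a domain, as implemented above: pick a
 * translation stage and context format, allocate a context bank, build the
 * io-pgtable, hyp-assign any secure page-table memory, allocate an ASID, and
 * finally (for non-dynamic domains only) program the context bank and request
 * its fault IRQ. Dynamic domains stop after the ASID step, since they are
 * expected to reuse an already-programmed context bank.
 */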
1849
Patrick Daly77db4f92016-10-14 15:34:10 -07001850static void arm_smmu_domain_reinit(struct arm_smmu_domain *smmu_domain)
1851{
1852 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1853 smmu_domain->cfg.cbndx = INVALID_CBNDX;
1854 smmu_domain->secure_vmid = VMID_INVAL;
1855}
1856
Will Deacon45ae7cf2013-06-24 18:31:25 +01001857static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1858{
Joerg Roedel1d672632015-03-26 13:43:10 +01001859 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001860 struct arm_smmu_device *smmu = smmu_domain->smmu;
1861 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01001862 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001863 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07001864 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001865 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001866
Robin Murphy7e96c742016-09-14 15:26:46 +01001867 if (!smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001868 return;
1869
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001870 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001871 if (ret) {
1872		WARN_ONCE(ret, "Whoops, powering on smmu %p failed. Leaking context bank\n",
1873 smmu);
1874 return;
1875 }
1876
Patrick Dalyc190d932016-08-30 17:23:28 -07001877 dynamic = is_dynamic_domain(domain);
1878 if (dynamic) {
1879 arm_smmu_free_asid(domain);
1880 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001881 arm_smmu_power_off(smmu->pwr);
Patrick Dalye271f212016-10-04 13:24:49 -07001882 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001883 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001884 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001885 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly77db4f92016-10-14 15:34:10 -07001886 arm_smmu_domain_reinit(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001887 return;
1888 }
1889
Will Deacon518f7132014-11-14 17:17:54 +00001890 /*
1891 * Disable the context bank and free the page tables before freeing
1892 * it.
1893 */
Will Deacon44680ee2014-06-25 11:29:12 +01001894 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001895 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01001896
Will Deacon44680ee2014-06-25 11:29:12 +01001897 if (cfg->irptndx != INVALID_IRPTNDX) {
1898 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08001899 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001900 }
1901
Markus Elfring44830b02015-11-06 18:32:41 +01001902 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07001903 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001904 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001905 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001906 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001907 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001908
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001909 arm_smmu_power_off(smmu->pwr);
Patrick Daly77db4f92016-10-14 15:34:10 -07001910 arm_smmu_domain_reinit(smmu_domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001911}
1912
Joerg Roedel1d672632015-03-26 13:43:10 +01001913static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001914{
1915 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001916
Patrick Daly09801312016-08-29 17:02:52 -07001917 /* Do not support DOMAIN_DMA for now */
1918 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01001919 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001920 /*
1921 * Allocate the domain and initialise some of its data structures.
1922 * We can't really do anything meaningful until we've added a
1923 * master.
1924 */
1925 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1926 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001927 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001928
Robin Murphy7e96c742016-09-14 15:26:46 +01001929 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
1930 iommu_get_dma_cookie(&smmu_domain->domain))) {
Robin Murphy9adb9592016-01-26 18:06:36 +00001931 kfree(smmu_domain);
1932 return NULL;
1933 }
1934
Will Deacon518f7132014-11-14 17:17:54 +00001935 mutex_init(&smmu_domain->init_mutex);
1936 spin_lock_init(&smmu_domain->pgtbl_lock);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001937 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
1938 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07001939 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001940 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Patrick Daly77db4f92016-10-14 15:34:10 -07001941 arm_smmu_domain_reinit(smmu_domain);
Joerg Roedel1d672632015-03-26 13:43:10 +01001942
1943 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001944}
1945
Joerg Roedel1d672632015-03-26 13:43:10 +01001946static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001947{
Joerg Roedel1d672632015-03-26 13:43:10 +01001948 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01001949
1950 /*
1951 * Free the domain resources. We assume that all devices have
1952 * already been detached.
1953 */
Robin Murphy9adb9592016-01-26 18:06:36 +00001954 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001955 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001956 kfree(smmu_domain);
1957}
1958
Robin Murphy468f4942016-09-12 17:13:49 +01001959static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
1960{
1961 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphyd5b41782016-09-14 15:21:39 +01001962 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
Robin Murphy468f4942016-09-12 17:13:49 +01001963
1964 if (smr->valid)
1965 reg |= SMR_VALID;
1966 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
1967}
1968
Robin Murphya754fd12016-09-12 17:13:50 +01001969static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
1970{
1971 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
1972 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
1973 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
1974 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
1975
1976 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
1977}
1978
1979static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
1980{
1981 arm_smmu_write_s2cr(smmu, idx);
1982 if (smmu->smrs)
1983 arm_smmu_write_smr(smmu, idx);
1984}
1985
Robin Murphy6668f692016-09-12 17:13:54 +01001986static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy468f4942016-09-12 17:13:49 +01001987{
1988 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy6668f692016-09-12 17:13:54 +01001989 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001990
Robin Murphy6668f692016-09-12 17:13:54 +01001991 /* Stream indexing is blissfully easy */
1992 if (!smrs)
1993 return id;
Robin Murphy468f4942016-09-12 17:13:49 +01001994
Robin Murphy6668f692016-09-12 17:13:54 +01001995 /* Validating SMRs is... less so */
1996 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1997 if (!smrs[i].valid) {
1998 /*
1999 * Note the first free entry we come across, which
2000 * we'll claim in the end if nothing else matches.
2001 */
2002 if (free_idx < 0)
2003 free_idx = i;
Robin Murphy468f4942016-09-12 17:13:49 +01002004 continue;
2005 }
Robin Murphy6668f692016-09-12 17:13:54 +01002006 /*
2007 * If the new entry is _entirely_ matched by an existing entry,
2008 * then reuse that, with the guarantee that there also cannot
2009 * be any subsequent conflicting entries. In normal use we'd
2010 * expect simply identical entries for this case, but there's
2011 * no harm in accommodating the generalisation.
2012 */
2013 if ((mask & smrs[i].mask) == mask &&
2014 !((id ^ smrs[i].id) & ~smrs[i].mask))
2015 return i;
2016 /*
2017 * If the new entry has any other overlap with an existing one,
2018 * though, then there always exists at least one stream ID
2019 * which would cause a conflict, and we can't allow that risk.
2020 */
2021 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
2022 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002023 }
2024
Robin Murphy6668f692016-09-12 17:13:54 +01002025 return free_idx;
2026}
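/*
 * Worked example of the matching rules above. Existing SMR id=0x400,
 * mask=0xff: a request for id=0x421, mask=0x0f is entirely covered, since
 * (0x0f & 0xff) == 0x0f and ((0x421 ^ 0x400) & ~0xff) == 0, so the existing
 * index is reused. Existing SMR id=0x400, mask=0x0f: a request for id=0x408,
 * mask=0xff overlaps only partially (stream ID 0x405, for instance, would
 * match both entries), so it is rejected with -EINVAL.
 */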
2027
2028static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
2029{
2030 if (--smmu->s2crs[idx].count)
2031 return false;
2032
2033 smmu->s2crs[idx] = s2cr_init_val;
2034 if (smmu->smrs)
2035 smmu->smrs[idx].valid = false;
2036
2037 return true;
2038}
2039
2040static int arm_smmu_master_alloc_smes(struct device *dev)
2041{
Robin Murphy06e393e2016-09-12 17:13:55 +01002042 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2043 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy6668f692016-09-12 17:13:54 +01002044 struct arm_smmu_device *smmu = cfg->smmu;
2045 struct arm_smmu_smr *smrs = smmu->smrs;
2046 struct iommu_group *group;
2047 int i, idx, ret;
2048
2049 mutex_lock(&smmu->stream_map_mutex);
2050 /* Figure out a viable stream map entry allocation */
Robin Murphy06e393e2016-09-12 17:13:55 +01002051 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002052 u16 sid = fwspec->ids[i];
2053 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
2054
Robin Murphy6668f692016-09-12 17:13:54 +01002055 if (idx != INVALID_SMENDX) {
2056 ret = -EEXIST;
2057 goto out_err;
2058 }
2059
Robin Murphy7e96c742016-09-14 15:26:46 +01002060 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy6668f692016-09-12 17:13:54 +01002061 if (ret < 0)
2062 goto out_err;
2063
2064 idx = ret;
2065 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002066 smrs[idx].id = sid;
2067 smrs[idx].mask = mask;
Robin Murphy6668f692016-09-12 17:13:54 +01002068 smrs[idx].valid = true;
2069 }
2070 smmu->s2crs[idx].count++;
2071 cfg->smendx[i] = (s16)idx;
2072 }
2073
2074 group = iommu_group_get_for_dev(dev);
2075 if (!group)
2076 group = ERR_PTR(-ENOMEM);
2077 if (IS_ERR(group)) {
2078 ret = PTR_ERR(group);
2079 goto out_err;
2080 }
2081 iommu_group_put(group);
Robin Murphy468f4942016-09-12 17:13:49 +01002082
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002083 /* It worked! Don't poke the actual hardware until we've attached */
2084 for_each_cfg_sme(fwspec, i, idx)
Robin Murphy6668f692016-09-12 17:13:54 +01002085 smmu->s2crs[idx].group = group;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002086
Robin Murphy6668f692016-09-12 17:13:54 +01002087 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002088 return 0;
2089
Robin Murphy6668f692016-09-12 17:13:54 +01002090out_err:
Robin Murphy468f4942016-09-12 17:13:49 +01002091 while (i--) {
Robin Murphy6668f692016-09-12 17:13:54 +01002092 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy468f4942016-09-12 17:13:49 +01002093 cfg->smendx[i] = INVALID_SMENDX;
2094 }
Robin Murphy6668f692016-09-12 17:13:54 +01002095 mutex_unlock(&smmu->stream_map_mutex);
2096 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002097}
2098
Robin Murphy06e393e2016-09-12 17:13:55 +01002099static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002100{
Robin Murphy06e393e2016-09-12 17:13:55 +01002101 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
2102 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy1fb519a2016-09-12 17:13:53 +01002103 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01002104
Robin Murphy6668f692016-09-12 17:13:54 +01002105 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002106 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002107 if (arm_smmu_free_sme(smmu, idx))
2108 arm_smmu_write_sme(smmu, idx);
Robin Murphy468f4942016-09-12 17:13:49 +01002109 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002110 }
Robin Murphy6668f692016-09-12 17:13:54 +01002111 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002112}
2113
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002114static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
2115 struct iommu_fwspec *fwspec)
2116{
2117 struct arm_smmu_device *smmu = smmu_domain->smmu;
2118 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2119 int i, idx;
2120 const struct iommu_gather_ops *tlb;
2121
2122 tlb = smmu_domain->pgtbl_cfg.tlb;
2123
2124 mutex_lock(&smmu->stream_map_mutex);
2125 for_each_cfg_sme(fwspec, i, idx) {
2126 WARN_ON(s2cr[idx].attach_count == 0);
2127 s2cr[idx].attach_count -= 1;
2128
2129 if (s2cr[idx].attach_count > 0)
2130 continue;
2131
2132 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
2133 writel_relaxed(0, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
2134 }
2135 mutex_unlock(&smmu->stream_map_mutex);
2136
2137 /* Ensure there are no stale mappings for this context bank */
2138 tlb->tlb_flush_all(smmu_domain);
2139}
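/*
 * Each stream-map entry is reference counted here via attach_count, so the
 * SMR and S2CR registers for an entry are only zeroed once the last attached
 * master sharing it detaches; the context bank's TLB is then invalidated so
 * that no stale translations survive into a later re-attach.
 */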
2140
Will Deacon45ae7cf2013-06-24 18:31:25 +01002141static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphy06e393e2016-09-12 17:13:55 +01002142 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002143{
Will Deacon44680ee2014-06-25 11:29:12 +01002144 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002145 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
2146 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
2147 u8 cbndx = smmu_domain->cfg.cbndx;
Robin Murphy6668f692016-09-12 17:13:54 +01002148 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002149
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002150 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01002151 for_each_cfg_sme(fwspec, i, idx) {
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002152 if (s2cr[idx].attach_count++ > 0)
Robin Murphy6668f692016-09-12 17:13:54 +01002153 continue;
Robin Murphya754fd12016-09-12 17:13:50 +01002154
2155 s2cr[idx].type = type;
2156 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
2157 s2cr[idx].cbndx = cbndx;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002158 arm_smmu_write_sme(smmu, idx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002159 }
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002160 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002161
2162 return 0;
2163}
2164
Patrick Daly09801312016-08-29 17:02:52 -07002165static void arm_smmu_detach_dev(struct iommu_domain *domain,
2166 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002167{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002168 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07002169 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002170 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly09801312016-08-29 17:02:52 -07002171 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07002172 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07002173
2174 if (dynamic)
2175 return;
2176
Patrick Daly09801312016-08-29 17:02:52 -07002177 if (!smmu) {
2178 dev_err(dev, "Domain not attached; cannot detach!\n");
2179 return;
2180 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002181
Patrick Dalyb2ab4fc2017-08-10 18:39:13 -07002182 arm_smmu_domain_remove_master(smmu_domain, fwspec);
2183
Patrick Daly8befb662016-08-17 20:03:28 -07002184 /* Remove additional vote for atomic power */
2185 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002186 WARN_ON(arm_smmu_power_on_atomic(smmu->pwr));
2187 arm_smmu_power_off(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07002188 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002189}
2190
Patrick Dalye271f212016-10-04 13:24:49 -07002191static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07002192{
Patrick Dalye271f212016-10-04 13:24:49 -07002193 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002194 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2195 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
2196 int source_vmid = VMID_HLOS;
2197 struct arm_smmu_pte_info *pte_info, *temp;
2198
Patrick Dalye271f212016-10-04 13:24:49 -07002199 if (!arm_smmu_is_domain_secure(smmu_domain))
2200 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002201
Patrick Dalye271f212016-10-04 13:24:49 -07002202 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002203 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2204 PAGE_SIZE, &source_vmid, 1,
2205 dest_vmids, dest_perms, 2);
2206 if (WARN_ON(ret))
2207 break;
2208 }
2209
2210 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2211 entry) {
2212 list_del(&pte_info->entry);
2213 kfree(pte_info);
2214 }
Patrick Dalye271f212016-10-04 13:24:49 -07002215 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002216}
2217
2218static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2219{
2220 int ret;
2221 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002222 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002223 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2224 struct arm_smmu_pte_info *pte_info, *temp;
2225
Patrick Dalye271f212016-10-04 13:24:49 -07002226 if (!arm_smmu_is_domain_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002227 return;
2228
2229 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2230 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2231 PAGE_SIZE, source_vmlist, 2,
2232 &dest_vmids, &dest_perms, 1);
2233 if (WARN_ON(ret))
2234 break;
2235 free_pages_exact(pte_info->virt_addr, pte_info->size);
2236 }
2237
2238 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2239 entry) {
2240 list_del(&pte_info->entry);
2241 kfree(pte_info);
2242 }
2243}
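/*
 * hyp_assign_phys() re-labels page-table pages between VMIDs: on assign, a
 * page is shared read/write with VMID_HLOS and read-only with the domain's
 * secure VMID; on unassign (above) it is returned to VMID_HLOS alone and only
 * then released with free_pages_exact(). Failures are WARNed and the loop is
 * abandoned, presumably because freeing a page the hypervisor still considers
 * shared would be worse than leaking it.
 */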
2244
2245static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2246{
2247 struct arm_smmu_domain *smmu_domain = cookie;
2248 struct arm_smmu_pte_info *pte_info;
2249
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002250 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002251
2252 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2253 if (!pte_info)
2254 return;
2255
2256 pte_info->virt_addr = addr;
2257 pte_info->size = size;
2258 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2259}
2260
2261static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2262{
2263 struct arm_smmu_domain *smmu_domain = cookie;
2264 struct arm_smmu_pte_info *pte_info;
2265
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002266 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002267
2268 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2269 if (!pte_info)
2270 return -ENOMEM;
2271 pte_info->virt_addr = addr;
2272 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2273 return 0;
2274}
2275
Will Deacon45ae7cf2013-06-24 18:31:25 +01002276static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2277{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002278 int ret;
Robin Murphy06e393e2016-09-12 17:13:55 +01002279 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Will Deacon518f7132014-11-14 17:17:54 +00002280 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +01002281 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly8befb662016-08-17 20:03:28 -07002282 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002283
Robin Murphy06e393e2016-09-12 17:13:55 +01002284 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002285 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2286 return -ENXIO;
2287 }
Robin Murphy06e393e2016-09-12 17:13:55 +01002288
Robin Murphy4f79b142016-10-17 12:06:21 +01002289 /*
2290 * FIXME: The arch/arm DMA API code tries to attach devices to its own
2291 * domains between of_xlate() and add_device() - we have no way to cope
2292 * with that, so until ARM gets converted to rely on groups and default
2293 * domains, just say no (but more politely than by dereferencing NULL).
2294 * This should be at least a WARN_ON once that's sorted.
2295 */
2296 if (!fwspec->iommu_priv)
2297 return -ENODEV;
2298
Robin Murphy06e393e2016-09-12 17:13:55 +01002299 smmu = fwspec_smmu(fwspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002300
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002301 /* Enable Clocks and Power */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002302 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002303 if (ret)
2304 return ret;
2305
Will Deacon518f7132014-11-14 17:17:54 +00002306 /* Ensure that the domain is finalised */
Patrick Dalyea63baa2017-02-13 17:11:33 -08002307 ret = arm_smmu_init_domain_context(domain, smmu, dev);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002308 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002309 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002310
Patrick Dalyc190d932016-08-30 17:23:28 -07002311 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002312 if (is_dynamic_domain(domain)) {
2313 ret = 0;
2314 goto out_power_off;
2315 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002316
Will Deacon45ae7cf2013-06-24 18:31:25 +01002317 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002318 * Sanity check the domain. We don't support domains across
2319 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002320 */
Robin Murphy06e393e2016-09-12 17:13:55 +01002321 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002322 dev_err(dev,
2323 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002324 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002325 ret = -EINVAL;
2326 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002327 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002328
2329 /* Looks ok, so add the device to the domain */
Robin Murphy06e393e2016-09-12 17:13:55 +01002330 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002331
2332out_power_off:
Patrick Daly466237d2016-10-14 15:59:14 -07002333 /*
2334 * Keep an additional vote for non-atomic power until domain is
2335 * detached
2336 */
2337 if (!ret && atomic_domain) {
2338 WARN_ON(arm_smmu_power_on(smmu->pwr));
2339 arm_smmu_power_off_atomic(smmu->pwr);
2340 }
2341
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002342 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002343
Will Deacon45ae7cf2013-06-24 18:31:25 +01002344 return ret;
2345}
2346
Will Deacon45ae7cf2013-06-24 18:31:25 +01002347static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002348 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002349{
Will Deacon518f7132014-11-14 17:17:54 +00002350 int ret;
2351 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002352 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002353	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002354
Will Deacon518f7132014-11-14 17:17:54 +00002355 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002356 return -ENODEV;
2357
Patrick Dalye271f212016-10-04 13:24:49 -07002358 arm_smmu_secure_domain_lock(smmu_domain);
2359
Will Deacon518f7132014-11-14 17:17:54 +00002360 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2361 ret = ops->map(ops, iova, paddr, size, prot);
2362 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002363
2364 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002365 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002366
Will Deacon518f7132014-11-14 17:17:54 +00002367 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002368}
2369
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07002370static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
2371 dma_addr_t iova)
2372{
2373 uint64_t ret;
2374 unsigned long flags;
2375 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2376 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2377
2378 if (!ops)
2379 return 0;
2380
2381 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2382 ret = ops->iova_to_pte(ops, iova);
2383 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2384 return ret;
2385}
2386
Will Deacon45ae7cf2013-06-24 18:31:25 +01002387static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2388 size_t size)
2389{
Will Deacon518f7132014-11-14 17:17:54 +00002390 size_t ret;
2391 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002392 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002393	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002394
Will Deacon518f7132014-11-14 17:17:54 +00002395 if (!ops)
2396 return 0;
2397
Patrick Daly8befb662016-08-17 20:03:28 -07002398 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002399 if (ret)
2400 return ret;
2401
Patrick Dalye271f212016-10-04 13:24:49 -07002402 arm_smmu_secure_domain_lock(smmu_domain);
2403
Will Deacon518f7132014-11-14 17:17:54 +00002404 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2405 ret = ops->unmap(ops, iova, size);
2406 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002407
Patrick Daly8befb662016-08-17 20:03:28 -07002408 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002409 /*
2410 * While splitting up block mappings, we might allocate page table
2411	 * memory during unmap, so the VMIDs need to be assigned to the
2412 * memory here as well.
2413 */
2414 arm_smmu_assign_table(smmu_domain);
2415 /* Also unassign any pages that were free'd during unmap */
2416	/* Also unassign any pages that were freed during unmap */
Patrick Dalye271f212016-10-04 13:24:49 -07002417 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002418 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002419}
2420
Patrick Daly88d321d2017-02-09 18:02:13 -08002421#define MAX_MAP_SG_BATCH_SIZE (SZ_4M)
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002422static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2423 struct scatterlist *sg, unsigned int nents, int prot)
2424{
2425 int ret;
Patrick Daly88d321d2017-02-09 18:02:13 -08002426 size_t size, batch_size, size_to_unmap = 0;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002427 unsigned long flags;
2428 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2429 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Patrick Daly88d321d2017-02-09 18:02:13 -08002430 unsigned int idx_start, idx_end;
2431 struct scatterlist *sg_start, *sg_end;
2432 unsigned long __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002433
2434 if (!ops)
2435 return -ENODEV;
2436
Patrick Daly8befb662016-08-17 20:03:28 -07002437 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002438 if (ret)
2439 return ret;
2440
Patrick Daly88d321d2017-02-09 18:02:13 -08002441 __saved_iova_start = iova;
2442 idx_start = idx_end = 0;
2443 sg_start = sg_end = sg;
2444 while (idx_end < nents) {
2445 batch_size = sg_end->length;
2446 sg_end = sg_next(sg_end);
2447 idx_end++;
2448 while ((idx_end < nents) &&
2449 (batch_size + sg_end->length < MAX_MAP_SG_BATCH_SIZE)) {
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002450
Patrick Daly88d321d2017-02-09 18:02:13 -08002451 batch_size += sg_end->length;
2452 sg_end = sg_next(sg_end);
2453 idx_end++;
2454 }
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002455
Patrick Daly88d321d2017-02-09 18:02:13 -08002456 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2457 ret = ops->map_sg(ops, iova, sg_start, idx_end - idx_start,
2458 prot, &size);
2459 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2460 /* Returns 0 on error */
2461 if (!ret) {
2462 size_to_unmap = iova + size - __saved_iova_start;
2463 goto out;
2464 }
2465
2466 iova += batch_size;
2467 idx_start = idx_end;
2468 sg_start = sg_end;
2469 }
2470
2471out:
Patrick Dalyc11d1082016-09-01 15:52:44 -07002472 arm_smmu_assign_table(smmu_domain);
2473
Patrick Daly88d321d2017-02-09 18:02:13 -08002474 if (size_to_unmap) {
2475 arm_smmu_unmap(domain, __saved_iova_start, size_to_unmap);
2476 iova = __saved_iova_start;
2477 }
2478 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
2479 return iova - __saved_iova_start;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002480}
2481
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002482static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyad441dd2016-09-15 15:50:46 -07002483 dma_addr_t iova)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002484{
Joerg Roedel1d672632015-03-26 13:43:10 +01002485 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002486 struct arm_smmu_device *smmu = smmu_domain->smmu;
2487 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 2488	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2489 struct device *dev = smmu->dev;
2490 void __iomem *cb_base;
2491 u32 tmp;
2492 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002493 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002494
2495 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2496
Robin Murphy661d9622015-05-27 17:09:34 +01002497 /* ATS1 registers can only be written atomically */
2498 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002499 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002500 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2501 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002502 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002503
2504 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2505 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002506 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002507 dev_err(dev,
2508 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2509 &iova, &phys);
2510 phys = 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002511 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002512 }
2513
Robin Murphyf9a05f02016-04-13 18:13:01 +01002514 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002515 if (phys & CB_PAR_F) {
2516 dev_err(dev, "translation fault!\n");
2517 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002518 phys = 0;
2519 } else {
2520 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002521 }
Patrick Dalyad441dd2016-09-15 15:50:46 -07002522
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002523 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002524}
2525
Will Deacon45ae7cf2013-06-24 18:31:25 +01002526static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002527 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002528{
Will Deacon518f7132014-11-14 17:17:54 +00002529 phys_addr_t ret;
2530 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002531 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002532	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002533
Will Deacon518f7132014-11-14 17:17:54 +00002534 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002535 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002536
Will Deacon518f7132014-11-14 17:17:54 +00002537 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002538 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002539 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002540
Will Deacon518f7132014-11-14 17:17:54 +00002541 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002542}
2543
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002544/*
 2545 * This function can sleep, and cannot be called from atomic context. It will
 2546 * power on the register block if required. This restriction does not apply to
 2547 * the original iova_to_phys() op.
2548 */
2549static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2550 dma_addr_t iova)
2551{
2552 phys_addr_t ret = 0;
2553 unsigned long flags;
2554 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002555
Patrick Dalyad441dd2016-09-15 15:50:46 -07002556 if (smmu_domain->smmu->arch_ops &&
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002557 smmu_domain->smmu->arch_ops->iova_to_phys_hard) {
2558 ret = smmu_domain->smmu->arch_ops->iova_to_phys_hard(
Patrick Dalyad441dd2016-09-15 15:50:46 -07002559 domain, iova);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08002560 return ret;
2561 }
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002562
2563 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2564 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2565 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyad441dd2016-09-15 15:50:46 -07002566 ret = __arm_smmu_iova_to_phys_hard(domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002567
2568 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2569
2570 return ret;
2571}
2572
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002573static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002574{
Will Deacond0948942014-06-24 17:30:10 +01002575 switch (cap) {
2576 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002577 /*
2578 * Return true here as the SMMU can always send out coherent
2579 * requests.
2580 */
2581 return true;
Will Deacond0948942014-06-24 17:30:10 +01002582 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002583 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002584 case IOMMU_CAP_NOEXEC:
2585 return true;
Will Deacond0948942014-06-24 17:30:10 +01002586 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002587 return false;
Will Deacond0948942014-06-24 17:30:10 +01002588 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002589}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002590
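/*
 * Fallback lookup: walk the driver's global list of probed SMMUs when the
 * driver-model search in arm_smmu_get_by_node() comes back empty.
 */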
Patrick Daly8e3371a2017-02-13 22:14:53 -08002591static struct arm_smmu_device *arm_smmu_get_by_list(struct device_node *np)
2592{
2593 struct arm_smmu_device *smmu;
2594 unsigned long flags;
2595
2596 spin_lock_irqsave(&arm_smmu_devices_lock, flags);
2597 list_for_each_entry(smmu, &arm_smmu_devices, list) {
2598 if (smmu->dev->of_node == np) {
2599 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2600 return smmu;
2601 }
2602 }
2603 spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
2604 return NULL;
2605}
2606
Robin Murphy7e96c742016-09-14 15:26:46 +01002607static int arm_smmu_match_node(struct device *dev, void *data)
2608{
2609 return dev->of_node == data;
2610}
2611
2612static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
2613{
2614 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
2615 np, arm_smmu_match_node);
2616 put_device(dev);
Patrick Daly8e3371a2017-02-13 22:14:53 -08002617 return dev ? dev_get_drvdata(dev) : arm_smmu_get_by_list(np);
Robin Murphy7e96c742016-09-14 15:26:46 +01002618}
2619
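/*
 * Add-device hook: resolve the owning SMMU (legacy binding or fwspec),
 * validate every stream ID and SMR mask against the hardware limits, then
 * allocate the per-master cfg and stream mapping entries. The SMMU is
 * powered on only for the duration of this setup.
 */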
Will Deacon03edb222015-01-19 14:27:33 +00002620static int arm_smmu_add_device(struct device *dev)
2621{
Robin Murphy06e393e2016-09-12 17:13:55 +01002622 struct arm_smmu_device *smmu;
Robin Murphyd5b41782016-09-14 15:21:39 +01002623 struct arm_smmu_master_cfg *cfg;
Robin Murphy7e96c742016-09-14 15:26:46 +01002624 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyd5b41782016-09-14 15:21:39 +01002625 int i, ret;
2626
Robin Murphy7e96c742016-09-14 15:26:46 +01002627 if (using_legacy_binding) {
2628 ret = arm_smmu_register_legacy_master(dev, &smmu);
2629 fwspec = dev->iommu_fwspec;
2630 if (ret)
2631 goto out_free;
Robin Murphy22e6f6c2016-11-02 17:31:32 +00002632 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002633 smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
2634 if (!smmu)
2635 return -ENODEV;
2636 } else {
2637 return -ENODEV;
2638 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002639
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002640 ret = arm_smmu_power_on(smmu->pwr);
2641 if (ret)
2642 goto out_free;
2643
Robin Murphyd5b41782016-09-14 15:21:39 +01002644 ret = -EINVAL;
Robin Murphy06e393e2016-09-12 17:13:55 +01002645 for (i = 0; i < fwspec->num_ids; i++) {
2646 u16 sid = fwspec->ids[i];
Robin Murphy7e96c742016-09-14 15:26:46 +01002647 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyd5b41782016-09-14 15:21:39 +01002648
Robin Murphy06e393e2016-09-12 17:13:55 +01002649 if (sid & ~smmu->streamid_mask) {
Robin Murphyd5b41782016-09-14 15:21:39 +01002650 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002651 sid, smmu->streamid_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002652 goto out_pwr_off;
Robin Murphyd5b41782016-09-14 15:21:39 +01002653 }
Robin Murphy7e96c742016-09-14 15:26:46 +01002654 if (mask & ~smmu->smr_mask_mask) {
2655 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
 2656				mask, smmu->smr_mask_mask);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002657 goto out_pwr_off;
Robin Murphy7e96c742016-09-14 15:26:46 +01002658 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002659 }
Will Deacon03edb222015-01-19 14:27:33 +00002660
Robin Murphy06e393e2016-09-12 17:13:55 +01002661 ret = -ENOMEM;
2662 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
2663 GFP_KERNEL);
2664 if (!cfg)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002665 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002666
2667 cfg->smmu = smmu;
2668 fwspec->iommu_priv = cfg;
2669 while (i--)
2670 cfg->smendx[i] = INVALID_SMENDX;
2671
Robin Murphy6668f692016-09-12 17:13:54 +01002672 ret = arm_smmu_master_alloc_smes(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002673 if (ret)
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002674 goto out_pwr_off;
Robin Murphy06e393e2016-09-12 17:13:55 +01002675
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002676 arm_smmu_power_off(smmu->pwr);
Robin Murphy06e393e2016-09-12 17:13:55 +01002677 return 0;
Robin Murphyd5b41782016-09-14 15:21:39 +01002678
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002679out_pwr_off:
2680 arm_smmu_power_off(smmu->pwr);
Robin Murphyd5b41782016-09-14 15:21:39 +01002681out_free:
Robin Murphy06e393e2016-09-12 17:13:55 +01002682 if (fwspec)
2683 kfree(fwspec->iommu_priv);
2684 iommu_fwspec_free(dev);
Robin Murphyd5b41782016-09-14 15:21:39 +01002685 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00002686}
2687
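/*
 * Tear down a master: release its stream mapping entries and group
 * membership, then free the per-master cfg and fwspec. The SMMU must be
 * powered on around the stream-table writes.
 */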
Will Deacon45ae7cf2013-06-24 18:31:25 +01002688static void arm_smmu_remove_device(struct device *dev)
2689{
Robin Murphy06e393e2016-09-12 17:13:55 +01002690 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002691 struct arm_smmu_device *smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01002692
Robin Murphy06e393e2016-09-12 17:13:55 +01002693 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyd5b41782016-09-14 15:21:39 +01002694 return;
Robin Murphya754fd12016-09-12 17:13:50 +01002695
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002696 smmu = fwspec_smmu(fwspec);
2697 if (arm_smmu_power_on(smmu->pwr)) {
2698 WARN_ON(1);
2699 return;
2700 }
2701
Robin Murphy06e393e2016-09-12 17:13:55 +01002702 arm_smmu_master_free_smes(fwspec);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002703 iommu_group_remove_device(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002704 kfree(fwspec->iommu_priv);
2705 iommu_fwspec_free(dev);
Patrick Daly35bbe7b2017-02-17 20:11:04 -08002706 arm_smmu_power_off(smmu->pwr);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002707}
2708
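/*
 * Masters whose stream IDs already alias an S2CR group must share that
 * group; conflicting groups are rejected. Otherwise a new PCI or generic
 * group is allocated, and the arch-specific hook may still veto the result.
 */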
Joerg Roedelaf659932015-10-21 23:51:41 +02002709static struct iommu_group *arm_smmu_device_group(struct device *dev)
2710{
Robin Murphy06e393e2016-09-12 17:13:55 +01002711 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2712 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy6668f692016-09-12 17:13:54 +01002713 struct iommu_group *group = NULL;
2714 int i, idx;
2715
Robin Murphy06e393e2016-09-12 17:13:55 +01002716 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002717 if (group && smmu->s2crs[idx].group &&
2718 group != smmu->s2crs[idx].group)
2719 return ERR_PTR(-EINVAL);
2720
2721 group = smmu->s2crs[idx].group;
2722 }
2723
Patrick Daly95895ba2017-08-11 14:56:38 -07002724 if (!group) {
2725 if (dev_is_pci(dev))
2726 group = pci_device_group(dev);
2727 else
2728 group = generic_device_group(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02002729
Patrick Daly95895ba2017-08-11 14:56:38 -07002730 if (IS_ERR(group))
2731 return NULL;
2732 }
2733
2734 if (arm_smmu_arch_device_group(dev, group)) {
2735 iommu_group_put(group);
2736 return ERR_PTR(-EINVAL);
2737 }
Joerg Roedelaf659932015-10-21 23:51:41 +02002738
Joerg Roedelaf659932015-10-21 23:51:41 +02002739 return group;
2740}
2741
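/*
 * Report a domain attribute. Attributes derived from hardware state
 * (context bank index, TTBR0, CONTEXTIDR, page-table coherency) are only
 * valid while the domain is attached and return -ENODEV otherwise.
 */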
Will Deaconc752ce42014-06-25 22:46:31 +01002742static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
2743 enum iommu_attr attr, void *data)
2744{
Joerg Roedel1d672632015-03-26 13:43:10 +01002745 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002746 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01002747
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002748 mutex_lock(&smmu_domain->init_mutex);
Will Deaconc752ce42014-06-25 22:46:31 +01002749 switch (attr) {
2750 case DOMAIN_ATTR_NESTING:
2751 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002752 ret = 0;
2753 break;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08002754 case DOMAIN_ATTR_PT_BASE_ADDR:
2755 *((phys_addr_t *)data) =
2756 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002757 ret = 0;
2758 break;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002759 case DOMAIN_ATTR_CONTEXT_BANK:
2760 /* context bank index isn't valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002761 if (smmu_domain->smmu == NULL) {
2762 ret = -ENODEV;
2763 break;
2764 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002765 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
2766 ret = 0;
2767 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002768 case DOMAIN_ATTR_TTBR0: {
2769 u64 val;
2770 struct arm_smmu_device *smmu = smmu_domain->smmu;
2771 /* not valid until we are attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002772 if (smmu == NULL) {
2773 ret = -ENODEV;
2774 break;
2775 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002776 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2777 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
2778 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
2779 << (TTBRn_ASID_SHIFT);
2780 *((u64 *)data) = val;
2781 ret = 0;
2782 break;
2783 }
2784 case DOMAIN_ATTR_CONTEXTIDR:
2785 /* not valid until attached */
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002786 if (smmu_domain->smmu == NULL) {
2787 ret = -ENODEV;
2788 break;
2789 }
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002790 *((u32 *)data) = smmu_domain->cfg.procid;
2791 ret = 0;
2792 break;
2793 case DOMAIN_ATTR_PROCID:
2794 *((u32 *)data) = smmu_domain->cfg.procid;
2795 ret = 0;
2796 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002797 case DOMAIN_ATTR_DYNAMIC:
2798 *((int *)data) = !!(smmu_domain->attributes
2799 & (1 << DOMAIN_ATTR_DYNAMIC));
2800 ret = 0;
2801 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002802 case DOMAIN_ATTR_NON_FATAL_FAULTS:
2803 *((int *)data) = !!(smmu_domain->attributes
2804 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
2805 ret = 0;
2806 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07002807 case DOMAIN_ATTR_S1_BYPASS:
2808 *((int *)data) = !!(smmu_domain->attributes
2809 & (1 << DOMAIN_ATTR_S1_BYPASS));
2810 ret = 0;
2811 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002812 case DOMAIN_ATTR_SECURE_VMID:
2813 *((int *)data) = smmu_domain->secure_vmid;
2814 ret = 0;
2815 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08002816 case DOMAIN_ATTR_PGTBL_INFO: {
2817 struct iommu_pgtbl_info *info = data;
2818
2819 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
2820 ret = -ENODEV;
2821 break;
2822 }
2823 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
2824 ret = 0;
2825 break;
2826 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002827 case DOMAIN_ATTR_FAST:
2828 *((int *)data) = !!(smmu_domain->attributes
2829 & (1 << DOMAIN_ATTR_FAST));
2830 ret = 0;
2831 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002832 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2833 *((int *)data) = !!(smmu_domain->attributes &
2834 (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
2835 ret = 0;
2836 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08002837 case DOMAIN_ATTR_EARLY_MAP:
2838 *((int *)data) = !!(smmu_domain->attributes
2839 & (1 << DOMAIN_ATTR_EARLY_MAP));
2840 ret = 0;
2841 break;
Mitchel Humpherys05314f32016-06-07 16:04:40 -07002842 case DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002843 if (!smmu_domain->smmu) {
2844 ret = -ENODEV;
2845 break;
2846 }
Liam Mark53cf2342016-12-20 11:36:07 -08002847 *((int *)data) = is_iommu_pt_coherent(smmu_domain);
2848 ret = 0;
2849 break;
2850 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
2851 *((int *)data) = !!(smmu_domain->attributes
2852 & (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
Mitchel Humpherys05314f32016-06-07 16:04:40 -07002853 ret = 0;
2854 break;
Charan Teja Reddyc682e472017-04-20 19:11:20 +05302855 case DOMAIN_ATTR_CB_STALL_DISABLE:
2856 *((int *)data) = !!(smmu_domain->attributes
2857 & (1 << DOMAIN_ATTR_CB_STALL_DISABLE));
2858 ret = 0;
2859 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002860 default:
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002861 ret = -ENODEV;
2862 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002863 }
Jeremy Gebbena1d92f72015-06-16 12:56:45 -06002864 mutex_unlock(&smmu_domain->init_mutex);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002865 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002866}
2867
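/*
 * Set a domain attribute. Most attributes (stage, procid, dynamic, bypass,
 * atomic, upstream hint, force-coherent) may only be changed while the
 * domain is detached and fail with -EBUSY or -EPERM once attached.
 */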
2868static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
2869 enum iommu_attr attr, void *data)
2870{
Will Deacon518f7132014-11-14 17:17:54 +00002871 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01002872 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01002873
Will Deacon518f7132014-11-14 17:17:54 +00002874 mutex_lock(&smmu_domain->init_mutex);
2875
Will Deaconc752ce42014-06-25 22:46:31 +01002876 switch (attr) {
2877 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00002878 if (smmu_domain->smmu) {
2879 ret = -EPERM;
2880 goto out_unlock;
2881 }
2882
Will Deaconc752ce42014-06-25 22:46:31 +01002883 if (*(int *)data)
2884 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2885 else
2886 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2887
Will Deacon518f7132014-11-14 17:17:54 +00002888 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002889 case DOMAIN_ATTR_PROCID:
2890 if (smmu_domain->smmu != NULL) {
2891 dev_err(smmu_domain->smmu->dev,
2892 "cannot change procid attribute while attached\n");
2893 ret = -EBUSY;
2894 break;
2895 }
2896 smmu_domain->cfg.procid = *((u32 *)data);
2897 ret = 0;
2898 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002899 case DOMAIN_ATTR_DYNAMIC: {
2900 int dynamic = *((int *)data);
2901
2902 if (smmu_domain->smmu != NULL) {
2903 dev_err(smmu_domain->smmu->dev,
2904 "cannot change dynamic attribute while attached\n");
2905 ret = -EBUSY;
2906 break;
2907 }
2908
2909 if (dynamic)
2910 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
2911 else
2912 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
2913 ret = 0;
2914 break;
2915 }
2916 case DOMAIN_ATTR_CONTEXT_BANK:
2917 /* context bank can't be set while attached */
2918 if (smmu_domain->smmu != NULL) {
2919 ret = -EBUSY;
2920 break;
2921 }
2922 /* ... and it can only be set for dynamic contexts. */
2923 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
2924 ret = -EINVAL;
2925 break;
2926 }
2927
2928 /* this will be validated during attach */
2929 smmu_domain->cfg.cbndx = *((unsigned int *)data);
2930 ret = 0;
2931 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002932 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
2933 u32 non_fatal_faults = *((int *)data);
2934
2935 if (non_fatal_faults)
2936 smmu_domain->attributes |=
2937 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
2938 else
2939 smmu_domain->attributes &=
2940 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
2941 ret = 0;
2942 break;
2943 }
Patrick Dalye62d3362016-03-15 18:58:28 -07002944 case DOMAIN_ATTR_S1_BYPASS: {
2945 int bypass = *((int *)data);
2946
2947 /* bypass can't be changed while attached */
2948 if (smmu_domain->smmu != NULL) {
2949 ret = -EBUSY;
2950 break;
2951 }
2952 if (bypass)
2953 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
2954 else
2955 smmu_domain->attributes &=
2956 ~(1 << DOMAIN_ATTR_S1_BYPASS);
2957
2958 ret = 0;
2959 break;
2960 }
Patrick Daly8befb662016-08-17 20:03:28 -07002961 case DOMAIN_ATTR_ATOMIC:
2962 {
2963 int atomic_ctx = *((int *)data);
2964
2965 /* can't be changed while attached */
2966 if (smmu_domain->smmu != NULL) {
2967 ret = -EBUSY;
2968 break;
2969 }
2970 if (atomic_ctx)
2971 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
2972 else
2973 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
2974 break;
2975 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002976 case DOMAIN_ATTR_SECURE_VMID:
2977 if (smmu_domain->secure_vmid != VMID_INVAL) {
2978 ret = -ENODEV;
2979 WARN(1, "secure vmid already set!");
2980 break;
2981 }
2982 smmu_domain->secure_vmid = *((int *)data);
2983 break;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002984 case DOMAIN_ATTR_FAST:
2985 if (*((int *)data))
2986 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
2987 ret = 0;
2988 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002989 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2990 /* can't be changed while attached */
2991 if (smmu_domain->smmu != NULL) {
2992 ret = -EBUSY;
2993 break;
2994 }
2995 if (*((int *)data))
2996 smmu_domain->attributes |=
2997 1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
2998 ret = 0;
2999 break;
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003000 case DOMAIN_ATTR_EARLY_MAP: {
3001 int early_map = *((int *)data);
3002
3003 ret = 0;
3004 if (early_map) {
3005 smmu_domain->attributes |=
3006 1 << DOMAIN_ATTR_EARLY_MAP;
3007 } else {
3008 if (smmu_domain->smmu)
3009 ret = arm_smmu_enable_s1_translations(
3010 smmu_domain);
3011
3012 if (!ret)
3013 smmu_domain->attributes &=
3014 ~(1 << DOMAIN_ATTR_EARLY_MAP);
3015 }
3016 break;
3017 }
Liam Mark53cf2342016-12-20 11:36:07 -08003018 case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT: {
3019 int force_coherent = *((int *)data);
3020
3021 if (smmu_domain->smmu != NULL) {
3022 dev_err(smmu_domain->smmu->dev,
3023 "cannot change force coherent attribute while attached\n");
3024 ret = -EBUSY;
3025 break;
3026 }
3027
3028 if (force_coherent)
3029 smmu_domain->attributes |=
3030 1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
3031 else
3032 smmu_domain->attributes &=
3033 ~(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT);
3034
3035 ret = 0;
3036 break;
3037 }
3038
Charan Teja Reddyc682e472017-04-20 19:11:20 +05303039 case DOMAIN_ATTR_CB_STALL_DISABLE:
3040 if (*((int *)data))
3041 smmu_domain->attributes |=
3042 1 << DOMAIN_ATTR_CB_STALL_DISABLE;
3043 ret = 0;
3044 break;
Will Deaconc752ce42014-06-25 22:46:31 +01003045 default:
Will Deacon518f7132014-11-14 17:17:54 +00003046 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01003047 }
Will Deacon518f7132014-11-14 17:17:54 +00003048
3049out_unlock:
3050 mutex_unlock(&smmu_domain->init_mutex);
3051 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01003052}
3053
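/*
 * #iommu-cells translation: pack the stream ID (args[0]) and optional SMR
 * mask (args[1]) from the devicetree phandle into a single fwspec ID.
 */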
Robin Murphy7e96c742016-09-14 15:26:46 +01003054static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
3055{
3056 u32 fwid = 0;
3057
3058 if (args->args_count > 0)
3059 fwid |= (u16)args->args[0];
3060
3061 if (args->args_count > 1)
3062 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
3063
3064 return iommu_fwspec_add_ids(dev, &fwid, 1);
3065}
3066
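/*
 * Late enable of stage 1 translation for DOMAIN_ATTR_EARLY_MAP domains:
 * set SCTLR.M on the context bank once the early mappings are in place.
 */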
Patrick Dalyef6c1dc2016-11-16 14:35:23 -08003067static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain)
3068{
3069 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3070 struct arm_smmu_device *smmu = smmu_domain->smmu;
3071 void __iomem *cb_base;
3072 u32 reg;
3073 int ret;
3074
3075 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3076 ret = arm_smmu_power_on(smmu->pwr);
3077 if (ret)
3078 return ret;
3079
3080 reg = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3081 reg |= SCTLR_M;
3082
3083 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
3084 arm_smmu_power_off(smmu->pwr);
3085 return ret;
3086}
3087
Liam Mark3ba41cf2016-12-09 14:39:04 -08003088static bool arm_smmu_is_iova_coherent(struct iommu_domain *domain,
3089 dma_addr_t iova)
3090{
3091 bool ret;
3092 unsigned long flags;
3093 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3094 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
3095
3096 if (!ops)
3097 return false;
3098
3099 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
3100 ret = ops->is_iova_coherent(ops, iova);
3101 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
3102 return ret;
3103}
3104
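/*
 * Debug helper: write the requested fault bits to FSRRESTORE on the
 * domain's context bank to raise a synthetic context fault, then give the
 * interrupt a second to fire before powering the SMMU back down.
 */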
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003105static void arm_smmu_trigger_fault(struct iommu_domain *domain,
3106 unsigned long flags)
3107{
3108 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3109 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3110 struct arm_smmu_device *smmu;
3111 void __iomem *cb_base;
3112
3113 if (!smmu_domain->smmu) {
3114 pr_err("Can't trigger faults on non-attached domains\n");
3115 return;
3116 }
3117
3118 smmu = smmu_domain->smmu;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003119 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003120 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003121
3122 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3123 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
3124 flags, cfg->cbndx);
3125 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07003126 /* give the interrupt time to fire... */
3127 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003128
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003129 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003130}
3131
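/*
 * Debug register accessors exposed through the iommu ops. Offsets are
 * limited to the 4K context-bank window and the SMMU is powered on around
 * each access.
 */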
Mitchel Humpherysfd557002015-08-21 14:07:59 -07003132static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
3133 unsigned long offset)
3134{
3135 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3136 struct arm_smmu_device *smmu;
3137 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3138 void __iomem *cb_base;
3139 unsigned long val;
3140
3141 if (offset >= SZ_4K) {
3142 pr_err("Invalid offset: 0x%lx\n", offset);
3143 return 0;
3144 }
3145
3146 smmu = smmu_domain->smmu;
3147 if (!smmu) {
3148 WARN(1, "Can't read registers of a detached domain\n");
3149 val = 0;
3150 return val;
3151 }
3152
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003153 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003154 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003155
Mitchel Humpherysfd557002015-08-21 14:07:59 -07003156 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3157 val = readl_relaxed(cb_base + offset);
3158
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003159 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07003160 return val;
3161}
3162
3163static void arm_smmu_reg_write(struct iommu_domain *domain,
3164 unsigned long offset, unsigned long val)
3165{
3166 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3167 struct arm_smmu_device *smmu;
3168 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3169 void __iomem *cb_base;
3170
3171 if (offset >= SZ_4K) {
3172 pr_err("Invalid offset: 0x%lx\n", offset);
3173 return;
3174 }
3175
3176 smmu = smmu_domain->smmu;
3177 if (!smmu) {
 3178		WARN(1, "Can't write registers of a detached domain\n");
3179 return;
3180 }
3181
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003182 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003183 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003184
Mitchel Humpherysfd557002015-08-21 14:07:59 -07003185 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3186 writel_relaxed(val, cb_base + offset);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003187
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003188 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07003189}
3190
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003191static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
3192{
3193 arm_smmu_tlb_inv_context(to_smmu_domain(domain));
3194}
3195
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003196static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
3197{
3198 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3199
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003200 return arm_smmu_power_on(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003201}
3202
3203static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
3204{
3205 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3206
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003207 arm_smmu_power_off(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003208}
3209
Will Deacon518f7132014-11-14 17:17:54 +00003210static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01003211 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01003212 .domain_alloc = arm_smmu_domain_alloc,
3213 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01003214 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07003215 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01003216 .map = arm_smmu_map,
3217 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07003218 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01003219 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07003220 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01003221 .add_device = arm_smmu_add_device,
3222 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02003223 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01003224 .domain_get_attr = arm_smmu_domain_get_attr,
3225 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy7e96c742016-09-14 15:26:46 +01003226 .of_xlate = arm_smmu_of_xlate,
Will Deacon518f7132014-11-14 17:17:54 +00003227 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07003228 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherysfd557002015-08-21 14:07:59 -07003229 .reg_read = arm_smmu_reg_read,
3230 .reg_write = arm_smmu_reg_write,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08003231 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08003232 .enable_config_clocks = arm_smmu_enable_config_clocks,
3233 .disable_config_clocks = arm_smmu_disable_config_clocks,
Liam Mark3ba41cf2016-12-09 14:39:04 -08003234 .is_iova_coherent = arm_smmu_is_iova_coherent,
Sudarshan Rajagopalan7a0b4bb2017-04-04 19:10:06 -07003235 .iova_to_pte = arm_smmu_iova_to_pte,
Will Deacon45ae7cf2013-06-24 18:31:25 +01003236};
3237
Patrick Dalyad441dd2016-09-15 15:50:46 -07003238#define IMPL_DEF1_MICRO_MMU_CTRL 0
3239#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
3240#define MICRO_MMU_CTRL_IDLE (1 << 3)
3241
3242/* Definitions for implementation-defined registers */
3243#define ACTLR_QCOM_OSH_SHIFT 28
3244#define ACTLR_QCOM_OSH 1
3245
3246#define ACTLR_QCOM_ISH_SHIFT 29
3247#define ACTLR_QCOM_ISH 1
3248
3249#define ACTLR_QCOM_NSH_SHIFT 30
3250#define ACTLR_QCOM_NSH 1
3251
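/*
 * QSMMUv2 halt/resume: request a local halt through the implementation
 * defined MICRO_MMU_CTRL register and (optionally) poll for the IDLE bit,
 * so the sequences below run with the translation unit quiesced.
 */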
3252static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003253{
3254 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003255 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003256
3257 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
3258 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
3259 0, 30000)) {
3260 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
3261 return -EBUSY;
3262 }
3263
3264 return 0;
3265}
3266
Patrick Dalyad441dd2016-09-15 15:50:46 -07003267static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003268{
3269 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3270 u32 reg;
3271
3272 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3273 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
3274 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3275
Patrick Dalyad441dd2016-09-15 15:50:46 -07003276 return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003277}
3278
Patrick Dalyad441dd2016-09-15 15:50:46 -07003279static int qsmmuv2_halt(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003280{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003281 return __qsmmuv2_halt(smmu, true);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003282}
3283
Patrick Dalyad441dd2016-09-15 15:50:46 -07003284static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003285{
Patrick Dalyad441dd2016-09-15 15:50:46 -07003286 return __qsmmuv2_halt(smmu, false);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07003287}
3288
Patrick Dalyad441dd2016-09-15 15:50:46 -07003289static void qsmmuv2_resume(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07003290{
3291 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
3292 u32 reg;
3293
3294 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3295 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
3296 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
3297}
3298
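/*
 * QSMMUv2 reset: program the QCOM shareability override fields in each
 * context bank's ACTLR, then halt the SMMU while the attach-impl-defs
 * register list is written, and finally resume it.
 */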
Patrick Dalyad441dd2016-09-15 15:50:46 -07003299static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003300{
3301 int i;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003302 u32 val;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003303 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003304 void __iomem *cb_base;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003305
Patrick Dalyad441dd2016-09-15 15:50:46 -07003306 /*
3307 * SCTLR.M must be disabled here per ARM SMMUv2 spec
3308 * to prevent table walks with an inconsistent state.
3309 */
3310 for (i = 0; i < smmu->num_context_banks; ++i) {
3311 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3312 val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
3313 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
3314 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
3315 writel_relaxed(val, cb_base + ARM_SMMU_CB_ACTLR);
3316 }
3317
3318 /* Program implementation defined registers */
3319 qsmmuv2_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003320 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
3321 writel_relaxed(regs[i].value,
3322 ARM_SMMU_GR0(smmu) + regs[i].offset);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003323 qsmmuv2_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003324}
3325
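/*
 * QSMMUv2 ATOS: halt the SMMU, terminate any stalled transaction, clear
 * FSR and momentarily drop stall-on-fault (SCTLR.CFCFG) around the common
 * ATS1PR translation, then restore SCTLR and resume. QCOM_SMMUV2 models
 * report ATOS as disabled and always return 0.
 */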
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003326static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
3327 dma_addr_t iova)
Patrick Dalyad441dd2016-09-15 15:50:46 -07003328{
3329 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3330 struct arm_smmu_device *smmu = smmu_domain->smmu;
3331 int ret;
3332 phys_addr_t phys = 0;
3333 unsigned long flags;
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003334 u32 sctlr, sctlr_orig, fsr;
3335 void __iomem *cb_base;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003336
Patrick Dalyf7007672017-08-08 17:12:03 -07003337 if (smmu->model == QCOM_SMMUV2) {
3338 dev_err(smmu->dev, "ATOS support is disabled\n");
3339 return 0;
3340 }
3341
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003342 ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003343 if (ret)
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003344 return ret;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003345
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003346 spin_lock_irqsave(&smmu->atos_lock, flags);
3347 cb_base = ARM_SMMU_CB_BASE(smmu) +
3348 ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003349
3350 qsmmuv2_halt_nowait(smmu);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003351 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003352 qsmmuv2_wait_for_halt(smmu);
3353
3354 /* clear FSR to allow ATOS to log any faults */
3355 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
3356 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
3357
3358 /* disable stall mode momentarily */
3359 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3360 sctlr = sctlr_orig & ~SCTLR_CFCFG;
3361 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
3362
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003363 phys = __arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003364
3365 /* restore SCTLR */
3366 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
3367
3368 qsmmuv2_resume(smmu);
Patrick Daly2dd3e5b2017-01-25 14:49:42 -08003369 spin_unlock_irqrestore(&smmu->atos_lock, flags);
3370
3371 arm_smmu_power_off(smmu_domain->smmu->pwr);
3372 return phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07003373}
3374
3375struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
3376 .device_reset = qsmmuv2_device_reset,
3377 .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
Patrick Dalyad441dd2016-09-15 15:50:46 -07003378};
3379
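/*
 * Quiesce all context banks: clear the MMU-500 ACR cache-lock bit on r2
 * onwards, disable each bank's SCTLR, clear latched CB_FSR state, and turn
 * off the MMU-500 next-page prefetcher (errata #841119 and #826419).
 */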
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003380static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003381{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003382 int i;
3383 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003384 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003385 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003386
Peng Fan3ca37122016-05-03 21:50:30 +08003387 /*
3388 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
3389 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
3390 * bit is only present in MMU-500r2 onwards.
3391 */
3392 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
3393 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
3394 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
3395 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
3396 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
3397 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
3398 }
3399
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003400 /* Make sure all context banks are disabled and clear CB_FSR */
3401 for (i = 0; i < smmu->num_context_banks; ++i) {
3402 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3403 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
3404 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003405 /*
3406 * Disable MMU-500's not-particularly-beneficial next-page
3407 * prefetcher for the sake of errata #841119 and #826419.
3408 */
3409 if (smmu->model == ARM_MMU500) {
3410 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
3411 reg &= ~ARM_MMU500_ACTLR_CPRE;
3412 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
3413 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003414 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003415}
3416
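/*
 * Global reset: clear sGFSR, reinitialise stream mappings and context banks
 * (unless ARM_SMMU_OPT_SKIP_INIT preserves bootloader state), invalidate the
 * TLBs and program sCR0, then apply the implementation-specific reset via
 * arm_smmu_arch_device_reset().
 */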
3417static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
3418{
3419 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy468f4942016-09-12 17:13:49 +01003420 int i;
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003421 u32 reg;
3422
3423 /* clear global FSR */
3424 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3425 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3426
Robin Murphy468f4942016-09-12 17:13:49 +01003427 /*
3428 * Reset stream mapping groups: Initial values mark all SMRn as
3429 * invalid and all S2CRn as bypass unless overridden.
3430 */
Patrick Daly59b6d202017-06-12 13:12:15 -07003431 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
3432 for (i = 0; i < smmu->num_mapping_groups; ++i)
3433 arm_smmu_write_sme(smmu, i);
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003434
Patrick Daly59b6d202017-06-12 13:12:15 -07003435 arm_smmu_context_bank_reset(smmu);
3436 }
Will Deacon1463fe42013-07-31 19:21:27 +01003437
Will Deacon45ae7cf2013-06-24 18:31:25 +01003438 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003439 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
3440 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
3441
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003442 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003443
Will Deacon45ae7cf2013-06-24 18:31:25 +01003444 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003445 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003446
3447 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003448 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003449
Robin Murphy25a1c962016-02-10 14:25:33 +00003450 /* Enable client access, handling unmatched streams as appropriate */
3451 reg &= ~sCR0_CLIENTPD;
3452 if (disable_bypass)
3453 reg |= sCR0_USFCFG;
3454 else
3455 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003456
3457 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003458 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003459
3460 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003461 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003462
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003463 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3464 reg |= sCR0_VMID16EN;
3465
Will Deacon45ae7cf2013-06-24 18:31:25 +01003466 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003467 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003468 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Dalyd7476202016-09-08 18:23:28 -07003469
3470 /* Manage any implementation defined features */
3471 arm_smmu_arch_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003472}
3473
3474static int arm_smmu_id_size_to_bits(int size)
3475{
3476 switch (size) {
3477 case 0:
3478 return 32;
3479 case 1:
3480 return 36;
3481 case 2:
3482 return 40;
3483 case 3:
3484 return 42;
3485 case 4:
3486 return 44;
3487 case 5:
3488 default:
3489 return 48;
3490 }
3491}
3492
Patrick Dalyda688822017-05-17 20:12:48 -07003493
3494/*
 3495 * Some context banks need to be transferred from bootloader to HLOS in a way
3496 * that allows ongoing traffic. The current expectation is that these context
3497 * banks operate in bypass mode.
3498 * Additionally, there must be exactly one device in devicetree with stream-ids
3499 * overlapping those used by the bootloader.
3500 */
3501static int arm_smmu_alloc_cb(struct iommu_domain *domain,
3502 struct arm_smmu_device *smmu,
3503 struct device *dev)
3504{
3505 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Patrick Dalye72526b2017-07-18 16:21:44 -07003506 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Dalyda688822017-05-17 20:12:48 -07003507 u32 i, idx;
3508 int cb = -EINVAL;
3509 bool dynamic;
3510
Patrick Dalye72526b2017-07-18 16:21:44 -07003511 /*
3512 * Dynamic domains have already set cbndx through domain attribute.
3513 * Verify that they picked a valid value.
3514 */
Patrick Dalyda688822017-05-17 20:12:48 -07003515 dynamic = is_dynamic_domain(domain);
Patrick Dalye72526b2017-07-18 16:21:44 -07003516 if (dynamic) {
3517 cb = smmu_domain->cfg.cbndx;
3518 if (cb < smmu->num_context_banks)
3519 return cb;
3520 else
3521 return -EINVAL;
3522 }
Patrick Dalyda688822017-05-17 20:12:48 -07003523
3524 mutex_lock(&smmu->stream_map_mutex);
3525 for_each_cfg_sme(fwspec, i, idx) {
3526 if (smmu->s2crs[idx].cb_handoff)
3527 cb = smmu->s2crs[idx].cbndx;
3528 }
3529
3530 if (cb < 0) {
3531 mutex_unlock(&smmu->stream_map_mutex);
3532 return __arm_smmu_alloc_bitmap(smmu->context_map,
3533 smmu->num_s2_context_banks,
3534 smmu->num_context_banks);
3535 }
3536
3537 for (i = 0; i < smmu->num_mapping_groups; i++) {
Patrick Daly2eb31362017-06-14 18:29:36 -07003538 if (smmu->s2crs[i].cb_handoff && smmu->s2crs[i].cbndx == cb) {
Patrick Dalyda688822017-05-17 20:12:48 -07003539 smmu->s2crs[i].cb_handoff = false;
3540 smmu->s2crs[i].count -= 1;
3541 }
3542 }
3543 mutex_unlock(&smmu->stream_map_mutex);
3544
3545 return cb;
3546}
3547
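/*
 * Record stream mapping entries left valid by the bootloader: mirror the
 * live SMR/S2CR state into the driver's tables, flag the S2CRs as handoff
 * entries and reserve their context banks so arm_smmu_alloc_cb() can reuse
 * them.
 */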
3548static int arm_smmu_handoff_cbs(struct arm_smmu_device *smmu)
3549{
3550 u32 i, raw_smr, raw_s2cr;
3551 struct arm_smmu_smr smr;
3552 struct arm_smmu_s2cr s2cr;
3553
3554 for (i = 0; i < smmu->num_mapping_groups; i++) {
3555 raw_smr = readl_relaxed(ARM_SMMU_GR0(smmu) +
3556 ARM_SMMU_GR0_SMR(i));
3557 if (!(raw_smr & SMR_VALID))
3558 continue;
3559
3560 smr.mask = (raw_smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
3561 smr.id = (u16)raw_smr;
3562 smr.valid = true;
3563
3564 raw_s2cr = readl_relaxed(ARM_SMMU_GR0(smmu) +
3565 ARM_SMMU_GR0_S2CR(i));
Patrick Daly4ea4bdf2017-08-29 19:24:49 -07003566 memset(&s2cr, 0, sizeof(s2cr));
Patrick Dalyda688822017-05-17 20:12:48 -07003567 s2cr.group = NULL;
3568 s2cr.count = 1;
3569 s2cr.type = (raw_s2cr >> S2CR_TYPE_SHIFT) & S2CR_TYPE_MASK;
3570 s2cr.privcfg = (raw_s2cr >> S2CR_PRIVCFG_SHIFT) &
3571 S2CR_PRIVCFG_MASK;
3572 s2cr.cbndx = (u8)raw_s2cr;
3573 s2cr.cb_handoff = true;
3574
3575 if (s2cr.type != S2CR_TYPE_TRANS)
3576 continue;
3577
3578 smmu->smrs[i] = smr;
3579 smmu->s2crs[i] = s2cr;
3580 bitmap_set(smmu->context_map, s2cr.cbndx, 1);
3581 dev_dbg(smmu->dev, "Handoff smr: %x s2cr: %x cb: %d\n",
3582 raw_smr, raw_s2cr, s2cr.cbndx);
3583 }
3584
3585 return 0;
3586}
3587
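/*
 * Parse the optional "attach-impl-defs" devicetree property into an array
 * of (offset, value) pairs which are written into the global register space
 * during device reset.
 */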
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003588static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
3589{
3590 struct device *dev = smmu->dev;
3591 int i, ntuples, ret;
3592 u32 *tuples;
3593 struct arm_smmu_impl_def_reg *regs, *regit;
3594
3595 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
3596 return 0;
3597
3598 ntuples /= sizeof(u32);
3599 if (ntuples % 2) {
3600 dev_err(dev,
3601 "Invalid number of attach-impl-defs registers: %d\n",
3602 ntuples);
3603 return -EINVAL;
3604 }
3605
3606 regs = devm_kmalloc(
3607 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
3608 GFP_KERNEL);
3609 if (!regs)
3610 return -ENOMEM;
3611
3612 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
3613 if (!tuples)
3614 return -ENOMEM;
3615
3616 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
3617 tuples, ntuples);
3618 if (ret)
3619 return ret;
3620
3621 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
3622 regit->offset = tuples[i];
3623 regit->value = tuples[i + 1];
3624 }
3625
3626 devm_kfree(dev, tuples);
3627
3628 smmu->impl_def_attach_registers = regs;
3629 smmu->num_impl_def_attach_registers = ntuples / 2;
3630
3631 return 0;
3632}
3633
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003634
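/*
 * Power-resource discovery: pick up the SMMU's clocks, GDSC regulators and
 * bus-scaling data from devicetree. Clock and regulator handles are devm
 * managed; the bus client is released in arm_smmu_exit_power_resources().
 */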
3635static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003636{
3637 const char *cname;
3638 struct property *prop;
3639 int i;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003640 struct device *dev = pwr->dev;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003641
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003642 pwr->num_clocks =
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003643 of_property_count_strings(dev->of_node, "clock-names");
3644
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003645 if (pwr->num_clocks < 1) {
3646 pwr->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003647 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07003648 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003649
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003650 pwr->clocks = devm_kzalloc(
3651 dev, sizeof(*pwr->clocks) * pwr->num_clocks,
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003652 GFP_KERNEL);
3653
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003654 if (!pwr->clocks)
3655 return -ENOMEM;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003656
3657 i = 0;
3658 of_property_for_each_string(dev->of_node, "clock-names",
3659 prop, cname) {
3660 struct clk *c = devm_clk_get(dev, cname);
3661
3662 if (IS_ERR(c)) {
3663 dev_err(dev, "Couldn't get clock: %s",
3664 cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07003665 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003666 }
3667
3668 if (clk_get_rate(c) == 0) {
3669 long rate = clk_round_rate(c, 1000);
3670
3671 clk_set_rate(c, rate);
3672 }
3673
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003674 pwr->clocks[i] = c;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003675
3676 ++i;
3677 }
3678 return 0;
3679}
3680
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003681static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003682{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003683 const char *cname;
3684 struct property *prop;
3685 int i, ret = 0;
3686 struct device *dev = pwr->dev;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003687
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003688 pwr->num_gdscs =
3689 of_property_count_strings(dev->of_node, "qcom,regulator-names");
3690
3691 if (pwr->num_gdscs < 1) {
3692 pwr->num_gdscs = 0;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003693 return 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003694 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003695
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003696 pwr->gdscs = devm_kzalloc(
3697 dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
3698
3699 if (!pwr->gdscs)
3700 return -ENOMEM;
3701
Prakash Guptafad87ca2017-05-16 12:13:02 +05303702 if (!of_property_read_u32(dev->of_node,
3703 "qcom,deferred-regulator-disable-delay",
3704 &(pwr->regulator_defer)))
3705 dev_info(dev, "regulator defer delay %d\n",
3706 pwr->regulator_defer);
3707
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003708 i = 0;
3709 of_property_for_each_string(dev->of_node, "qcom,regulator-names",
3710 prop, cname)
Patrick Daly86396be2017-04-17 18:08:45 -07003711 pwr->gdscs[i++].supply = cname;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003712
3713 ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
3714 return ret;
3715}
3716
3717static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
3718{
3719 struct device *dev = pwr->dev;
3720
3721 /* We don't want the bus APIs to print an error message */
3722 if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
3723 dev_dbg(dev, "No bus scaling info\n");
3724 return 0;
3725 }
3726
3727 pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
3728 if (!pwr->bus_dt_data) {
3729 dev_err(dev, "Unable to read bus-scaling from devicetree\n");
3730 return -EINVAL;
3731 }
3732
3733 pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
3734 if (!pwr->bus_client) {
3735 dev_err(dev, "Bus client registration failed\n");
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003736 return -EINVAL;
3737 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003738
3739 return 0;
3740}
3741
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003742/*
3743 * Cleanup done by devm. Any non-devm resources must clean up themselves.
3744 */
3745static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
3746 struct platform_device *pdev)
Patrick Daly2764f952016-09-06 19:22:44 -07003747{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003748 struct arm_smmu_power_resources *pwr;
3749 int ret;
Patrick Daly2764f952016-09-06 19:22:44 -07003750
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003751 pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
3752 if (!pwr)
3753 return ERR_PTR(-ENOMEM);
Patrick Daly2764f952016-09-06 19:22:44 -07003754
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003755 pwr->dev = &pdev->dev;
3756 pwr->pdev = pdev;
3757 mutex_init(&pwr->power_lock);
3758 spin_lock_init(&pwr->clock_refs_lock);
Patrick Daly2764f952016-09-06 19:22:44 -07003759
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003760 ret = arm_smmu_init_clocks(pwr);
3761 if (ret)
3762 return ERR_PTR(ret);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003763
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003764 ret = arm_smmu_init_regulators(pwr);
3765 if (ret)
3766 return ERR_PTR(ret);
3767
3768 ret = arm_smmu_init_bus_scaling(pwr);
3769 if (ret)
3770 return ERR_PTR(ret);
3771
3772 return pwr;
Patrick Daly2764f952016-09-06 19:22:44 -07003773}
3774
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003775/*
Patrick Dalyabeee952017-04-13 18:14:59 -07003776 * Bus APIs are not devm-safe, so the bus client is released explicitly.
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003777 */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003778static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003779{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003780 msm_bus_scale_unregister_client(pwr->bus_client);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003781}
3782
Will Deacon45ae7cf2013-06-24 18:31:25 +01003783static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
3784{
3785 unsigned long size;
3786 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
3787 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003788 bool cttw_dt, cttw_reg;
Robin Murphya754fd12016-09-12 17:13:50 +01003789 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003790
Mitchel Humpherysba822582015-10-20 11:37:41 -07003791 dev_dbg(smmu->dev, "probing hardware configuration...\n");
3792 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01003793 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003794
3795 /* ID0 */
3796 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01003797
3798 /* Restrict available stages based on module parameter */
3799 if (force_stage == 1)
3800 id &= ~(ID0_S2TS | ID0_NTS);
3801 else if (force_stage == 2)
3802 id &= ~(ID0_S1TS | ID0_NTS);
3803
Will Deacon45ae7cf2013-06-24 18:31:25 +01003804 if (id & ID0_S1TS) {
3805 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003806 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003807 }
3808
3809 if (id & ID0_S2TS) {
3810 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003811 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003812 }
3813
3814 if (id & ID0_NTS) {
3815 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003816 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003817 }
3818
3819 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01003820 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003821 dev_err(smmu->dev, "\tno translation support!\n");
3822 return -ENODEV;
3823 }
3824
Robin Murphyb7862e32016-04-13 18:13:03 +01003825 if ((id & ID0_S1TS) &&
3826 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003827 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003828 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003829 }
3830
Robin Murphybae2c2d2015-07-29 19:46:05 +01003831 /*
3832 * In order for DMA API calls to work properly, we must defer to what
3833 * the DT says about coherency, regardless of what the hardware claims.
3834 * Fortunately, this also opens up a workaround for systems where the
3835 * ID register value has ended up configured incorrectly.
3836 */
3837 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
3838 cttw_reg = !!(id & ID0_CTTW);
3839 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003840 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003841 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003842 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01003843 cttw_dt ? "" : "non-");
3844 if (cttw_dt != cttw_reg)
3845 dev_notice(smmu->dev,
3846 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003847
Robin Murphy53867802016-09-12 17:13:48 +01003848 /* Max. number of entries we have for stream matching/indexing */
3849 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
3850 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003851 if (id & ID0_SMS) {
Robin Murphy53867802016-09-12 17:13:48 +01003852 u32 smr;
Patrick Daly937de532016-12-12 18:44:09 -08003853 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003854
3855 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy53867802016-09-12 17:13:48 +01003856 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
3857 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003858 dev_err(smmu->dev,
3859 "stream-matching supported, but no SMRs present!\n");
3860 return -ENODEV;
3861 }
3862
Robin Murphy53867802016-09-12 17:13:48 +01003863 /*
3864 * SMR.ID bits may not be preserved if the corresponding MASK
3865 * bits are set, so check each one separately. We can reject
3866 * masters later if they try to claim IDs outside these masks.
3867 */
Patrick Daly937de532016-12-12 18:44:09 -08003868 for (i = 0; i < size; i++) {
3869 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
3870 if (!(smr & SMR_VALID))
3871 break;
3872 }
3873 if (i == size) {
3874 dev_err(smmu->dev,
3875 "Unable to compute streamid_masks\n");
3876 return -ENODEV;
3877 }
3878
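/*
 * Discover which SMR.ID and SMR.MASK bits are actually implemented by
 * writing an all-ones pattern into the free SMR found above and reading
 * it back.
 */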
Robin Murphy53867802016-09-12 17:13:48 +01003879 smr = smmu->streamid_mask << SMR_ID_SHIFT;
Patrick Daly937de532016-12-12 18:44:09 -08003880 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
3881 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
Robin Murphy53867802016-09-12 17:13:48 +01003882 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003883
Robin Murphy53867802016-09-12 17:13:48 +01003884 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
Patrick Daly937de532016-12-12 18:44:09 -08003885 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
3886 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
Robin Murphy53867802016-09-12 17:13:48 +01003887 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
Dhaval Patel031d7462015-05-09 14:47:29 -07003888
Robin Murphy468f4942016-09-12 17:13:49 +01003889 /* Zero-initialised to mark as invalid */
3890 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
3891 GFP_KERNEL);
3892 if (!smmu->smrs)
3893 return -ENOMEM;
3894
Robin Murphy53867802016-09-12 17:13:48 +01003895 dev_notice(smmu->dev,
3896 "\tstream matching with %lu register groups, mask 0x%x",
3897 size, smmu->smr_mask_mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003898 }
Robin Murphya754fd12016-09-12 17:13:50 +01003899 /* s2cr->type == 0 means translation, so initialise explicitly */
3900 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
3901 GFP_KERNEL);
3902 if (!smmu->s2crs)
3903 return -ENOMEM;
3904 for (i = 0; i < size; i++)
3905 smmu->s2crs[i] = s2cr_init_val;
3906
Robin Murphy53867802016-09-12 17:13:48 +01003907 smmu->num_mapping_groups = size;
Robin Murphy6668f692016-09-12 17:13:54 +01003908 mutex_init(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003909
Robin Murphy7602b872016-04-28 17:12:09 +01003910 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
3911 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
3912 if (!(id & ID0_PTFS_NO_AARCH32S))
3913 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
3914 }
3915
Will Deacon45ae7cf2013-06-24 18:31:25 +01003916 /* ID1 */
3917 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01003918 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003919
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003920 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00003921 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01003922 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003923 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07003924 dev_warn(smmu->dev,
3925 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
3926 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003927
Will Deacon518f7132014-11-14 17:17:54 +00003928 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003929 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
3930 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
3931 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
3932 return -ENODEV;
3933 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07003934 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01003935 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01003936 /*
3937 * Cavium CN88xx erratum #27704.
3938 * Ensure ASID and VMID allocation is unique across all SMMUs in
3939 * the system.
3940 */
3941 if (smmu->model == CAVIUM_SMMUV2) {
3942 smmu->cavium_id_base =
3943 atomic_add_return(smmu->num_context_banks,
3944 &cavium_smmu_context_count);
3945 smmu->cavium_id_base -= smmu->num_context_banks;
3946 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003947
3948 /* ID2 */
3949 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
3950 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003951 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003952
Will Deacon518f7132014-11-14 17:17:54 +00003953 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003954 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003955 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003956
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003957 if (id & ID2_VMID16)
3958 smmu->features |= ARM_SMMU_FEAT_VMID16;
3959
Robin Murphyf1d84542015-03-04 16:41:05 +00003960 /*
3961 * What the page table walker can address actually depends on which
3962 * descriptor format is in use, but since a) we don't know that yet,
3963 * and b) it can vary per context bank, this will have to do...
3964 */
3965 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
3966 dev_warn(smmu->dev,
3967 "failed to set DMA mask for table walker\n");
3968
Robin Murphyb7862e32016-04-13 18:13:03 +01003969 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00003970 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01003971 if (smmu->version == ARM_SMMU_V1_64K)
3972 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003973 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003974 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00003975 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00003976 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01003977 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00003978 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01003979 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00003980 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01003981 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003982 }
3983
Robin Murphy7602b872016-04-28 17:12:09 +01003984 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01003985 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01003986 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01003987 if (smmu->features &
3988 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01003989 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01003990 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01003991 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01003992 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01003993 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01003994
Robin Murphyd5466352016-05-09 17:20:09 +01003995 if (arm_smmu_ops.pgsize_bitmap == -1UL)
3996 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
3997 else
3998 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003999 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01004000 smmu->pgsize_bitmap);
4001
Will Deacon518f7132014-11-14 17:17:54 +00004002
Will Deacon28d60072014-09-01 16:24:48 +01004003 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004004 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
4005 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01004006
4007 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07004008 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
4009 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01004010
Will Deacon45ae7cf2013-06-24 18:31:25 +01004011 return 0;
4012}
4013
Robin Murphy67b65a32016-04-13 18:12:57 +01004014struct arm_smmu_match_data {
4015 enum arm_smmu_arch_version version;
4016 enum arm_smmu_implementation model;
Patrick Dalyd7476202016-09-08 18:23:28 -07004017 struct arm_smmu_arch_ops *arch_ops;
Robin Murphy67b65a32016-04-13 18:12:57 +01004018};
4019
Patrick Dalyd7476202016-09-08 18:23:28 -07004020#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops) \
4021static struct arm_smmu_match_data name = { \
4022.version = ver, \
4023.model = imp, \
4024.arch_ops = ops, \
4025} \
Robin Murphy67b65a32016-04-13 18:12:57 +01004026
Patrick Daly1f8a2882016-09-12 17:32:05 -07004027struct arm_smmu_arch_ops qsmmuv500_arch_ops;
4028
Patrick Dalyd7476202016-09-08 18:23:28 -07004029ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
4030ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
4031ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
4032ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
4033ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
Patrick Dalyad441dd2016-09-15 15:50:46 -07004034ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004035ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
4036 &qsmmuv500_arch_ops);
Robin Murphy67b65a32016-04-13 18:12:57 +01004037
Joerg Roedel09b52692014-10-02 12:24:45 +02004038static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01004039 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
4040 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
4041 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01004042 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01004043 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01004044 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07004045 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Patrick Daly1f8a2882016-09-12 17:32:05 -07004046 { .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
Robin Murphy09360402014-08-28 17:51:59 +01004047 { },
4048};
4049MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
4050
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004051
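/*
 * Helpers run over every platform device from the probe path below: the
 * first applies of_iommu_configure() to devices that have no iommu_fwspec
 * yet, the second calls add_device() on each device when the platform bus
 * already has an IOMMU registered.
 */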
4052static int arm_smmu_of_iommu_configure_fixup(struct device *dev, void *data)
4053{
4054 if (!dev->iommu_fwspec)
4055 of_iommu_configure(dev, dev->of_node);
4056 return 0;
4057}
4058
Patrick Daly000a2f22017-02-13 22:18:12 -08004059static int arm_smmu_add_device_fixup(struct device *dev, void *data)
4060{
4061 struct iommu_ops *ops = data;
4062
4063 ops->add_device(dev);
4064 return 0;
4065}
4066
Patrick Daly1f8a2882016-09-12 17:32:05 -07004067static int qsmmuv500_tbu_register(struct device *dev, void *data);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004068static int arm_smmu_device_dt_probe(struct platform_device *pdev)
4069{
Robin Murphy67b65a32016-04-13 18:12:57 +01004070 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004071 struct resource *res;
4072 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004073 struct device *dev = &pdev->dev;
Robin Murphyd5b41782016-09-14 15:21:39 +01004074 int num_irqs, i, err;
Robin Murphy7e96c742016-09-14 15:26:46 +01004075 bool legacy_binding;
4076
4077 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
4078 if (legacy_binding && !using_generic_binding) {
4079 if (!using_legacy_binding)
4080 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
4081 using_legacy_binding = true;
4082 } else if (!legacy_binding && !using_legacy_binding) {
4083 using_generic_binding = true;
4084 } else {
4085 dev_err(dev, "not probing due to mismatched DT properties\n");
4086 return -ENODEV;
4087 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004088
4089 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
4090 if (!smmu) {
4091 dev_err(dev, "failed to allocate arm_smmu_device\n");
4092 return -ENOMEM;
4093 }
4094 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08004095 spin_lock_init(&smmu->atos_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07004096 idr_init(&smmu->asid_idr);
4097 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004098
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004099 data = of_device_get_match_data(dev);
Robin Murphy67b65a32016-04-13 18:12:57 +01004100 smmu->version = data->version;
4101 smmu->model = data->model;
Patrick Dalyd7476202016-09-08 18:23:28 -07004102 smmu->arch_ops = data->arch_ops;
Robin Murphy09360402014-08-28 17:51:59 +01004103
Will Deacon45ae7cf2013-06-24 18:31:25 +01004104 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01004105 smmu->base = devm_ioremap_resource(dev, res);
4106 if (IS_ERR(smmu->base))
4107 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004108 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004109
4110 if (of_property_read_u32(dev->of_node, "#global-interrupts",
4111 &smmu->num_global_irqs)) {
4112 dev_err(dev, "missing #global-interrupts property\n");
4113 return -ENODEV;
4114 }
4115
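/*
 * Count the IRQ resources: the first #global-interrupts entries are global
 * fault interrupts, any further entries are context-bank interrupts.
 */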
4116 num_irqs = 0;
4117 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
4118 num_irqs++;
4119 if (num_irqs > smmu->num_global_irqs)
4120 smmu->num_context_irqs++;
4121 }
4122
Andreas Herrmann44a08de2013-10-01 13:39:07 +01004123 if (!smmu->num_context_irqs) {
4124 dev_err(dev, "found %d interrupts but expected at least %d\n",
4125 num_irqs, smmu->num_global_irqs + 1);
4126 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004127 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01004128
4129 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
4130 GFP_KERNEL);
4131 if (!smmu->irqs) {
4132 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
4133 return -ENOMEM;
4134 }
4135
4136 for (i = 0; i < num_irqs; ++i) {
4137 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07004138
Will Deacon45ae7cf2013-06-24 18:31:25 +01004139 if (irq < 0) {
4140 dev_err(dev, "failed to get irq index %d\n", i);
4141 return -ENODEV;
4142 }
4143 smmu->irqs[i] = irq;
4144 }
4145
Dhaval Patel031d7462015-05-09 14:47:29 -07004146 parse_driver_options(smmu);
4147
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004148 smmu->pwr = arm_smmu_init_power_resources(pdev);
4149 if (IS_ERR(smmu->pwr))
4150 return PTR_ERR(smmu->pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07004151
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004152 err = arm_smmu_power_on(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004153 if (err)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004154 goto out_exit_power_resources;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004155
4156 err = arm_smmu_device_cfg_probe(smmu);
4157 if (err)
4158 goto out_power_off;
4159
Patrick Dalyda688822017-05-17 20:12:48 -07004160 err = arm_smmu_handoff_cbs(smmu);
4161 if (err)
4162 goto out_power_off;
4163
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004164 err = arm_smmu_parse_impl_def_registers(smmu);
4165 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004166 goto out_power_off;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07004167
Robin Murphyb7862e32016-04-13 18:13:03 +01004168 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01004169 smmu->num_context_banks != smmu->num_context_irqs) {
4170 dev_err(dev,
Mitchel Humpherys73230ce2014-12-11 17:18:02 -08004171 "found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
4172 smmu->num_context_irqs, smmu->num_context_banks,
4173 smmu->num_context_banks);
4174 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004175 }
4176
Will Deacon45ae7cf2013-06-24 18:31:25 +01004177 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08004178 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
4179 NULL, arm_smmu_global_fault,
4180 IRQF_ONESHOT | IRQF_SHARED,
4181 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004182 if (err) {
4183 dev_err(dev, "failed to request global IRQ %d (%u)\n",
4184 i, smmu->irqs[i]);
Robin Murphyd5b41782016-09-14 15:21:39 +01004185 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004186 }
4187 }
4188
Patrick Dalyd7476202016-09-08 18:23:28 -07004189 err = arm_smmu_arch_init(smmu);
4190 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01004191 goto out_power_off;
Patrick Dalyd7476202016-09-08 18:23:28 -07004192
Robin Murphy06e393e2016-09-12 17:13:55 +01004193 of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004194 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01004195 arm_smmu_device_reset(smmu);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004196 arm_smmu_power_off(smmu->pwr);
Patrick Dalyd7476202016-09-08 18:23:28 -07004197
Patrick Daly8e3371a2017-02-13 22:14:53 -08004198 INIT_LIST_HEAD(&smmu->list);
4199 spin_lock(&arm_smmu_devices_lock);
4200 list_add(&smmu->list, &arm_smmu_devices);
4201 spin_unlock(&arm_smmu_devices_lock);
4202
Patrick Dalyc47dcd42017-02-09 23:09:57 -08004203 /* bus_set_iommu depends on this. */
4204 bus_for_each_dev(&platform_bus_type, NULL, NULL,
4205 arm_smmu_of_iommu_configure_fixup);
4206
Robin Murphy7e96c742016-09-14 15:26:46 +01004207 /* Oh, for a proper bus abstraction */
4208 if (!iommu_present(&platform_bus_type))
4209 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
Patrick Daly000a2f22017-02-13 22:18:12 -08004210 else
4211 bus_for_each_dev(&platform_bus_type, NULL, &arm_smmu_ops,
4212 arm_smmu_add_device_fixup);
Robin Murphy7e96c742016-09-14 15:26:46 +01004213#ifdef CONFIG_ARM_AMBA
4214 if (!iommu_present(&amba_bustype))
4215 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
4216#endif
4217#ifdef CONFIG_PCI
4218 if (!iommu_present(&pci_bus_type)) {
4219 pci_request_acs();
4220 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
4221 }
4222#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01004223 return 0;
4224
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004225out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004226 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004227
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004228out_exit_power_resources:
4229 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07004230
Will Deacon45ae7cf2013-06-24 18:31:25 +01004231 return err;
4232}
4233
4234static int arm_smmu_device_remove(struct platform_device *pdev)
4235{
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004236 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004237
4238 if (!smmu)
4239 return -ENODEV;
4240
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004241 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07004242 return -EINVAL;
4243
Will Deaconecfadb62013-07-31 19:21:28 +01004244 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyfe52d4f2016-09-12 17:13:52 +01004245 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01004246
Patrick Dalyc190d932016-08-30 17:23:28 -07004247 idr_destroy(&smmu->asid_idr);
4248
Will Deacon45ae7cf2013-06-24 18:31:25 +01004249 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07004250 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004251 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07004252
Patrick Daly5b3d8c62016-11-01 15:34:11 -07004253 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07004254
Will Deacon45ae7cf2013-06-24 18:31:25 +01004255 return 0;
4256}
4257
Will Deacon45ae7cf2013-06-24 18:31:25 +01004258static struct platform_driver arm_smmu_driver = {
4259 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01004260 .name = "arm-smmu",
4261 .of_match_table = of_match_ptr(arm_smmu_of_match),
4262 },
4263 .probe = arm_smmu_device_dt_probe,
4264 .remove = arm_smmu_device_remove,
4265};
4266
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004267static struct platform_driver qsmmuv500_tbu_driver;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004268static int __init arm_smmu_init(void)
4269{
Robin Murphy7e96c742016-09-14 15:26:46 +01004270 static bool registered;
4271 int ret = 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004272
Patrick Dalyf3ea404f2017-02-13 17:03:29 -08004273 if (registered)
4274 return 0;
4275
4276 ret = platform_driver_register(&qsmmuv500_tbu_driver);
4277 if (ret)
4278 return ret;
4279
4280 ret = platform_driver_register(&arm_smmu_driver);
4281 registered = !ret;
Robin Murphy7e96c742016-09-14 15:26:46 +01004282 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01004283}
4284
4285static void __exit arm_smmu_exit(void)
4286{
4287 return platform_driver_unregister(&arm_smmu_driver);
4288}
4289
Andreas Herrmannb1950b22013-10-01 13:39:05 +01004290subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01004291module_exit(arm_smmu_exit);
4292
Robin Murphy7e96c742016-09-14 15:26:46 +01004293static int __init arm_smmu_of_init(struct device_node *np)
4294{
4295 int ret = arm_smmu_init();
4296
4297 if (ret)
4298 return ret;
4299
4300 if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
4301 return -ENODEV;
4302
4303 return 0;
4304}
4305IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
4306IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
4307IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
4308IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
4309IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
4310IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
Robin Murphy7e96c742016-09-14 15:26:46 +01004311
Patrick Dalya0fddb62017-03-27 19:26:59 -07004312#define TCU_HW_VERSION_HLOS1 (0x18)
4313
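/*
 * Per-TBU debug registers, used below to halt client traffic through a TBU
 * and to issue software-triggered (ECATS) address translations whose result
 * is reported in DEBUG_PAR_REG.
 */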
Patrick Daly1f8a2882016-09-12 17:32:05 -07004314#define DEBUG_SID_HALT_REG 0x0
4315#define DEBUG_SID_HALT_VAL (0x1 << 16)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004316#define DEBUG_SID_HALT_SID_MASK 0x3ff
4317
4318#define DEBUG_VA_ADDR_REG 0x8
4319
4320#define DEBUG_TXN_TRIGG_REG 0x18
4321#define DEBUG_TXN_AXPROT_SHIFT 6
4322#define DEBUG_TXN_AXCACHE_SHIFT 2
4323#define DEBUG_TRX_WRITE (0x1 << 1)
4324#define DEBUG_TXN_READ (0x0 << 1)
4325#define DEBUG_TXN_TRIGGER 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004326
4327#define DEBUG_SR_HALT_ACK_REG 0x20
4328#define DEBUG_SR_HALT_ACK_VAL (0x1 << 1)
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004329#define DEBUG_SR_ECATS_RUNNING_VAL (0x1 << 0)
4330
4331#define DEBUG_PAR_REG 0x28
4332#define DEBUG_PAR_PA_MASK ((0x1ULL << 36) - 1)
4333#define DEBUG_PAR_PA_SHIFT 12
4334#define DEBUG_PAR_FAULT_VAL 0x1
Patrick Daly1f8a2882016-09-12 17:32:05 -07004335
4336#define TBU_DBG_TIMEOUT_US 30000
4337
Patrick Daly95895ba2017-08-11 14:56:38 -07004338
4339struct actlr_setting {
4340 struct arm_smmu_smr smr;
4341 u32 actlr;
4342};
4343
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004344struct qsmmuv500_archdata {
4345 struct list_head tbus;
Patrick Dalya0fddb62017-03-27 19:26:59 -07004346 void __iomem *tcu_base;
4347 u32 version;
Patrick Daly95895ba2017-08-11 14:56:38 -07004348
4349 struct actlr_setting *actlrs;
4350 u32 actlr_tbl_size;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004351};
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004352#define get_qsmmuv500_archdata(smmu) \
4353 ((struct qsmmuv500_archdata *)(smmu->archdata))
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004354
Patrick Daly1f8a2882016-09-12 17:32:05 -07004355struct qsmmuv500_tbu_device {
4356 struct list_head list;
4357 struct device *dev;
4358 struct arm_smmu_device *smmu;
4359 void __iomem *base;
4360 void __iomem *status_reg;
4361
4362 struct arm_smmu_power_resources *pwr;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004363 u32 sid_start;
4364 u32 num_sids;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004365
4366 /* Protects halt count */
4367 spinlock_t halt_lock;
4368 u32 halt_count;
4369};
4370
Patrick Daly95895ba2017-08-11 14:56:38 -07004371struct qsmmuv500_group_iommudata {
4372 bool has_actlr;
4373 u32 actlr;
4374};
4375#define to_qsmmuv500_group_iommudata(group) \
4376 ((struct qsmmuv500_group_iommudata *) \
4377 (iommu_group_get_iommudata(group)))
4378
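/*
 * Halt client traffic through the TBU. Calls are refcounted, so traffic
 * resumes only once the matching number of qsmmuv500_tbu_resume() calls
 * has been made.
 */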
Patrick Daly1f8a2882016-09-12 17:32:05 -07004379static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu)
4380{
4381 unsigned long flags;
4382 u32 val;
4383 void __iomem *base;
4384
4385 spin_lock_irqsave(&tbu->halt_lock, flags);
4386 if (tbu->halt_count) {
4387 tbu->halt_count++;
4388 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4389 return 0;
4390 }
4391
4392 base = tbu->base;
4393 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
4394 val |= DEBUG_SID_HALT_VAL;
4395 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
4396
4397 if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG,
4398 val, (val & DEBUG_SR_HALT_ACK_VAL),
4399 0, TBU_DBG_TIMEOUT_US)) {
4400 dev_err(tbu->dev, "Couldn't halt TBU!\n");
4401 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4402 return -ETIMEDOUT;
4403 }
4404
4405 tbu->halt_count = 1;
4406 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4407 return 0;
4408}
4409
4410static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
4411{
4412 unsigned long flags;
4413 u32 val;
4414 void __iomem *base;
4415
4416 spin_lock_irqsave(&tbu->halt_lock, flags);
4417 if (!tbu->halt_count) {
4418 WARN(1, "%s: bad tbu->halt_count", dev_name(tbu->dev));
4419 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4420 return;
4421
4422 } else if (tbu->halt_count > 1) {
4423 tbu->halt_count--;
4424 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4425 return;
4426 }
4427
4428 base = tbu->base;
4429 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
4430 val &= ~DEBUG_SID_HALT_VAL;
4431 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
4432
4433 tbu->halt_count = 0;
4434 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4435}
4436
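/* Find the TBU whose stream-id range contains the given SID. */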
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004437static struct qsmmuv500_tbu_device *qsmmuv500_find_tbu(
4438 struct arm_smmu_device *smmu, u32 sid)
4439{
4440 struct qsmmuv500_tbu_device *tbu = NULL;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004441 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004442
4443 list_for_each_entry(tbu, &data->tbus, list) {
4444 if (tbu->sid_start <= sid &&
4445 sid < tbu->sid_start + tbu->num_sids)
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07004446 return tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004447 }
Patrick Dalyf8ac0fb2017-04-05 18:50:00 -07004448 return NULL;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004449}
4450
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004451static int qsmmuv500_ecats_lock(struct arm_smmu_domain *smmu_domain,
4452 struct qsmmuv500_tbu_device *tbu,
4453 unsigned long *flags)
4454{
4455 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004456 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004457 u32 val;
4458
4459 spin_lock_irqsave(&smmu->atos_lock, *flags);
4460 /* The status register is not accessible on version 1.0 */
4461 if (data->version == 0x01000000)
4462 return 0;
4463
4464 if (readl_poll_timeout_atomic(tbu->status_reg,
4465 val, (val == 0x1), 0,
4466 TBU_DBG_TIMEOUT_US)) {
4467 dev_err(tbu->dev, "ECATS hw busy!\n");
4468 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4469 return -ETIMEDOUT;
4470 }
4471
4472 return 0;
4473}
4474
4475static void qsmmuv500_ecats_unlock(struct arm_smmu_domain *smmu_domain,
4476 struct qsmmuv500_tbu_device *tbu,
4477 unsigned long *flags)
4478{
4479 struct arm_smmu_device *smmu = tbu->smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004480 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004481
4482 /* The status register is not accessible on version 1.0 */
4483 if (data->version != 0x01000000)
4484 writel_relaxed(0, tbu->status_reg);
4485 spin_unlock_irqrestore(&smmu->atos_lock, *flags);
4486}
4487
4488/*
4489 * Zero means failure.
4490 */
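/*
 * Perform an ECATS translation of @iova on behalf of @sid: halt the owning
 * TBU, take the ATOS lock, trigger the debug translation and read the result
 * from DEBUG_PAR_REG, then resume the TBU.
 */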
4491static phys_addr_t qsmmuv500_iova_to_phys(
4492 struct iommu_domain *domain, dma_addr_t iova, u32 sid)
4493{
4494 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
4495 struct arm_smmu_device *smmu = smmu_domain->smmu;
4496 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
4497 struct qsmmuv500_tbu_device *tbu;
4498 int ret;
4499 phys_addr_t phys = 0;
4500 u64 val, fsr;
4501 unsigned long flags;
4502 void __iomem *cb_base;
4503 u32 sctlr_orig, sctlr;
4504 int needs_redo = 0;
4505
4506 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
4507 tbu = qsmmuv500_find_tbu(smmu, sid);
4508 if (!tbu)
4509 return 0;
4510
4511 ret = arm_smmu_power_on(tbu->pwr);
4512 if (ret)
4513 return 0;
4514
4515 /*
4516 * Disable client transactions & wait for existing operations to
4517 * complete.
4518 */
4519 ret = qsmmuv500_tbu_halt(tbu);
4520 if (ret)
4521 goto out_power_off;
4522
4523 /* Only one concurrent atos operation */
4524 ret = qsmmuv500_ecats_lock(smmu_domain, tbu, &flags);
4525 if (ret)
4526 goto out_resume;
4527
4528 /*
4529 * We can be called from an interrupt handler with FSR already set
4530 * so terminate the faulting transaction prior to starting ECATS.
4531 * No new racing faults can occur since we are in the halted state.
4532 * ECATS can trigger the fault interrupt, so disable it temporarily
4533 * and check for an interrupt manually.
4534 */
4535 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4536 if (fsr & FSR_FAULT) {
4537 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
4538 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4539 }
4540 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
4541 sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
4542 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
4543
4544redo:
4545 /* Set address and stream-id */
4546 val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
4547 val |= sid & DEBUG_SID_HALT_SID_MASK;
4548 writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
4549 writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
4550
4551 /*
4552 * Write-back Read and Write-Allocate
4553 * Privileged, non-secure, data transaction
4554 * Read operation.
4555 */
4556 val = 0xF << DEBUG_TXN_AXCACHE_SHIFT;
4557 val |= 0x3 << DEBUG_TXN_AXPROT_SHIFT;
4558 val |= DEBUG_TXN_TRIGGER;
4559 writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);
4560
4561 ret = 0;
4562 if (readl_poll_timeout_atomic(tbu->base + DEBUG_SR_HALT_ACK_REG,
4563 val, !(val & DEBUG_SR_ECATS_RUNNING_VAL),
4564 0, TBU_DBG_TIMEOUT_US)) {
4565 dev_err(tbu->dev, "ECATS translation timed out!\n");
4566 }
4567
4568 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
4569 if (fsr & FSR_FAULT) {
4570 dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx\n",
4571 fsr);
4572 ret = -EINVAL;
4573
4574 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
4575 /*
4576 * Clear pending interrupts
4577 * Barrier required to ensure that the FSR is cleared
4578 * before resuming SMMU operation
4579 */
4580 wmb();
4581 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
4582 }
4583
4584 val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
4585 if (val & DEBUG_PAR_FAULT_VAL) {
4586 dev_err(tbu->dev, "ECATS translation failed! PAR = %llx\n",
4587 val);
4588 ret = -EINVAL;
4589 }
4590
4591 phys = (val >> DEBUG_PAR_PA_SHIFT) & DEBUG_PAR_PA_MASK;
4592 if (ret < 0)
4593 phys = 0;
4594
4595 /* Reset hardware */
4596 writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
4597 writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
4598
4599 /*
4600 * After a failed translation, the next successful translation will
4601 * incorrectly be reported as a failure.
4602 */
4603 if (!phys && needs_redo++ < 2)
4604 goto redo;
4605
4606 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
4607 qsmmuv500_ecats_unlock(smmu_domain, tbu, &flags);
4608
4609out_resume:
4610 qsmmuv500_tbu_resume(tbu);
4611
4612out_power_off:
4613 arm_smmu_power_off(tbu->pwr);
4614
4615 return phys;
4616}
4617
4618static phys_addr_t qsmmuv500_iova_to_phys_hard(
4619 struct iommu_domain *domain, dma_addr_t iova)
4620{
4621 u16 sid;
4622 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
4623 struct iommu_fwspec *fwspec;
4624
4625 /* Use the device's first stream ID for the test translation */
4626 fwspec = smmu_domain->dev->iommu_fwspec;
4627 sid = (u16)fwspec->ids[0];
4628
4629 return qsmmuv500_iova_to_phys(domain, iova, sid);
4630}
4631
Patrick Daly95895ba2017-08-11 14:56:38 -07004632static void qsmmuv500_release_group_iommudata(void *data)
4633{
4634 kfree(data);
4635}
4636
4637/* If a device has a valid actlr, it must match */
4638static int qsmmuv500_device_group(struct device *dev,
4639 struct iommu_group *group)
4640{
4641 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
4642 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
4643 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
4644 struct qsmmuv500_group_iommudata *iommudata;
4645 u32 actlr, i, j, idx;
4646 struct arm_smmu_smr *smr, *smr2;
4647
4648 iommudata = to_qsmmuv500_group_iommudata(group);
4649 if (!iommudata) {
4650 iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
4651 if (!iommudata)
4652 return -ENOMEM;
4653
4654 iommu_group_set_iommudata(group, iommudata,
4655 qsmmuv500_release_group_iommudata);
4656 }
4657
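/*
 * Walk every stream mapping entry of this device and pick up any ACTLR
 * override whose SMR overlaps it; all matching table entries must agree
 * on the ACTLR value.
 */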
4658 for_each_cfg_sme(fwspec, i, idx) {
4659 smr = &smmu->smrs[idx];
4660 for (j = 0; j < data->actlr_tbl_size; j++) {
4661 smr2 = &data->actlrs[j].smr;
4662 actlr = data->actlrs[j].actlr;
4663
4664 /* Skip if the IDs differ in any bit not covered by either mask */
4665 if ((smr->id ^ smr2->id) & ~(smr->mask | smr2->mask))
4666 continue;
4667
4668 if (!iommudata->has_actlr) {
4669 iommudata->actlr = actlr;
4670 iommudata->has_actlr = true;
4671 } else if (iommudata->actlr != actlr) {
4672 return -EINVAL;
4673 }
4674 }
4675 }
4676
4677 return 0;
4678}
4679
4680static void qsmmuv500_init_cb(struct arm_smmu_domain *smmu_domain,
4681 struct device *dev)
4682{
4683 struct arm_smmu_device *smmu = smmu_domain->smmu;
4684 struct qsmmuv500_group_iommudata *iommudata =
4685 to_qsmmuv500_group_iommudata(dev->iommu_group);
4686 void __iomem *cb_base;
4687 const struct iommu_gather_ops *tlb;
4688
4689 if (!iommudata->has_actlr)
4690 return;
4691
4692 tlb = smmu_domain->pgtbl_cfg.tlb;
4693 cb_base = ARM_SMMU_CB_BASE(smmu) +
4694 ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
4695
4696 writel_relaxed(iommudata->actlr, cb_base + ARM_SMMU_CB_ACTLR);
4697
4698 /*
4699 * Flush the context bank after modifying ACTLR to ensure there
4700 * are no cache entries with stale state
4701 */
4702 tlb->tlb_flush_all(smmu_domain);
4703}
4704
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004705static int qsmmuv500_tbu_register(struct device *dev, void *cookie)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004706{
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004707 struct arm_smmu_device *smmu = cookie;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004708 struct qsmmuv500_tbu_device *tbu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004709 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004710
4711 if (!dev->driver) {
4712 dev_err(dev, "TBU failed probe, QSMMUV500 cannot continue!\n");
4713 return -EINVAL;
4714 }
4715
4716 tbu = dev_get_drvdata(dev);
4717
4718 INIT_LIST_HEAD(&tbu->list);
4719 tbu->smmu = smmu;
Patrick Dalye15b3bc2017-04-05 14:53:59 -07004720 list_add(&tbu->list, &data->tbus);
Patrick Daly1f8a2882016-09-12 17:32:05 -07004721 return 0;
4722}
4723
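/*
 * Parse the optional "qcom,actlr" property, a list of <SMR ID, SMR mask,
 * ACTLR value> triples, e.g. (values purely illustrative):
 *
 *	qcom,actlr = <0x880 0x8 0x3>;
 */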
Patrick Daly95895ba2017-08-11 14:56:38 -07004724static int qsmmuv500_read_actlr_tbl(struct arm_smmu_device *smmu)
4725{
4726 int len, i;
4727 struct device *dev = smmu->dev;
4728 struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
4729 struct actlr_setting *actlrs;
4730 const __be32 *cell;
4731
4732 cell = of_get_property(dev->of_node, "qcom,actlr", NULL);
4733 if (!cell)
4734 return 0;
4735
4736 len = of_property_count_elems_of_size(dev->of_node, "qcom,actlr",
4737 sizeof(u32) * 3);
4738 if (len < 0)
4739 return 0;
4740
4741 actlrs = devm_kzalloc(dev, sizeof(*actlrs) * len, GFP_KERNEL);
4742 if (!actlrs)
4743 return -ENOMEM;
4744
4745 for (i = 0; i < len; i++) {
4746 actlrs[i].smr.id = of_read_number(cell++, 1);
4747 actlrs[i].smr.mask = of_read_number(cell++, 1);
4748 actlrs[i].actlr = of_read_number(cell++, 1);
4749 }
4750
4751 data->actlrs = actlrs;
4752 data->actlr_tbl_size = len;
4753 return 0;
4754}
4755
Patrick Daly1f8a2882016-09-12 17:32:05 -07004756static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
4757{
Patrick Dalya0fddb62017-03-27 19:26:59 -07004758 struct resource *res;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004759 struct device *dev = smmu->dev;
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004760 struct qsmmuv500_archdata *data;
Patrick Dalya0fddb62017-03-27 19:26:59 -07004761 struct platform_device *pdev;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004762 int ret;
Patrick Daly95895ba2017-08-11 14:56:38 -07004763 u32 val;
4764 void __iomem *reg;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004765
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004766 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
4767 if (!data)
Patrick Daly1f8a2882016-09-12 17:32:05 -07004768 return -ENOMEM;
4769
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004770 INIT_LIST_HEAD(&data->tbus);
Patrick Dalya0fddb62017-03-27 19:26:59 -07004771
4772 pdev = container_of(dev, struct platform_device, dev);
4773 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcu-base");
4774 data->tcu_base = devm_ioremap_resource(dev, res);
4775 if (IS_ERR(data->tcu_base))
4776 return PTR_ERR(data->tcu_base);
4777
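/*
 * Cache the TCU hardware version; the ECATS status register is not
 * accessible on version 1.0 (see qsmmuv500_ecats_lock()).
 */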
4778 data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
Patrick Daly6b290f1e2017-03-27 19:26:59 -07004779 smmu->archdata = data;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004780
Patrick Daly95895ba2017-08-11 14:56:38 -07004781 ret = qsmmuv500_read_actlr_tbl(smmu);
4782 if (ret)
4783 return ret;
4784
4785 reg = ARM_SMMU_GR0(smmu);
4786 val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
4787 val &= ~ARM_MMU500_ACR_CACHE_LOCK;
4788 writel_relaxed(val, reg + ARM_SMMU_GR0_sACR);
4789 val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
4790 /*
4791 * Modifying the non-secure copy of the sACR register is only
4792 * allowed if permission is given in the secure sACR register.
4793 * Attempt to detect if we were able to update the value.
4794 */
4795 WARN_ON(val & ARM_MMU500_ACR_CACHE_LOCK);
4796
4797
Patrick Daly1f8a2882016-09-12 17:32:05 -07004798 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
4799 if (ret)
4800 return ret;
4801
4802 /* Attempt to register child devices */
4803 ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
4804 if (ret)
Patrick Daly6ce54262017-04-12 21:24:06 -07004805 return -EPROBE_DEFER;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004806
4807 return 0;
4808}
4809
4810struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
4811 .init = qsmmuv500_arch_init,
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004812 .iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
Patrick Daly95895ba2017-08-11 14:56:38 -07004813 .init_context_bank = qsmmuv500_init_cb,
4814 .device_group = qsmmuv500_device_group,
Patrick Daly1f8a2882016-09-12 17:32:05 -07004815};
4816
4817static const struct of_device_id qsmmuv500_tbu_of_match[] = {
4818 {.compatible = "qcom,qsmmuv500-tbu"},
4819 {}
4820};
4821
4822static int qsmmuv500_tbu_probe(struct platform_device *pdev)
4823{
4824 struct resource *res;
4825 struct device *dev = &pdev->dev;
4826 struct qsmmuv500_tbu_device *tbu;
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004827 const __be32 *cell;
4828 int len;
Patrick Daly1f8a2882016-09-12 17:32:05 -07004829
4830 tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
4831 if (!tbu)
4832 return -ENOMEM;
4833
4834 INIT_LIST_HEAD(&tbu->list);
4835 tbu->dev = dev;
4836 spin_lock_init(&tbu->halt_lock);
4837
4838 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
4839 tbu->base = devm_ioremap_resource(dev, res);
4840 if (IS_ERR(tbu->base))
4841 return PTR_ERR(tbu->base);
4842
4843 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "status-reg");
4844 tbu->status_reg = devm_ioremap_resource(dev, res);
4845 if (IS_ERR(tbu->status_reg))
4846 return PTR_ERR(tbu->status_reg);
4847
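/* "qcom,stream-id-range" holds <first SID, number of SIDs> owned by this TBU */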
Patrick Daly63b0e2c2016-11-01 16:58:57 -07004848 cell = of_get_property(dev->of_node, "qcom,stream-id-range", &len);
4849 if (!cell || len < 8)
4850 return -EINVAL;
4851
4852 tbu->sid_start = of_read_number(cell, 1);
4853 tbu->num_sids = of_read_number(cell + 1, 1);
4854
Patrick Daly1f8a2882016-09-12 17:32:05 -07004855 tbu->pwr = arm_smmu_init_power_resources(pdev);
4856 if (IS_ERR(tbu->pwr))
4857 return PTR_ERR(tbu->pwr);
4858
4859 dev_set_drvdata(dev, tbu);
4860 return 0;
4861}
4862
4863static struct platform_driver qsmmuv500_tbu_driver = {
4864 .driver = {
4865 .name = "qsmmuv500-tbu",
4866 .of_match_table = of_match_ptr(qsmmuv500_tbu_of_match),
4867 },
4868 .probe = qsmmuv500_tbu_probe,
4869};
4870
Will Deacon45ae7cf2013-06-24 18:31:25 +01004871MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
4872MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
4873MODULE_LICENSE("GPL v2");