/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/of_platform.h>
#include <linux/msm-bus.h>
#include <dt-bindings/msm/msm-bus-ids.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		500000	/* 500ms */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FSRRESTORE		0x5c
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define TLBSTATUS_SACTIVE		(1 << 0)
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
	QCOM_SMMUV500,
};

struct arm_smmu_device;
struct arm_smmu_arch_ops {
	int (*init)(struct arm_smmu_device *smmu);
	void (*device_reset)(struct arm_smmu_device *smmu);
	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
					 dma_addr_t iova);
	void (*iova_to_phys_fault)(struct iommu_domain *domain,
				   dma_addr_t iova, phys_addr_t *phys1,
				   phys_addr_t *phys_post_tlbiall);
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	s16				smendx[MAX_MASTER_STREAMIDS];
};
#define INVALID_SMENDX			-1

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

/*
 * Describes resources required for on/off power operation.
 * Separate reference count is provided for atomic/nonatomic
 * operations.
 */
struct arm_smmu_power_resources {
	struct platform_device		*pdev;
	struct device			*dev;

	struct clk			**clocks;
	int				num_clocks;

	struct regulator_bulk_data	*gdscs;
	int				num_gdscs;

	uint32_t			bus_client;
	struct msm_bus_scale_pdata	*bus_dt_data;

	/* Protects power_count */
	struct mutex			power_lock;
	int				power_count;

	/* Protects clock_refs_count */
	spinlock_t			clock_refs_lock;
	int				clock_refs_count;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
#define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;

	u32				cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
	unsigned int			num_impl_def_attach_registers;

	struct arm_smmu_power_resources *pwr;

	spinlock_t			atos_lock;

	/* protects idr */
	struct mutex			idr_mutex;
	struct idr			asid_idr;

	struct arm_smmu_arch_ops	*arch_ops;
	void				*archdata;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	u32				procid;
	u16				asid;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff
#define INVALID_CBNDX			0xff
#define INVALID_ASID			0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg)	((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg)	((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_pte_info {
	void *virt_addr;
	size_t size;
	struct list_head entry;
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	struct io_pgtable_cfg		pgtbl_cfg;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	u32				attributes;
	u32				secure_vmid;
	struct list_head		pte_info_list;
	struct list_head		unassign_list;
	struct mutex			assign_lock;
	struct list_head		secure_pool_list;
	struct iommu_domain		domain;
};

struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ 0, NULL},
};

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);

static int arm_smmu_arch_init(struct arm_smmu_device *smmu);
static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_dbg(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
{
	return (smmu_domain->secure_vmid != VMID_INVAL);
}

static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_lock(&smmu_domain->assign_lock);
}

static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_unlock(&smmu_domain->assign_lock);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

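/*
 * Masters described by the DT "iommus" property are kept in a per-SMMU
 * rb-tree, keyed by device_node pointer.
 */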
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

struct iommus_entry {
	struct list_head list;
	struct device_node *node;
	u16 streamids[MAX_MASTER_STREAMIDS];
	int num_sids;
};

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct iommus_entry *entry)
{
	int i;
	struct arm_smmu_master *master;
	struct device *dev = smmu->dev;

	master = find_smmu_master(smmu, entry->node);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			entry->node->name);
		return -EBUSY;
	}

	if (entry->num_sids > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, entry->node->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= entry->node;
	master->cfg.num_streamids	= entry->num_sids;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = entry->streamids[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				entry->node->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
		master->cfg.smendx[i] = INVALID_SMENDX;
	}
	return insert_smmu_master(smmu, master);
}

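/*
 * Walk every DT node carrying an "iommus" property, collect the stream IDs
 * that target this SMMU instance and register each such node as a master.
 */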
static int arm_smmu_parse_iommus_properties(struct arm_smmu_device *smmu,
					    int *num_masters)
{
	struct of_phandle_args iommuspec;
	struct device_node *master;

	*num_masters = 0;

	for_each_node_with_property(master, "iommus") {
		int arg_ind = 0;
		struct iommus_entry *entry, *n;
		LIST_HEAD(iommus);

		while (!of_parse_phandle_with_args(
				master, "iommus", "#iommu-cells",
				arg_ind, &iommuspec)) {
			if (iommuspec.np != smmu->dev->of_node) {
				arg_ind++;
				continue;
			}

			list_for_each_entry(entry, &iommus, list)
				if (entry->node == master)
					break;
			if (&entry->list == &iommus) {
				entry = devm_kzalloc(smmu->dev, sizeof(*entry),
						     GFP_KERNEL);
				if (!entry)
					return -ENOMEM;
				entry->node = master;
				list_add(&entry->list, &iommus);
			}
			switch (iommuspec.args_count) {
			case 0:
				/*
				 * For pci-e devices the SIDs are provided
				 * at device attach time.
				 */
				break;
			case 1:
				entry->num_sids++;
				entry->streamids[entry->num_sids - 1]
					= iommuspec.args[0];
				break;
			default:
				dev_err(smmu->dev, "iommus property has wrong #iommu-cells");
				return -EINVAL;
			}
			arg_ind++;
		}

		list_for_each_entry_safe(entry, n, &iommus, list) {
			int rc = register_smmu_master(smmu, entry);

			if (rc) {
				dev_err(smmu->dev, "Couldn't register %s\n",
					entry->node->name);
			} else {
				(*num_masters)++;
			}
			list_del(&entry->list);
			devm_kfree(smmu->dev, entry);
		}
	}

	return 0;
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_prepare(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
			while (i--)
				clk_unprepare(pwr->clocks[i]);
			break;
		}
	}
	return ret;
}

static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_unprepare(pwr->clocks[i - 1]);
}

static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_enable(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
			while (i--)
				clk_disable(pwr->clocks[i]);
			break;
		}
	}

	return ret;
}

static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_disable(pwr->clocks[i - 1]);
}

static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return 0;
	return msm_bus_scale_client_update_request(pwr->bus_client, 1);
}

static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return;
	WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
}

/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count > 0) {
		pwr->clock_refs_count++;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return 0;
	}

	ret = arm_smmu_enable_clocks(pwr);
	if (!ret)
		pwr->clock_refs_count = 1;

	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
	return ret;
}

/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
{
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count == 0) {
		WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;

	} else if (pwr->clock_refs_count > 1) {
		pwr->clock_refs_count--;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;
	}

	arm_smmu_disable_clocks(pwr);

	pwr->clock_refs_count = 0;
	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
}

static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
{
	int ret;

	mutex_lock(&pwr->power_lock);
	if (pwr->power_count > 0) {
		pwr->power_count += 1;
		mutex_unlock(&pwr->power_lock);
		return 0;
	}

	ret = regulator_bulk_enable(pwr->num_gdscs, pwr->gdscs);
	if (ret)
		goto out_unlock;

	ret = arm_smmu_request_bus(pwr);
	if (ret)
		goto out_disable_regulators;

	ret = arm_smmu_prepare_clocks(pwr);
	if (ret)
		goto out_disable_bus;

	pwr->power_count = 1;
	mutex_unlock(&pwr->power_lock);
	return 0;

out_disable_bus:
	arm_smmu_unrequest_bus(pwr);
out_disable_regulators:
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
out_unlock:
	mutex_unlock(&pwr->power_lock);
	return ret;
}

static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
{
	mutex_lock(&pwr->power_lock);
	if (pwr->power_count == 0) {
		WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
		mutex_unlock(&pwr->power_lock);
		return;

	} else if (pwr->power_count > 1) {
		pwr->power_count--;
		mutex_unlock(&pwr->power_lock);
		return;
	}

	arm_smmu_unprepare_clocks(pwr);
	arm_smmu_unrequest_bus(pwr);
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);

	mutex_unlock(&pwr->power_lock);
}

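/*
 * Full power-on: take the sleepable resources (regulators, bus vote, clock
 * prepare) first, then the atomic clock enables. May sleep, so callers in
 * atomic context must use arm_smmu_power_on_atomic() directly.
 */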
static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
{
	int ret;

	ret = arm_smmu_power_on_slow(pwr);
	if (ret)
		return ret;

	ret = arm_smmu_power_on_atomic(pwr);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	arm_smmu_power_off_slow(pwr);
	return ret;
}

static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
{
	arm_smmu_power_off_atomic(pwr);
	arm_smmu_power_off_slow(pwr);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static int arm_smmu_domain_power_on(struct iommu_domain *domain,
				    struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain)
		return arm_smmu_power_on_atomic(smmu->pwr);

	return arm_smmu_power_on(smmu->pwr);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static void arm_smmu_domain_power_off(struct iommu_domain *domain,
				      struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain) {
		arm_smmu_power_off_atomic(smmu->pwr);
		return;
	}

	arm_smmu_power_off(smmu->pwr);
}

/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
				 int cbndx)
{
	void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
	u32 val;

	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
				      !(val & TLBSTATUS_SACTIVE),
				      0, TLB_LOOP_TIMEOUT))
		dev_err(smmu->dev, "TLBSYNC timeout!\n");
}

static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
}

/* Must be called with clocks/regulators enabled */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
		__arm_smmu_tlb_sync(smmu);
	}
}

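/*
 * Invalidate by VA/ASID for stage 1 contexts, by IPA for stage 2 contexts
 * on SMMUv2, and fall back to invalidating the whole VMID otherwise.
 */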
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

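/*
 * Secure domains must (un)assign their page-table memory to the secure VM
 * via arm_smmu_prepare_pgtable()/arm_smmu_unprepare_pgtable(). Freed tables
 * are parked in a per-domain pool so a later allocation of the same size can
 * reuse already-assigned memory instead of repeating the assignment.
 */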
struct arm_smmu_secure_pool_chunk {
	void *addr;
	size_t size;
	struct list_head list;
};

static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
					 size_t size)
{
	struct arm_smmu_secure_pool_chunk *it;

	list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
		if (it->size == size) {
			void *addr = it->addr;

			list_del(&it->list);
			kfree(it);
			return addr;
		}
	}

	return NULL;
}

static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
				    void *addr, size_t size)
{
	struct arm_smmu_secure_pool_chunk *chunk;

	chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
	if (!chunk)
		return -ENOMEM;

	chunk->addr = addr;
	chunk->size = size;
	memset(addr, 0, size);
	list_add(&chunk->list, &smmu_domain->secure_pool_list);

	return 0;
}

static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_secure_pool_chunk *it, *i;

	list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
		arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
		/* pages will be freed later (after being unassigned) */
		kfree(it);
	}
}

static void *arm_smmu_alloc_pages_exact(void *cookie,
					size_t size, gfp_t gfp_mask)
{
	int ret;
	void *page;
	struct arm_smmu_domain *smmu_domain = cookie;

	if (!arm_smmu_is_domain_secure(smmu_domain))
		return alloc_pages_exact(size, gfp_mask);

	page = arm_smmu_secure_pool_remove(smmu_domain, size);
	if (page)
		return page;

	page = alloc_pages_exact(size, gfp_mask);
	if (page) {
		ret = arm_smmu_prepare_pgtable(page, cookie);
		if (ret) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return page;
}

static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	if (!arm_smmu_is_domain_secure(smmu_domain)) {
		free_pages_exact(virt, size);
		return;
	}

	if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
		arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
	.alloc_pages_exact = arm_smmu_alloc_pages_exact,
	.free_pages_exact = arm_smmu_free_pages_exact,
};

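/*
 * On an unhandled context fault, retry the hardware ATOS translation before
 * and after a full TLB invalidate so the fault log can distinguish a stale
 * TLB entry from genuinely bad page tables.
 */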
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001229static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
1230 dma_addr_t iova, u32 fsr)
1231{
1232 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001233 struct arm_smmu_device *smmu;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001234 phys_addr_t phys;
Patrick Dalyad441dd2016-09-15 15:50:46 -07001235 phys_addr_t phys_post_tlbiall;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001236
1237 smmu = smmu_domain->smmu;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001238
Patrick Dalyad441dd2016-09-15 15:50:46 -07001239 if (smmu->arch_ops && smmu->arch_ops->iova_to_phys_fault) {
1240 smmu->arch_ops->iova_to_phys_fault(domain, iova, &phys,
1241 &phys_post_tlbiall);
1242 } else {
1243 phys = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001244 arm_smmu_tlb_inv_context(smmu_domain);
Patrick Dalyad441dd2016-09-15 15:50:46 -07001245 phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001246 }
1247
Patrick Dalyad441dd2016-09-15 15:50:46 -07001248 if (phys != phys_post_tlbiall) {
1249 dev_err(smmu->dev,
1250 "ATOS results differed across TLBIALL...\n"
1251 "Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
1252 }
1253 if (!phys_post_tlbiall) {
1254 dev_err(smmu->dev,
1255 "ATOS still failed. If the page tables look good (check the software table walk) then hardware might be misbehaving.\n");
1256 }
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001257
Patrick Dalyad441dd2016-09-15 15:50:46 -07001258 return phys_post_tlbiall;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001259}
1260
Will Deacon45ae7cf2013-06-24 18:31:25 +01001261static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
1262{
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001263 int flags, ret, tmp;
Patrick Daly5ba28112016-08-30 19:18:52 -07001264 u32 fsr, fsynr, resume;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001265 unsigned long iova;
1266 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +01001267 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001268 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1269 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001270 void __iomem *cb_base;
Shalaj Jain04059c52015-03-03 13:34:59 -08001271 void __iomem *gr1_base;
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001272 bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001273 phys_addr_t phys_soft;
Shalaj Jain04059c52015-03-03 13:34:59 -08001274 u32 frsynra;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001275 bool non_fatal_fault = !!(smmu_domain->attributes &
1276 DOMAIN_ATTR_NON_FATAL_FAULTS);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001277
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001278 static DEFINE_RATELIMIT_STATE(_rs,
1279 DEFAULT_RATELIMIT_INTERVAL,
1280 DEFAULT_RATELIMIT_BURST);
1281
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001282 ret = arm_smmu_power_on(smmu->pwr);
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001283 if (ret)
1284 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001285
Shalaj Jain04059c52015-03-03 13:34:59 -08001286 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001287 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001288 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
1289
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001290 if (!(fsr & FSR_FAULT)) {
1291 ret = IRQ_NONE;
1292 goto out_power_off;
1293 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001294
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001295 if (fatal_asf && (fsr & FSR_ASF)) {
1296 dev_err(smmu->dev,
1297 "Took an address size fault. Refusing to recover.\n");
1298 BUG();
1299 }
1300
Will Deacon45ae7cf2013-06-24 18:31:25 +01001301 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
Patrick Daly5ba28112016-08-30 19:18:52 -07001302 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001303 if (fsr & FSR_TF)
1304 flags |= IOMMU_FAULT_TRANSLATION;
1305 if (fsr & FSR_PF)
1306 flags |= IOMMU_FAULT_PERMISSION;
Mitchel Humpherysdd75e332015-08-19 11:02:33 -07001307 if (fsr & FSR_EF)
1308 flags |= IOMMU_FAULT_EXTERNAL;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001309 if (fsr & FSR_SS)
1310 flags |= IOMMU_FAULT_TRANSACTION_STALLED;
Patrick Daly5ba28112016-08-30 19:18:52 -07001311
Robin Murphyf9a05f02016-04-13 18:13:01 +01001312 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001313 phys_soft = arm_smmu_iova_to_phys(domain, iova);
Shalaj Jain04059c52015-03-03 13:34:59 -08001314 frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
1315 frsynra &= CBFRSYNRA_SID_MASK;
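	/*
	 * Give any registered fault handler the first chance to deal with the
	 * fault; a return of 0 or -EBUSY counts as handled (with -EBUSY
	 * deliberately keeping the transaction stalled, see below).
	 */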
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001316 tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
1317 if (!tmp || (tmp == -EBUSY)) {
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001318 dev_dbg(smmu->dev,
1319 "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1320 iova, fsr, fsynr, cfg->cbndx);
1321 dev_dbg(smmu->dev,
1322 "soft iova-to-phys=%pa\n", &phys_soft);
Patrick Daly5ba28112016-08-30 19:18:52 -07001323 ret = IRQ_HANDLED;
Shrenuj Bansald5083c02015-09-18 14:59:09 -07001324 resume = RESUME_TERMINATE;
Patrick Daly5ba28112016-08-30 19:18:52 -07001325 } else {
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001326 phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
1327 fsr);
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001328 if (__ratelimit(&_rs)) {
1329 dev_err(smmu->dev,
1330 "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1331 iova, fsr, fsynr, cfg->cbndx);
1332 dev_err(smmu->dev, "FAR = %016lx\n",
1333 (unsigned long)iova);
1334 dev_err(smmu->dev,
1335 "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
1336 fsr,
1337 (fsr & 0x02) ? "TF " : "",
1338 (fsr & 0x04) ? "AFF " : "",
1339 (fsr & 0x08) ? "PF " : "",
1340 (fsr & 0x10) ? "EF " : "",
1341 (fsr & 0x20) ? "TLBMCF " : "",
1342 (fsr & 0x40) ? "TLBLKF " : "",
1343 (fsr & 0x80) ? "MHF " : "",
1344 (fsr & 0x40000000) ? "SS " : "",
1345 (fsr & 0x80000000) ? "MULTI " : "");
1346 dev_err(smmu->dev,
1347 "soft iova-to-phys=%pa\n", &phys_soft);
Mitchel Humpherysd03b65d2015-11-05 11:50:29 -08001348 if (!phys_soft)
1349 dev_err(smmu->dev,
1350 "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
1351 dev_name(smmu->dev));
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001352 dev_err(smmu->dev,
1353 "hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
1354 dev_err(smmu->dev, "SID=0x%x\n", frsynra);
1355 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001356 ret = IRQ_NONE;
1357 resume = RESUME_TERMINATE;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001358 if (!non_fatal_fault) {
1359 dev_err(smmu->dev,
1360 "Unhandled arm-smmu context fault!\n");
1361 BUG();
1362 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001363 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001364
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001365 /*
1366 * If the client returns -EBUSY, do not clear FSR and do not RESUME
1367 * if stalled. This is required to keep the IOMMU client stalled on
1368 * the outstanding fault. This gives the client a chance to take any
1369 * debug action and then terminate the stalled transaction.
1370 * So, the sequence in case of stall on fault should be:
1371 * 1) Do not clear FSR or write to RESUME here
1372 * 2) Client takes any debug action
1373 * 3) Client terminates the stalled transaction and resumes the IOMMU
1374 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1375 * not before so that the fault remains outstanding. This ensures
1376 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1377 * need to be terminated.
1378 */
1379 if (tmp != -EBUSY) {
1380 /* Clear the faulting FSR */
1381 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001382
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001383 /*
1384 * Barrier required to ensure that the FSR is cleared
1385 * before resuming SMMU operation
1386 */
1387 wmb();
1388
1389 /* Retry or terminate any stalled transactions */
1390 if (fsr & FSR_SS)
1391 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1392 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001393
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001394out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001395 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001396
Patrick Daly5ba28112016-08-30 19:18:52 -07001397 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001398}
1399
1400static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1401{
1402 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1403 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001404 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001405
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001406 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001407 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001408
Will Deacon45ae7cf2013-06-24 18:31:25 +01001409 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1410 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1411 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1412 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1413
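	/*
	 * A clear GFSR means this interrupt was not for us (the line may be
	 * shared), so drop the power vote and bail out.
	 */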
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001414 if (!gfsr) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001415 arm_smmu_power_off(smmu->pwr);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001416 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001417 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001418
Will Deacon45ae7cf2013-06-24 18:31:25 +01001419 dev_err_ratelimited(smmu->dev,
1420 "Unexpected global fault, this could be serious\n");
1421 dev_err_ratelimited(smmu->dev,
1422 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1423 gfsr, gfsynr0, gfsynr1, gfsynr2);
1424
1425 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001426 arm_smmu_power_off(smmu->pwr);
Will Deaconadaba322013-07-31 19:21:26 +01001427 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001428}
1429
Will Deacon518f7132014-11-14 17:17:54 +00001430static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1431 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001432{
Robin Murphyb94df6f2016-08-11 17:44:06 +01001433 u32 reg, reg2;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001434 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001435 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +01001436 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1437 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001438 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001439
Will Deacon45ae7cf2013-06-24 18:31:25 +01001440 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001441 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1442 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001443
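	/*
	 * CBA2R (SMMUv2 and later only): select the AArch32/AArch64 context
	 * format and, where supported, program the 16-bit VMID.
	 */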
Will Deacon4a1c93c2015-03-04 12:21:03 +00001444 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001445 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1446 reg = CBA2R_RW64_64BIT;
1447 else
1448 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001449 /* 16-bit VMIDs live in CBA2R */
1450 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001451 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001452
Will Deacon4a1c93c2015-03-04 12:21:03 +00001453 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
1454 }
1455
Will Deacon45ae7cf2013-06-24 18:31:25 +01001456 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001457 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001458 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001459 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001460
Will Deacon57ca90f2014-02-06 14:59:05 +00001461 /*
1462 * Use the weakest shareability/memory types, so they are
1463 * overridden by the ttbcr/pte.
1464 */
1465 if (stage1) {
1466 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1467 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001468 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1469 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001470 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001471 }
Will Deacon44680ee2014-06-25 11:29:12 +01001472 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001473
Will Deacon518f7132014-11-14 17:17:54 +00001474 /* TTBRs */
1475 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001476 u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001477
Robin Murphyb94df6f2016-08-11 17:44:06 +01001478 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1479 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
1480 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
1481 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
1482 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
1483 writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
1484 } else {
1485 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1486 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1487 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
1488 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
1489 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1490 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
1491 }
Will Deacon518f7132014-11-14 17:17:54 +00001492 } else {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001493 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001494 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +00001495 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001496
Will Deacon518f7132014-11-14 17:17:54 +00001497 /* TTBCR */
1498 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001499 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1500 reg = pgtbl_cfg->arm_v7s_cfg.tcr;
1501 reg2 = 0;
1502 } else {
1503 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1504 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
1505 reg2 |= TTBCR2_SEP_UPSTREAM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001506 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001507 if (smmu->version > ARM_SMMU_V1)
1508 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001509 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001510 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001511 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001512 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001513
Will Deacon518f7132014-11-14 17:17:54 +00001514 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001515 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001516 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1517 reg = pgtbl_cfg->arm_v7s_cfg.prrr;
1518 reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
1519 } else {
1520 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1521 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1522 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001523 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001524 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001525 }
1526
Will Deacon45ae7cf2013-06-24 18:31:25 +01001527 /* SCTLR */
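	/*
	 * Enable context fault reporting and the memory attribute defaults;
	 * translation (SCTLR_M) is left disabled for stage-1 bypass domains.
	 */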
Robin Murphyb94df6f2016-08-11 17:44:06 +01001528 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
Patrick Dalye62d3362016-03-15 18:58:28 -07001529 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) ||
1530 !stage1)
1531 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001532 if (stage1)
1533 reg |= SCTLR_S1_ASIDPNE;
1534#ifdef __BIG_ENDIAN
1535 reg |= SCTLR_E;
1536#endif
Will Deacon25724842013-08-21 13:49:53 +01001537 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001538}
1539
Patrick Dalyc190d932016-08-30 17:23:28 -07001540static int arm_smmu_init_asid(struct iommu_domain *domain,
1541 struct arm_smmu_device *smmu)
1542{
1543 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1544 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1545 bool dynamic = is_dynamic_domain(domain);
1546 int ret;
1547
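	/*
	 * Static domains derive their ASID directly from the context bank
	 * index; dynamic domains share a context bank, so each one takes a
	 * unique ASID from the IDR, allocated above the range used by the
	 * static domains.
	 */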
1548 if (!dynamic) {
1549 cfg->asid = cfg->cbndx + 1;
1550 } else {
1551 mutex_lock(&smmu->idr_mutex);
1552 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1553 smmu->num_context_banks + 2,
1554 MAX_ASID + 1, GFP_KERNEL);
1555
1556 mutex_unlock(&smmu->idr_mutex);
1557 if (ret < 0) {
1558 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1559 ret);
1560 return ret;
1561 }
1562 cfg->asid = ret;
1563 }
1564 return 0;
1565}
1566
1567static void arm_smmu_free_asid(struct iommu_domain *domain)
1568{
1569 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1570 struct arm_smmu_device *smmu = smmu_domain->smmu;
1571 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1572 bool dynamic = is_dynamic_domain(domain);
1573
1574 if (cfg->asid == INVALID_ASID || !dynamic)
1575 return;
1576
1577 mutex_lock(&smmu->idr_mutex);
1578 idr_remove(&smmu->asid_idr, cfg->asid);
1579 mutex_unlock(&smmu->idr_mutex);
1580}
1581
Will Deacon45ae7cf2013-06-24 18:31:25 +01001582static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Will Deacon44680ee2014-06-25 11:29:12 +01001583 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001584{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001585 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001586 unsigned long ias, oas;
1587 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001588 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001589 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001590 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001591 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyce6786f2016-11-09 14:19:23 -08001592 unsigned long quirks = 0;
Patrick Dalyc190d932016-08-30 17:23:28 -07001593 bool dynamic;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001594
Will Deacon518f7132014-11-14 17:17:54 +00001595 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001596 if (smmu_domain->smmu)
1597 goto out_unlock;
1598
Patrick Dalyc190d932016-08-30 17:23:28 -07001599 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1600 smmu_domain->cfg.asid = INVALID_ASID;
1601
Robin Murphy98006992016-04-20 14:53:33 +01001602 /* We're bypassing these SIDs, so don't allocate an actual context */
1603 if (domain->type == IOMMU_DOMAIN_DMA) {
1604 smmu_domain->smmu = smmu;
1605 goto out_unlock;
1606 }
1607
Patrick Dalyc190d932016-08-30 17:23:28 -07001608 dynamic = is_dynamic_domain(domain);
1609 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1610 dev_err(smmu->dev, "dynamic domains not supported\n");
1611 ret = -EPERM;
1612 goto out_unlock;
1613 }
1614
Will Deaconc752ce42014-06-25 22:46:31 +01001615 /*
1616 * Mapping the requested stage onto what we support is surprisingly
1617 * complicated, mainly because the spec allows S1+S2 SMMUs without
1618 * support for nested translation. That means we end up with the
1619 * following table:
1620 *
1621 * Requested Supported Actual
1622 * S1 N S1
1623 * S1 S1+S2 S1
1624 * S1 S2 S2
1625 * S1 S1 S1
1626 * N N N
1627 * N S1+S2 S2
1628 * N S2 S2
1629 * N S1 S1
1630 *
1631 * Note that you can't actually request stage-2 mappings.
1632 */
1633 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1634 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1635 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1636 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1637
Robin Murphy7602b872016-04-28 17:12:09 +01001638 /*
1639 * Choosing a suitable context format is even more fiddly. Until we
1640 * grow some way for the caller to express a preference, and/or move
1641 * the decision into the io-pgtable code where it arguably belongs,
1642 * just aim for the closest thing to the rest of the system, and hope
1643 * that the hardware isn't esoteric enough that we can't assume AArch64
1644 * support to be a superset of AArch32 support...
1645 */
1646 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1647 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001648 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
1649 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
1650 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
1651 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
1652 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +01001653 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1654 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1655 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1656 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1657 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1658
1659 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1660 ret = -EINVAL;
1661 goto out_unlock;
1662 }
1663
Will Deaconc752ce42014-06-25 22:46:31 +01001664 switch (smmu_domain->stage) {
1665 case ARM_SMMU_DOMAIN_S1:
1666 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1667 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001668 ias = smmu->va_size;
1669 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001670 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001671 fmt = ARM_64_LPAE_S1;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001672 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +00001673 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001674 ias = min(ias, 32UL);
1675 oas = min(oas, 40UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001676 } else {
1677 fmt = ARM_V7S;
1678 ias = min(ias, 32UL);
1679 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +01001680 }
Will Deaconc752ce42014-06-25 22:46:31 +01001681 break;
1682 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001683 /*
1684 * Nested translation is not implemented yet, so fall through and
1685 * treat this as stage-2 only. Likely to change if/when KVM gets involved.
1686 */
Will Deaconc752ce42014-06-25 22:46:31 +01001687 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001688 cfg->cbar = CBAR_TYPE_S2_TRANS;
1689 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001690 ias = smmu->ipa_size;
1691 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001692 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001693 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001694 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001695 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001696 ias = min(ias, 40UL);
1697 oas = min(oas, 40UL);
1698 }
Will Deaconc752ce42014-06-25 22:46:31 +01001699 break;
1700 default:
1701 ret = -EINVAL;
1702 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001703 }
1704
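	/* Domains with DOMAIN_ATTR_FAST use the fast-mapping page-table format */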
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001705 if (is_fast)
1706 fmt = ARM_V8L_FAST;
1707
Patrick Dalyce6786f2016-11-09 14:19:23 -08001708 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
1709 quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001710
Patrick Dalyc190d932016-08-30 17:23:28 -07001711 /* Dynamic domains must set cbndx through domain attribute */
1712 if (!dynamic) {
1713 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
Will Deacon45ae7cf2013-06-24 18:31:25 +01001714 smmu->num_context_banks);
Patrick Dalyc190d932016-08-30 17:23:28 -07001715 if (ret < 0)
1716 goto out_unlock;
1717 cfg->cbndx = ret;
1718 }
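	/*
	 * SMMUv1 may have fewer context interrupts than context banks, so
	 * distribute them round-robin; from v2 onwards each context bank has
	 * its own interrupt.
	 */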
Robin Murphyb7862e32016-04-13 18:13:03 +01001719 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001720 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1721 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001722 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001723 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001724 }
1725
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001726 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
Patrick Dalyce6786f2016-11-09 14:19:23 -08001727 .quirks = quirks,
Robin Murphyd5466352016-05-09 17:20:09 +01001728 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +00001729 .ias = ias,
1730 .oas = oas,
1731 .tlb = &arm_smmu_gather_ops,
Robin Murphy2df7a252015-07-29 19:46:06 +01001732 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +00001733 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001734
Will Deacon518f7132014-11-14 17:17:54 +00001735 smmu_domain->smmu = smmu;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001736 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1737 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001738 if (!pgtbl_ops) {
1739 ret = -ENOMEM;
1740 goto out_clear_smmu;
1741 }
1742
Patrick Dalyc11d1082016-09-01 15:52:44 -07001743 /*
1744 * assign any page table memory that might have been allocated
1745 * during alloc_io_pgtable_ops
1746 */
Patrick Dalye271f212016-10-04 13:24:49 -07001747 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001748 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001749 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001750
Robin Murphyd5466352016-05-09 17:20:09 +01001751 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001752 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Will Deacon518f7132014-11-14 17:17:54 +00001753
Patrick Dalyc190d932016-08-30 17:23:28 -07001754 /* Assign an asid */
1755 ret = arm_smmu_init_asid(domain, smmu);
1756 if (ret)
1757 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001758
Patrick Dalyc190d932016-08-30 17:23:28 -07001759 if (!dynamic) {
1760 /* Initialise the context bank with our page table cfg */
1761 arm_smmu_init_context_bank(smmu_domain,
1762 &smmu_domain->pgtbl_cfg);
1763
1764 /*
1765 * Request context fault interrupt. Do this last to avoid the
1766 * handler seeing a half-initialised domain state.
1767 */
1768 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1769 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001770 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1771 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001772 if (ret < 0) {
1773 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1774 cfg->irptndx, irq);
1775 cfg->irptndx = INVALID_IRPTNDX;
1776 goto out_clear_smmu;
1777 }
1778 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001779 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001780 }
Will Deacon518f7132014-11-14 17:17:54 +00001781 mutex_unlock(&smmu_domain->init_mutex);
1782
1783 /* Publish page table ops for map/unmap */
1784 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001785 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001786
Will Deacon518f7132014-11-14 17:17:54 +00001787out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001788 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001789 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001790out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001791 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001792 return ret;
1793}
1794
Patrick Daly77db4f92016-10-14 15:34:10 -07001795static void arm_smmu_domain_reinit(struct arm_smmu_domain *smmu_domain)
1796{
1797 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1798 smmu_domain->cfg.cbndx = INVALID_CBNDX;
1799 smmu_domain->secure_vmid = VMID_INVAL;
1800}
1801
Will Deacon45ae7cf2013-06-24 18:31:25 +01001802static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1803{
Joerg Roedel1d672632015-03-26 13:43:10 +01001804 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001805 struct arm_smmu_device *smmu = smmu_domain->smmu;
1806 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01001807 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001808 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07001809 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001810 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001811
Robin Murphy98006992016-04-20 14:53:33 +01001812 if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001813 return;
1814
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001815 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001816 if (ret) {
1817 WARN_ONCE(ret, "Whoops, powering on smmu %p failed. Leaking context bank\n",
1818 smmu);
1819 return;
1820 }
1821
Patrick Dalyc190d932016-08-30 17:23:28 -07001822 dynamic = is_dynamic_domain(domain);
1823 if (dynamic) {
1824 arm_smmu_free_asid(domain);
1825 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001826 arm_smmu_power_off(smmu->pwr);
Patrick Dalye271f212016-10-04 13:24:49 -07001827 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001828 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001829 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001830 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly77db4f92016-10-14 15:34:10 -07001831 arm_smmu_domain_reinit(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001832 return;
1833 }
1834
Will Deacon518f7132014-11-14 17:17:54 +00001835 /*
1836 * Disable the context bank and free the page tables before freeing
1837 * it.
1838 */
Will Deacon44680ee2014-06-25 11:29:12 +01001839 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001840 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01001841
Will Deacon44680ee2014-06-25 11:29:12 +01001842 if (cfg->irptndx != INVALID_IRPTNDX) {
1843 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08001844 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001845 }
1846
Markus Elfring44830b02015-11-06 18:32:41 +01001847 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07001848 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001849 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001850 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001851 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001852 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001853
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001854 arm_smmu_power_off(smmu->pwr);
Patrick Daly77db4f92016-10-14 15:34:10 -07001855 arm_smmu_domain_reinit(smmu_domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001856}
1857
Joerg Roedel1d672632015-03-26 13:43:10 +01001858static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001859{
1860 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001861
Patrick Daly09801312016-08-29 17:02:52 -07001862 /* Do not support DOMAIN_DMA for now */
1863 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01001864 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001865 /*
1866 * Allocate the domain and initialise some of its data structures.
1867 * We can't really do anything meaningful until we've added a
1868 * master.
1869 */
1870 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1871 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001872 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001873
Robin Murphy9adb9592016-01-26 18:06:36 +00001874 if (type == IOMMU_DOMAIN_DMA &&
1875 iommu_get_dma_cookie(&smmu_domain->domain)) {
1876 kfree(smmu_domain);
1877 return NULL;
1878 }
1879
Will Deacon518f7132014-11-14 17:17:54 +00001880 mutex_init(&smmu_domain->init_mutex);
1881 spin_lock_init(&smmu_domain->pgtbl_lock);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001882 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
1883 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07001884 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001885 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Patrick Daly77db4f92016-10-14 15:34:10 -07001886 arm_smmu_domain_reinit(smmu_domain);
Joerg Roedel1d672632015-03-26 13:43:10 +01001887
1888 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001889}
1890
Joerg Roedel1d672632015-03-26 13:43:10 +01001891static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001892{
Joerg Roedel1d672632015-03-26 13:43:10 +01001893 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01001894
1895 /*
1896 * Free the domain resources. We assume that all devices have
1897 * already been detached.
1898 */
Robin Murphy9adb9592016-01-26 18:06:36 +00001899 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001900 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001901 kfree(smmu_domain);
1902}
1903
Robin Murphy468f4942016-09-12 17:13:49 +01001904static int arm_smmu_alloc_smr(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001905{
1906 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001907
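	/*
	 * Claim the first free Stream Match Register; the cmpxchg on ->valid
	 * makes the claim atomic without needing a lock.
	 */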
Robin Murphy468f4942016-09-12 17:13:49 +01001908 for (i = 0; i < smmu->num_mapping_groups; i++)
1909 if (!cmpxchg(&smmu->smrs[i].valid, false, true))
1910 return i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001911
Robin Murphy468f4942016-09-12 17:13:49 +01001912 return INVALID_SMENDX;
1913}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001914
Robin Murphy468f4942016-09-12 17:13:49 +01001915static void arm_smmu_free_smr(struct arm_smmu_device *smmu, int idx)
1916{
1917 writel_relaxed(~SMR_VALID, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
1918 WRITE_ONCE(smmu->smrs[idx].valid, false);
1919}
1920
1921static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
1922{
1923 struct arm_smmu_smr *smr = smmu->smrs + idx;
1924 u32 reg = (smr->id & smmu->streamid_mask) << SMR_ID_SHIFT |
1925 (smr->mask & smmu->smr_mask_mask) << SMR_MASK_SHIFT;
1926
1927 if (smr->valid)
1928 reg |= SMR_VALID;
1929 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
1930}
1931
1932static int arm_smmu_master_alloc_smes(struct arm_smmu_device *smmu,
1933 struct arm_smmu_master_cfg *cfg)
1934{
1935 struct arm_smmu_smr *smrs = smmu->smrs;
1936 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001937
Will Deacon44680ee2014-06-25 11:29:12 +01001938 /* Allocate the SMRs on the SMMU */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001939 for (i = 0; i < cfg->num_streamids; ++i) {
Robin Murphy468f4942016-09-12 17:13:49 +01001940 if (cfg->smendx[i] != INVALID_SMENDX)
1941 return -EEXIST;
1942
1943 /* ...except on stream indexing hardware, of course */
1944 if (!smrs) {
1945 cfg->smendx[i] = cfg->streamids[i];
1946 continue;
1947 }
1948
1949 idx = arm_smmu_alloc_smr(smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001950 if (idx < 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001951 dev_err(smmu->dev, "failed to allocate free SMR\n");
1952 goto err_free_smrs;
1953 }
Robin Murphy468f4942016-09-12 17:13:49 +01001954 cfg->smendx[i] = idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001955
Robin Murphy468f4942016-09-12 17:13:49 +01001956 smrs[idx].id = cfg->streamids[i];
1957 smrs[idx].mask = 0; /* We don't currently share SMRs */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001958 }
1959
Robin Murphy468f4942016-09-12 17:13:49 +01001960 if (!smrs)
1961 return 0;
1962
Will Deacon45ae7cf2013-06-24 18:31:25 +01001963 /* It worked! Now, poke the actual hardware */
Robin Murphy468f4942016-09-12 17:13:49 +01001964 for (i = 0; i < cfg->num_streamids; ++i)
1965 arm_smmu_write_smr(smmu, cfg->smendx[i]);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001966
Will Deacon45ae7cf2013-06-24 18:31:25 +01001967 return 0;
1968
1969err_free_smrs:
Robin Murphy468f4942016-09-12 17:13:49 +01001970 while (i--) {
1971 arm_smmu_free_smr(smmu, cfg->smendx[i]);
1972 cfg->smendx[i] = INVALID_SMENDX;
1973 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001974 return -ENOSPC;
1975}
1976
Robin Murphy468f4942016-09-12 17:13:49 +01001977static void arm_smmu_master_free_smes(struct arm_smmu_device *smmu,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001978 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001979{
1980 int i;
Will Deacon43b412b2014-07-15 11:22:24 +01001981
Will Deacon45ae7cf2013-06-24 18:31:25 +01001982 /* Invalidate the SMRs before freeing back to the allocator */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001983 for (i = 0; i < cfg->num_streamids; ++i) {
Robin Murphy468f4942016-09-12 17:13:49 +01001984 if (smmu->smrs)
1985 arm_smmu_free_smr(smmu, cfg->smendx[i]);
Mitchel Humpherys29073202014-07-08 09:52:18 -07001986
Robin Murphy468f4942016-09-12 17:13:49 +01001987 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001988 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001989}
1990
Will Deacon45ae7cf2013-06-24 18:31:25 +01001991static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001992 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001993{
1994 int i, ret;
Will Deacon44680ee2014-06-25 11:29:12 +01001995 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001996 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1997
Will Deacon5f634952016-04-20 14:53:32 +01001998 /*
1999 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
2000 * for all devices behind the SMMU. Note that we need to take
2001 * care configuring SMRs for devices that are both a platform_device
2002 * and a PCI device (i.e. a PCI host controller).
2003 */
2004 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
2005 return 0;
2006
Will Deacon8f68f8e2014-07-15 11:27:08 +01002007 /* Devices in an IOMMU group may already be configured */
Robin Murphy468f4942016-09-12 17:13:49 +01002008 ret = arm_smmu_master_alloc_smes(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002009 if (ret)
Will Deacon8f68f8e2014-07-15 11:27:08 +01002010 return ret == -EEXIST ? 0 : ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002011
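	/*
	 * Route each of the master's stream IDs to this domain's context bank
	 * by programming the corresponding S2CR for translation.
	 */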
Will Deacona9a1b0b2014-05-01 18:05:08 +01002012 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002013 u32 idx, s2cr;
Mitchel Humpherys29073202014-07-08 09:52:18 -07002014
Robin Murphy468f4942016-09-12 17:13:49 +01002015 idx = cfg->smendx[i];
Patrick Dalyf4930442016-06-27 20:50:14 -07002016 s2cr = S2CR_TYPE_TRANS |
Will Deacon44680ee2014-06-25 11:29:12 +01002017 (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002018 writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
2019 }
2020
2021 return 0;
2022}
2023
2024static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01002025 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002026{
Will Deacon43b412b2014-07-15 11:22:24 +01002027 int i;
Will Deacon44680ee2014-06-25 11:29:12 +01002028 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon43b412b2014-07-15 11:22:24 +01002029 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002030
2031 /*
2032 * We *must* clear the S2CR first, because freeing the SMR means
2033 * that it can be re-allocated immediately.
2034 */
Will Deacon43b412b2014-07-15 11:22:24 +01002035 for (i = 0; i < cfg->num_streamids; ++i) {
Robin Murphy468f4942016-09-12 17:13:49 +01002036 int idx = cfg->smendx[i];
Robin Murphy25a1c962016-02-10 14:25:33 +00002037 u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
Will Deacon43b412b2014-07-15 11:22:24 +01002038
Robin Murphy468f4942016-09-12 17:13:49 +01002039 /*
2040 * An IOMMU group is torn down by the first device to be
2041 * removed
2042 */
2043 if (idx == INVALID_SMENDX)
2044 return;
2045
Robin Murphy25a1c962016-02-10 14:25:33 +00002046 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
Will Deacon43b412b2014-07-15 11:22:24 +01002047 }
2048
Robin Murphy468f4942016-09-12 17:13:49 +01002049 arm_smmu_master_free_smes(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002050}
2051
Patrick Daly09801312016-08-29 17:02:52 -07002052static void arm_smmu_detach_dev(struct iommu_domain *domain,
2053 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002054{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002055 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07002056 struct arm_smmu_device *smmu = smmu_domain->smmu;
2057 struct arm_smmu_master_cfg *cfg;
2058 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07002059 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07002060
2061 if (dynamic)
2062 return;
2063
2064 cfg = find_smmu_master_cfg(dev);
2065 if (!cfg)
2066 return;
2067
2068 if (!smmu) {
2069 dev_err(dev, "Domain not attached; cannot detach!\n");
2070 return;
2071 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002072
2073 dev->archdata.iommu = NULL;
2074 arm_smmu_domain_remove_master(smmu_domain, cfg);
Patrick Daly8befb662016-08-17 20:03:28 -07002075
2076 /* Remove additional vote for atomic power */
2077 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002078 WARN_ON(arm_smmu_power_on_atomic(smmu->pwr));
2079 arm_smmu_power_off(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07002080 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002081}
2082
Patrick Dalye271f212016-10-04 13:24:49 -07002083static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07002084{
Patrick Dalye271f212016-10-04 13:24:49 -07002085 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002086 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2087 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
2088 int source_vmid = VMID_HLOS;
2089 struct arm_smmu_pte_info *pte_info, *temp;
2090
Patrick Dalye271f212016-10-04 13:24:49 -07002091 if (!arm_smmu_is_domain_secure(smmu_domain))
2092 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002093
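	/*
	 * Hand each queued page-table page over to the hypervisor: HLOS keeps
	 * read/write access and the domain's secure VMID gains read-only
	 * access.
	 */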
Patrick Dalye271f212016-10-04 13:24:49 -07002094 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002095 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2096 PAGE_SIZE, &source_vmid, 1,
2097 dest_vmids, dest_perms, 2);
2098 if (WARN_ON(ret))
2099 break;
2100 }
2101
2102 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2103 entry) {
2104 list_del(&pte_info->entry);
2105 kfree(pte_info);
2106 }
Patrick Dalye271f212016-10-04 13:24:49 -07002107 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002108}
2109
2110static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2111{
2112 int ret;
2113 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002114 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002115 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2116 struct arm_smmu_pte_info *pte_info, *temp;
2117
Patrick Dalye271f212016-10-04 13:24:49 -07002118 if (!arm_smmu_is_domain_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002119 return;
2120
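	/*
	 * Return the queued pages to exclusive HLOS ownership (read/write/exec)
	 * before freeing them; they were previously shared with the secure
	 * VMID.
	 */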
2121 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2122 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2123 PAGE_SIZE, source_vmlist, 2,
2124 &dest_vmids, &dest_perms, 1);
2125 if (WARN_ON(ret))
2126 break;
2127 free_pages_exact(pte_info->virt_addr, pte_info->size);
2128 }
2129
2130 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2131 entry) {
2132 list_del(&pte_info->entry);
2133 kfree(pte_info);
2134 }
2135}
2136
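/*
 * These callbacks can be invoked by the page-table code in atomic context
 * (note the GFP_ATOMIC allocations), so they only queue pages on the domain
 * lists; the actual hyp_assign_phys() calls happen later from
 * arm_smmu_assign_table() and arm_smmu_unassign_table().
 */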
2137static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2138{
2139 struct arm_smmu_domain *smmu_domain = cookie;
2140 struct arm_smmu_pte_info *pte_info;
2141
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002142 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002143
2144 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2145 if (!pte_info)
2146 return;
2147
2148 pte_info->virt_addr = addr;
2149 pte_info->size = size;
2150 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2151}
2152
2153static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2154{
2155 struct arm_smmu_domain *smmu_domain = cookie;
2156 struct arm_smmu_pte_info *pte_info;
2157
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002158 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002159
2160 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2161 if (!pte_info)
2162 return -ENOMEM;
2163 pte_info->virt_addr = addr;
2164 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2165 return 0;
2166}
2167
Will Deacon45ae7cf2013-06-24 18:31:25 +01002168static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2169{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002170 int ret;
Joerg Roedel1d672632015-03-26 13:43:10 +01002171 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002172 struct arm_smmu_device *smmu;
Will Deacona9a1b0b2014-05-01 18:05:08 +01002173 struct arm_smmu_master_cfg *cfg;
Patrick Daly8befb662016-08-17 20:03:28 -07002174 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002175
Will Deacon8f68f8e2014-07-15 11:27:08 +01002176 smmu = find_smmu_for_device(dev);
Will Deacon44680ee2014-06-25 11:29:12 +01002177 if (!smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002178 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2179 return -ENXIO;
2180 }
2181
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002182 /* Enable Clocks and Power */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002183 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002184 if (ret)
2185 return ret;
2186
Will Deacon518f7132014-11-14 17:17:54 +00002187 /* Ensure that the domain is finalised */
2188 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002189 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002190 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002191
Patrick Dalyc190d932016-08-30 17:23:28 -07002192 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002193 if (is_dynamic_domain(domain)) {
2194 ret = 0;
2195 goto out_power_off;
2196 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002197
Will Deacon45ae7cf2013-06-24 18:31:25 +01002198 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002199 * Sanity check the domain. We don't support domains across
2200 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002201 */
Will Deacon518f7132014-11-14 17:17:54 +00002202 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002203 dev_err(dev,
2204 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002205 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002206 ret = -EINVAL;
2207 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002208 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002209
2210 /* Looks ok, so add the device to the domain */
Will Deacon8f68f8e2014-07-15 11:27:08 +01002211 cfg = find_smmu_master_cfg(dev);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002212 if (!cfg) {
2213 ret = -ENODEV;
2214 goto out_power_off;
2215 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002216
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002217 /* Detach the dev from its current domain */
2218 if (dev->archdata.iommu)
Patrick Daly09801312016-08-29 17:02:52 -07002219 arm_smmu_detach_dev(dev->archdata.iommu, dev);
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002220
Will Deacon844e35b2014-07-17 11:23:51 +01002221 ret = arm_smmu_domain_add_master(smmu_domain, cfg);
2222 if (!ret)
2223 dev->archdata.iommu = domain;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002224
2225out_power_off:
Patrick Daly466237d2016-10-14 15:59:14 -07002226 /*
2227 * Keep an additional vote for non-atomic power until domain is
2228 * detached
2229 */
2230 if (!ret && atomic_domain) {
2231 WARN_ON(arm_smmu_power_on(smmu->pwr));
2232 arm_smmu_power_off_atomic(smmu->pwr);
2233 }
2234
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002235 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002236
Will Deacon45ae7cf2013-06-24 18:31:25 +01002237 return ret;
2238}
2239
Will Deacon45ae7cf2013-06-24 18:31:25 +01002240static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002241 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002242{
Will Deacon518f7132014-11-14 17:17:54 +00002243 int ret;
2244 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002245 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002246 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002247
Will Deacon518f7132014-11-14 17:17:54 +00002248 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002249 return -ENODEV;
2250
Patrick Dalye271f212016-10-04 13:24:49 -07002251 arm_smmu_secure_domain_lock(smmu_domain);
2252
Will Deacon518f7132014-11-14 17:17:54 +00002253 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2254 ret = ops->map(ops, iova, paddr, size, prot);
2255 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002256
2257 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002258 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002259
Will Deacon518f7132014-11-14 17:17:54 +00002260 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002261}
2262
2263static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2264 size_t size)
2265{
Will Deacon518f7132014-11-14 17:17:54 +00002266 size_t ret;
2267 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002268 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002269 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002270
Will Deacon518f7132014-11-14 17:17:54 +00002271 if (!ops)
2272 return 0;
2273
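	/*
	 * Unmapping issues TLB maintenance through the io-pgtable gather ops,
	 * so hold a power/clock vote for the duration.
	 */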
Patrick Daly8befb662016-08-17 20:03:28 -07002274 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002275 if (ret)
2276 return ret;
2277
Patrick Dalye271f212016-10-04 13:24:49 -07002278 arm_smmu_secure_domain_lock(smmu_domain);
2279
Will Deacon518f7132014-11-14 17:17:54 +00002280 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2281 ret = ops->unmap(ops, iova, size);
2282 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002283
Patrick Daly8befb662016-08-17 20:03:28 -07002284 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002285 /*
2286 * While splitting up block mappings, we might allocate page table
2287 * memory during unmap, so the VMIDs need to be assigned to the
2288 * memory here as well.
2289 */
2290 arm_smmu_assign_table(smmu_domain);
2291 /* Also unassign any pages that were freed during unmap */
2292 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002293 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002294 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002295}
2296
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002297static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2298 struct scatterlist *sg, unsigned int nents, int prot)
2299{
2300 int ret;
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002301 size_t size;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002302 unsigned long flags;
2303 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2304 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2305
2306 if (!ops)
2307 return -ENODEV;
2308
Patrick Daly8befb662016-08-17 20:03:28 -07002309 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002310 if (ret)
2311 return ret;
2312
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002313 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002314 ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002315 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002316
2317 if (!ret)
2318 arm_smmu_unmap(domain, iova, size);
2319
Patrick Daly8befb662016-08-17 20:03:28 -07002320 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002321 arm_smmu_assign_table(smmu_domain);
2322
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002323 return ret;
2324}
2325
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002326static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyad441dd2016-09-15 15:50:46 -07002327 dma_addr_t iova)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002328{
Joerg Roedel1d672632015-03-26 13:43:10 +01002329 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002330 struct arm_smmu_device *smmu = smmu_domain->smmu;
2331 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2332 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2333 struct device *dev = smmu->dev;
2334 void __iomem *cb_base;
2335 u32 tmp;
2336 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002337 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002338
2339 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2340
Robin Murphy661d9622015-05-27 17:09:34 +01002341 /* ATS1 registers can only be written atomically */
2342 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002343 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002344 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2345 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002346 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002347
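	/*
	 * Wait for the translation operation to complete; on timeout, report
	 * the software table-walk result and return failure (0).
	 */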
2348 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2349 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002350 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002351 dev_err(dev,
2352 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2353 &iova, &phys);
2354 phys = 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002355 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002356 }
2357
Robin Murphyf9a05f02016-04-13 18:13:01 +01002358 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002359 if (phys & CB_PAR_F) {
2360 dev_err(dev, "translation fault!\n");
2361 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002362 phys = 0;
2363 } else {
2364 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002365 }
Patrick Dalyad441dd2016-09-15 15:50:46 -07002366
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002367 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002368}
2369
Will Deacon45ae7cf2013-06-24 18:31:25 +01002370static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002371 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002372{
Will Deacon518f7132014-11-14 17:17:54 +00002373 phys_addr_t ret;
2374 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002375 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002376 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002377
Will Deacon518f7132014-11-14 17:17:54 +00002378 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002379 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002380
Will Deacon518f7132014-11-14 17:17:54 +00002381 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002382 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002383 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002384
Will Deacon518f7132014-11-14 17:17:54 +00002385 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002386}
2387
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002388/*
2389 * This function can sleep, and cannot be called from atomic context. Will
2390 * power on register block if required. This restriction does not apply to the
2391 * original iova_to_phys() op.
2392 */
2393static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2394 dma_addr_t iova)
2395{
2396 phys_addr_t ret = 0;
2397 unsigned long flags;
2398 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002399
Patrick Dalyad441dd2016-09-15 15:50:46 -07002400 if (smmu_domain->smmu->arch_ops &&
2401 smmu_domain->smmu->arch_ops->iova_to_phys_hard)
2402 return smmu_domain->smmu->arch_ops->iova_to_phys_hard(
2403 domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002404
2405 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2406 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2407 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyad441dd2016-09-15 15:50:46 -07002408 ret = __arm_smmu_iova_to_phys_hard(domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002409
2410 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2411
2412 return ret;
2413}
2414
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002415static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002416{
Will Deacond0948942014-06-24 17:30:10 +01002417 switch (cap) {
2418 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002419 /*
2420 * Return true here as the SMMU can always send out coherent
2421 * requests.
2422 */
2423 return true;
Will Deacond0948942014-06-24 17:30:10 +01002424 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002425 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002426 case IOMMU_CAP_NOEXEC:
2427 return true;
Will Deacond0948942014-06-24 17:30:10 +01002428 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002429 return false;
Will Deacond0948942014-06-24 17:30:10 +01002430 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002431}
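/*
 * Example consumer (sketch only): the core routes this through
 * iommu_capable(), so a client can ask whether DMA mastered through the
 * SMMU is cache coherent before deciding to skip manual cache maintenance:
 *
 *	if (iommu_capable(&platform_bus_type, IOMMU_CAP_CACHE_COHERENCY))
 *		use_coherent_path = true;
 *
 * use_coherent_path is a hypothetical flag in the caller, not part of this
 * driver.
 */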
Will Deacon45ae7cf2013-06-24 18:31:25 +01002432
Will Deacona9a1b0b2014-05-01 18:05:08 +01002433static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
2434{
2435 *((u16 *)data) = alias;
2436 return 0; /* Continue walking */
Will Deacon45ae7cf2013-06-24 18:31:25 +01002437}
2438
Will Deacon8f68f8e2014-07-15 11:27:08 +01002439static void __arm_smmu_release_pci_iommudata(void *data)
2440{
2441 kfree(data);
2442}
2443
Joerg Roedelaf659932015-10-21 23:51:41 +02002444static int arm_smmu_init_pci_device(struct pci_dev *pdev,
2445 struct iommu_group *group)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002446{
Will Deacon03edb222015-01-19 14:27:33 +00002447 struct arm_smmu_master_cfg *cfg;
Joerg Roedelaf659932015-10-21 23:51:41 +02002448 u16 sid;
2449 int i;
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002450
Will Deacon03edb222015-01-19 14:27:33 +00002451 cfg = iommu_group_get_iommudata(group);
2452 if (!cfg) {
Will Deacona9a1b0b2014-05-01 18:05:08 +01002453 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
Joerg Roedelaf659932015-10-21 23:51:41 +02002454 if (!cfg)
2455 return -ENOMEM;
Will Deacona9a1b0b2014-05-01 18:05:08 +01002456
Will Deacon03edb222015-01-19 14:27:33 +00002457 iommu_group_set_iommudata(group, cfg,
2458 __arm_smmu_release_pci_iommudata);
Will Deacona9a1b0b2014-05-01 18:05:08 +01002459 }
2460
Joerg Roedelaf659932015-10-21 23:51:41 +02002461 if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
2462 return -ENOSPC;
Will Deacona9a1b0b2014-05-01 18:05:08 +01002463
Will Deacon03edb222015-01-19 14:27:33 +00002464 /*
2465 * Assume Stream ID == Requester ID for now.
2466 * We need a way to describe the ID mappings in FDT.
2467 */
2468 pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
2469 for (i = 0; i < cfg->num_streamids; ++i)
2470 if (cfg->streamids[i] == sid)
2471 break;
2472
2473 /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
Robin Murphy468f4942016-09-12 17:13:49 +01002474 if (i == cfg->num_streamids) {
2475 cfg->streamids[i] = sid;
2476 cfg->smendx[i] = INVALID_SMENDX;
2477 cfg->num_streamids++;
2478 }
Will Deacon03edb222015-01-19 14:27:33 +00002479
2480 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002481}
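/*
 * Aliasing sketch: for an endpoint behind a PCIe-to-PCI bridge,
 * pci_for_each_dma_alias() also visits the bridge's requester ID, so the
 * final value left in 'sid' is the most upstream alias rather than the
 * endpoint's own bus/devfn. Devices that alias to the same RID therefore
 * contribute a single entry to cfg->streamids[], which is exactly the
 * duplicate-SID case the loop above filters out.
 */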
2482
Joerg Roedelaf659932015-10-21 23:51:41 +02002483static int arm_smmu_init_platform_device(struct device *dev,
2484 struct iommu_group *group)
Will Deacon03edb222015-01-19 14:27:33 +00002485{
Will Deacon03edb222015-01-19 14:27:33 +00002486 struct arm_smmu_device *smmu = find_smmu_for_device(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02002487 struct arm_smmu_master *master;
Will Deacon03edb222015-01-19 14:27:33 +00002488
2489 if (!smmu)
2490 return -ENODEV;
2491
2492 master = find_smmu_master(smmu, dev->of_node);
2493 if (!master)
2494 return -ENODEV;
2495
Will Deacon03edb222015-01-19 14:27:33 +00002496 iommu_group_set_iommudata(group, &master->cfg, NULL);
Joerg Roedelaf659932015-10-21 23:51:41 +02002497
2498 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00002499}
2500
2501static int arm_smmu_add_device(struct device *dev)
2502{
Joerg Roedelaf659932015-10-21 23:51:41 +02002503 struct iommu_group *group;
Will Deacon03edb222015-01-19 14:27:33 +00002504
Joerg Roedelaf659932015-10-21 23:51:41 +02002505 group = iommu_group_get_for_dev(dev);
2506 if (IS_ERR(group))
2507 return PTR_ERR(group);
2508
Peng Fan9a4a9d82015-11-20 16:56:18 +08002509 iommu_group_put(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02002510 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00002511}
2512
Will Deacon45ae7cf2013-06-24 18:31:25 +01002513static void arm_smmu_remove_device(struct device *dev)
2514{
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002515 iommu_group_remove_device(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002516}
2517
Joerg Roedelaf659932015-10-21 23:51:41 +02002518static struct iommu_group *arm_smmu_device_group(struct device *dev)
2519{
2520 struct iommu_group *group;
2521 int ret;
2522
2523 if (dev_is_pci(dev))
2524 group = pci_device_group(dev);
2525 else
2526 group = generic_device_group(dev);
2527
Patrick Daly26319442016-10-20 13:20:15 -07002528 if (IS_ERR_OR_NULL(group))
Joerg Roedelaf659932015-10-21 23:51:41 +02002529 return group;
2530
2531 if (dev_is_pci(dev))
2532 ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
2533 else
2534 ret = arm_smmu_init_platform_device(dev, group);
2535
2536 if (ret) {
2537 iommu_group_put(group);
2538 group = ERR_PTR(ret);
2539 }
2540
2541 return group;
2542}
2543
Will Deaconc752ce42014-06-25 22:46:31 +01002544static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
2545 enum iommu_attr attr, void *data)
2546{
Joerg Roedel1d672632015-03-26 13:43:10 +01002547 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002548 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01002549
2550 switch (attr) {
2551 case DOMAIN_ATTR_NESTING:
2552 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
2553 return 0;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08002554 case DOMAIN_ATTR_PT_BASE_ADDR:
2555 *((phys_addr_t *)data) =
2556 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2557 return 0;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002558 case DOMAIN_ATTR_CONTEXT_BANK:
2559 /* context bank index isn't valid until we are attached */
2560 if (smmu_domain->smmu == NULL)
2561 return -ENODEV;
2562
2563 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
2564 ret = 0;
2565 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002566 case DOMAIN_ATTR_TTBR0: {
2567 u64 val;
2568 struct arm_smmu_device *smmu = smmu_domain->smmu;
2569 /* not valid until we are attached */
2570 if (smmu == NULL)
2571 return -ENODEV;
2572
2573 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2574 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
2575 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
2576 << (TTBRn_ASID_SHIFT);
2577 *((u64 *)data) = val;
2578 ret = 0;
2579 break;
2580 }
2581 case DOMAIN_ATTR_CONTEXTIDR:
2582 /* not valid until attached */
2583 if (smmu_domain->smmu == NULL)
2584 return -ENODEV;
2585 *((u32 *)data) = smmu_domain->cfg.procid;
2586 ret = 0;
2587 break;
2588 case DOMAIN_ATTR_PROCID:
2589 *((u32 *)data) = smmu_domain->cfg.procid;
2590 ret = 0;
2591 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002592 case DOMAIN_ATTR_DYNAMIC:
2593 *((int *)data) = !!(smmu_domain->attributes
2594 & (1 << DOMAIN_ATTR_DYNAMIC));
2595 ret = 0;
2596 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002597 case DOMAIN_ATTR_NON_FATAL_FAULTS:
2598 *((int *)data) = !!(smmu_domain->attributes
2599 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
2600 ret = 0;
2601 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07002602 case DOMAIN_ATTR_S1_BYPASS:
2603 *((int *)data) = !!(smmu_domain->attributes
2604 & (1 << DOMAIN_ATTR_S1_BYPASS));
2605 ret = 0;
2606 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002607 case DOMAIN_ATTR_SECURE_VMID:
2608 *((int *)data) = smmu_domain->secure_vmid;
2609 ret = 0;
2610 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08002611 case DOMAIN_ATTR_PGTBL_INFO: {
2612 struct iommu_pgtbl_info *info = data;
2613
2614 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
2615 ret = -ENODEV;
2616 break;
2617 }
2618 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
2619 ret = 0;
2620 break;
2621 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002622 case DOMAIN_ATTR_FAST:
2623 *((int *)data) = !!(smmu_domain->attributes
2624 & (1 << DOMAIN_ATTR_FAST));
2625 ret = 0;
2626 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002627 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2628 *((int *)data) = !!(smmu_domain->attributes &
2629 (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
2630 ret = 0;
2631 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002632 default:
2633 return -ENODEV;
2634 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002635 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002636}
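/*
 * Query sketch: the attributes above are reached through
 * iommu_domain_get_attr(). For example, after a successful attach a client
 * can discover which context bank the domain was given:
 *
 *	unsigned int cbndx;
 *	int ret = iommu_domain_get_attr(domain, DOMAIN_ATTR_CONTEXT_BANK,
 *					&cbndx);
 *
 *	if (!ret)
 *		pr_info("domain uses context bank %u\n", cbndx);
 */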
2637
2638static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
2639 enum iommu_attr attr, void *data)
2640{
Will Deacon518f7132014-11-14 17:17:54 +00002641 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01002642 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01002643
Will Deacon518f7132014-11-14 17:17:54 +00002644 mutex_lock(&smmu_domain->init_mutex);
2645
Will Deaconc752ce42014-06-25 22:46:31 +01002646 switch (attr) {
2647 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00002648 if (smmu_domain->smmu) {
2649 ret = -EPERM;
2650 goto out_unlock;
2651 }
2652
Will Deaconc752ce42014-06-25 22:46:31 +01002653 if (*(int *)data)
2654 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2655 else
2656 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2657
Will Deacon518f7132014-11-14 17:17:54 +00002658 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002659 case DOMAIN_ATTR_PROCID:
2660 if (smmu_domain->smmu != NULL) {
2661 dev_err(smmu_domain->smmu->dev,
2662 "cannot change procid attribute while attached\n");
2663 ret = -EBUSY;
2664 break;
2665 }
2666 smmu_domain->cfg.procid = *((u32 *)data);
2667 ret = 0;
2668 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002669 case DOMAIN_ATTR_DYNAMIC: {
2670 int dynamic = *((int *)data);
2671
2672 if (smmu_domain->smmu != NULL) {
2673 dev_err(smmu_domain->smmu->dev,
2674 "cannot change dynamic attribute while attached\n");
2675 ret = -EBUSY;
2676 break;
2677 }
2678
2679 if (dynamic)
2680 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
2681 else
2682 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
2683 ret = 0;
2684 break;
2685 }
2686 case DOMAIN_ATTR_CONTEXT_BANK:
2687 /* context bank can't be set while attached */
2688 if (smmu_domain->smmu != NULL) {
2689 ret = -EBUSY;
2690 break;
2691 }
2692 /* ... and it can only be set for dynamic contexts. */
2693 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
2694 ret = -EINVAL;
2695 break;
2696 }
2697
2698 /* this will be validated during attach */
2699 smmu_domain->cfg.cbndx = *((unsigned int *)data);
2700 ret = 0;
2701 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002702 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
2703 u32 non_fatal_faults = *((int *)data);
2704
2705 if (non_fatal_faults)
2706 smmu_domain->attributes |=
2707 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
2708 else
2709 smmu_domain->attributes &=
2710 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
2711 ret = 0;
2712 break;
2713 }
Patrick Dalye62d3362016-03-15 18:58:28 -07002714 case DOMAIN_ATTR_S1_BYPASS: {
2715 int bypass = *((int *)data);
2716
2717 /* bypass can't be changed while attached */
2718 if (smmu_domain->smmu != NULL) {
2719 ret = -EBUSY;
2720 break;
2721 }
2722 if (bypass)
2723 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
2724 else
2725 smmu_domain->attributes &=
2726 ~(1 << DOMAIN_ATTR_S1_BYPASS);
2727
2728 ret = 0;
2729 break;
2730 }
Patrick Daly8befb662016-08-17 20:03:28 -07002731 case DOMAIN_ATTR_ATOMIC:
2732 {
2733 int atomic_ctx = *((int *)data);
2734
2735 /* can't be changed while attached */
2736 if (smmu_domain->smmu != NULL) {
2737 ret = -EBUSY;
2738 break;
2739 }
2740 if (atomic_ctx)
2741 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
2742 else
2743 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
2744 break;
2745 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002746 case DOMAIN_ATTR_SECURE_VMID:
2747 if (smmu_domain->secure_vmid != VMID_INVAL) {
2748 ret = -ENODEV;
2749 WARN(1, "secure vmid already set!");
2750 break;
2751 }
2752 smmu_domain->secure_vmid = *((int *)data);
2753 break;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002754 case DOMAIN_ATTR_FAST:
2755 if (*((int *)data))
2756 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
2757 ret = 0;
2758 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002759 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2760 /* can't be changed while attached */
2761 if (smmu_domain->smmu != NULL) {
2762 ret = -EBUSY;
2763 break;
2764 }
2765 if (*((int *)data))
2766 smmu_domain->attributes |=
2767 1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
2768 ret = 0;
2769 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002770 default:
Will Deacon518f7132014-11-14 17:17:54 +00002771 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01002772 }
Will Deacon518f7132014-11-14 17:17:54 +00002773
2774out_unlock:
2775 mutex_unlock(&smmu_domain->init_mutex);
2776 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002777}
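/*
 * Configuration sketch: attributes that return -EBUSY above must be set
 * between iommu_domain_alloc() and the first attach. For example, a
 * dynamic domain that reuses the context bank of an already-attached
 * parent domain (cbndx assumed to have been read back via
 * DOMAIN_ATTR_CONTEXT_BANK on the parent):
 *
 *	int one = 1, ret;
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_DYNAMIC, &one);
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_CONTEXT_BANK, &cbndx);
 *	ret = iommu_attach_device(domain, dev);
 */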
2778
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002779static void arm_smmu_trigger_fault(struct iommu_domain *domain,
2780 unsigned long flags)
2781{
2782 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2783 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2784 struct arm_smmu_device *smmu;
2785 void __iomem *cb_base;
2786
2787 if (!smmu_domain->smmu) {
2788 pr_err("Can't trigger faults on non-attached domains\n");
2789 return;
2790 }
2791
2792 smmu = smmu_domain->smmu;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002793 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002794 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002795
2796 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2797 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
2798 flags, cfg->cbndx);
2799 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07002800 /* give the interrupt time to fire... */
2801 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002802
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002803 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002804}
2805
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002806static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
2807 unsigned long offset)
2808{
2809 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2810 struct arm_smmu_device *smmu;
2811 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2812 void __iomem *cb_base;
2813 unsigned long val;
2814
2815 if (offset >= SZ_4K) {
2816 pr_err("Invalid offset: 0x%lx\n", offset);
2817 return 0;
2818 }
2819
2820 smmu = smmu_domain->smmu;
2821 if (!smmu) {
2822 WARN(1, "Can't read registers of a detached domain\n");
2823 val = 0;
2824 return val;
2825 }
2826
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002827 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002828 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002829
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002830 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2831 val = readl_relaxed(cb_base + offset);
2832
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002833 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002834 return val;
2835}
2836
2837static void arm_smmu_reg_write(struct iommu_domain *domain,
2838 unsigned long offset, unsigned long val)
2839{
2840 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2841 struct arm_smmu_device *smmu;
2842 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2843 void __iomem *cb_base;
2844
2845 if (offset >= SZ_4K) {
2846 pr_err("Invalid offset: 0x%lx\n", offset);
2847 return;
2848 }
2849
2850 smmu = smmu_domain->smmu;
2851 if (!smmu) {
2852 WARN(1, "Can't write registers of a detached domain\n");
2853 return;
2854 }
2855
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002856 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002857 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002858
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002859 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2860 writel_relaxed(val, cb_base + offset);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002861
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002862 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002863}
2864
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08002865static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
2866{
2867 arm_smmu_tlb_inv_context(to_smmu_domain(domain));
2868}
2869
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002870static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
2871{
2872 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2873
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002874 return arm_smmu_power_on(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002875}
2876
2877static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
2878{
2879 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2880
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002881 arm_smmu_power_off(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002882}
2883
Will Deacon518f7132014-11-14 17:17:54 +00002884static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01002885 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01002886 .domain_alloc = arm_smmu_domain_alloc,
2887 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01002888 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07002889 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01002890 .map = arm_smmu_map,
2891 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002892 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01002893 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002894 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01002895 .add_device = arm_smmu_add_device,
2896 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02002897 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01002898 .domain_get_attr = arm_smmu_domain_get_attr,
2899 .domain_set_attr = arm_smmu_domain_set_attr,
Will Deacon518f7132014-11-14 17:17:54 +00002900 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002901 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002902 .reg_read = arm_smmu_reg_read,
2903 .reg_write = arm_smmu_reg_write,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08002904 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002905 .enable_config_clocks = arm_smmu_enable_config_clocks,
2906 .disable_config_clocks = arm_smmu_disable_config_clocks,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002907};
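/*
 * Registration sketch (assuming the usual bus_set_iommu() wiring at init
 * time): the ops table above is what the IOMMU core dispatches to once the
 * driver claims a bus, e.g.
 *
 *	bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
 *
 * pgsize_bitmap starts out as -1UL and is narrowed to the probed page-size
 * support in arm_smmu_device_cfg_probe().
 */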
2908
Patrick Dalyad441dd2016-09-15 15:50:46 -07002909#define IMPL_DEF1_MICRO_MMU_CTRL 0
2910#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
2911#define MICRO_MMU_CTRL_IDLE (1 << 3)
2912
2913/* Definitions for implementation-defined registers */
2914#define ACTLR_QCOM_OSH_SHIFT 28
2915#define ACTLR_QCOM_OSH 1
2916
2917#define ACTLR_QCOM_ISH_SHIFT 29
2918#define ACTLR_QCOM_ISH 1
2919
2920#define ACTLR_QCOM_NSH_SHIFT 30
2921#define ACTLR_QCOM_NSH 1
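/*
 * With the shifts above, the ACTLR value that qsmmuv2_device_reset()
 * programs into every context bank works out to
 *
 *	(ACTLR_QCOM_OSH << 28) | (ACTLR_QCOM_ISH << 29) |
 *	(ACTLR_QCOM_NSH << 30) == 0x70000000
 *
 * i.e. the OSH, ISH and NSH bits are all set.
 */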
2922
2923static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002924{
2925 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002926 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002927
2928 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
2929 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
2930 0, 30000)) {
2931 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
2932 return -EBUSY;
2933 }
2934
2935 return 0;
2936}
2937
Patrick Dalyad441dd2016-09-15 15:50:46 -07002938static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002939{
2940 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
2941 u32 reg;
2942
2943 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2944 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
2945 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2946
Patrick Dalyad441dd2016-09-15 15:50:46 -07002947 return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002948}
2949
Patrick Dalyad441dd2016-09-15 15:50:46 -07002950static int qsmmuv2_halt(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002951{
Patrick Dalyad441dd2016-09-15 15:50:46 -07002952 return __qsmmuv2_halt(smmu, true);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002953}
2954
Patrick Dalyad441dd2016-09-15 15:50:46 -07002955static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002956{
Patrick Dalyad441dd2016-09-15 15:50:46 -07002957 return __qsmmuv2_halt(smmu, false);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002958}
2959
Patrick Dalyad441dd2016-09-15 15:50:46 -07002960static void qsmmuv2_resume(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002961{
2962 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
2963 u32 reg;
2964
2965 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2966 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
2967 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2968}
2969
Patrick Dalyad441dd2016-09-15 15:50:46 -07002970static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002971{
2972 int i;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002973 u32 val;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002974 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002975 void __iomem *cb_base;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002976
Patrick Dalyad441dd2016-09-15 15:50:46 -07002977 /*
2978 * SCTLR.M must be disabled here per ARM SMMUv2 spec
2979 * to prevent table walks with an inconsistent state.
2980 */
2981 for (i = 0; i < smmu->num_context_banks; ++i) {
2982 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
2983 val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
2984 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
2985 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
2986 writel_relaxed(val, cb_base + ARM_SMMU_CB_ACTLR);
2987 }
2988
2989 /* Program implementation defined registers */
2990 qsmmuv2_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002991 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
2992 writel_relaxed(regs[i].value,
2993 ARM_SMMU_GR0(smmu) + regs[i].offset);
Patrick Dalyad441dd2016-09-15 15:50:46 -07002994 qsmmuv2_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002995}
2996
Patrick Dalyad441dd2016-09-15 15:50:46 -07002997static phys_addr_t __qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
2998 dma_addr_t iova, bool halt)
2999{
3000 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3001 struct arm_smmu_device *smmu = smmu_domain->smmu;
3002 int ret;
3003 phys_addr_t phys = 0;
3004 unsigned long flags;
3005
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003006 ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003007 if (ret)
3008 return 0;
3009
3010 if (halt) {
3011 ret = qsmmuv2_halt(smmu);
3012 if (ret)
3013 goto out_power_off;
3014 }
3015
3016 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
3017 spin_lock(&smmu->atos_lock);
3018 phys = __arm_smmu_iova_to_phys_hard(domain, iova);
3019 spin_unlock(&smmu->atos_lock);
3020 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
3021
3022 if (halt)
3023 qsmmuv2_resume(smmu);
3024
3025out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003026 arm_smmu_power_off(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003027 return phys;
3028}
3029
3030static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
3031 dma_addr_t iova)
3032{
3033 return __qsmmuv2_iova_to_phys_hard(domain, iova, true);
3034}
3035
3036static void qsmmuv2_iova_to_phys_fault(
3037 struct iommu_domain *domain,
3038 dma_addr_t iova, phys_addr_t *phys,
3039 phys_addr_t *phys_post_tlbiall)
3040{
3041 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
3042 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
3043 struct arm_smmu_device *smmu;
3044 void __iomem *cb_base;
3045 u64 sctlr, sctlr_orig;
3046 u32 fsr;
3047
3048 smmu = smmu_domain->smmu;
3049 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
3050
3051 qsmmuv2_halt_nowait(smmu);
3052
3053 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
3054
3055 qsmmuv2_wait_for_halt(smmu);
3056
3057 /* clear FSR to allow ATOS to log any faults */
3058 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
3059 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
3060
3061 /* disable stall mode momentarily */
3062 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3063 sctlr = sctlr_orig & ~SCTLR_CFCFG;
3064 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
3065
3066 *phys = __qsmmuv2_iova_to_phys_hard(domain, iova, false);
3067 arm_smmu_tlb_inv_context(smmu_domain);
3068 *phys_post_tlbiall = __qsmmuv2_iova_to_phys_hard(domain, iova, false);
3069
3070 /* restore SCTLR */
3071 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
3072
3073 qsmmuv2_resume(smmu);
3074}
3075
3076struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
3077 .device_reset = qsmmuv2_device_reset,
3078 .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
3079 .iova_to_phys_fault = qsmmuv2_iova_to_phys_fault,
3080};
3081
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003082static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003083{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003084 int i;
3085 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003086 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003087 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003088
Peng Fan3ca37122016-05-03 21:50:30 +08003089 /*
3090 * Before clearing ARM_MMU500_ACTLR_CPRE, we need to
3091 * clear the CACHE_LOCK bit of ACR first; note that the
3092 * CACHE_LOCK bit is only present in MMU-500r2 onwards.
3093 */
3094 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
3095 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
3096 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
3097 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
3098 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
3099 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
3100 }
3101
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003102 /* Make sure all context banks are disabled and clear CB_FSR */
3103 for (i = 0; i < smmu->num_context_banks; ++i) {
3104 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3105 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
3106 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003107 /*
3108 * Disable MMU-500's not-particularly-beneficial next-page
3109 * prefetcher for the sake of errata #841119 and #826419.
3110 */
3111 if (smmu->model == ARM_MMU500) {
3112 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
3113 reg &= ~ARM_MMU500_ACTLR_CPRE;
3114 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
3115 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003116 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003117}
3118
3119static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
3120{
3121 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy468f4942016-09-12 17:13:49 +01003122 int i;
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003123 u32 reg;
3124
3125 /* clear global FSR */
3126 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3127 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3128
Robin Murphy468f4942016-09-12 17:13:49 +01003129 /*
3130 * Reset stream mapping groups: Initial values mark all SMRn as
3131 * invalid and all S2CRn as bypass unless overridden.
3132 */
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003133 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003134 for (i = 0; i < smmu->num_mapping_groups; ++i) {
Robin Murphy468f4942016-09-12 17:13:49 +01003135 reg = disable_bypass ? S2CR_TYPE_FAULT
3136 : S2CR_TYPE_BYPASS;
3137 if (smmu->smrs)
3138 arm_smmu_write_smr(smmu, i);
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003139 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
3140 }
3141
3142 arm_smmu_context_bank_reset(smmu);
3143 }
Will Deacon1463fe42013-07-31 19:21:27 +01003144
Will Deacon45ae7cf2013-06-24 18:31:25 +01003145 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003146 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
3147 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
3148
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003149 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003150
Will Deacon45ae7cf2013-06-24 18:31:25 +01003151 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003152 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003153
3154 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003155 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003156
Robin Murphy25a1c962016-02-10 14:25:33 +00003157 /* Enable client access, handling unmatched streams as appropriate */
3158 reg &= ~sCR0_CLIENTPD;
3159 if (disable_bypass)
3160 reg |= sCR0_USFCFG;
3161 else
3162 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003163
3164 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003165 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003166
3167 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003168 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003169
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003170 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3171 reg |= sCR0_VMID16EN;
3172
Will Deacon45ae7cf2013-06-24 18:31:25 +01003173 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003174 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003175 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Dalyd7476202016-09-08 18:23:28 -07003176
3177 /* Manage any implementation defined features */
3178 arm_smmu_arch_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003179}
3180
3181static int arm_smmu_id_size_to_bits(int size)
3182{
3183 switch (size) {
3184 case 0:
3185 return 32;
3186 case 1:
3187 return 36;
3188 case 2:
3189 return 40;
3190 case 3:
3191 return 42;
3192 case 4:
3193 return 44;
3194 case 5:
3195 default:
3196 return 48;
3197 }
3198}
3199
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003200static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
3201{
3202 struct device *dev = smmu->dev;
3203 int i, ntuples, ret;
3204 u32 *tuples;
3205 struct arm_smmu_impl_def_reg *regs, *regit;
3206
3207 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
3208 return 0;
3209
3210 ntuples /= sizeof(u32);
3211 if (ntuples % 2) {
3212 dev_err(dev,
3213 "Invalid number of attach-impl-defs registers: %d\n",
3214 ntuples);
3215 return -EINVAL;
3216 }
3217
3218 regs = devm_kmalloc(
3219 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
3220 GFP_KERNEL);
3221 if (!regs)
3222 return -ENOMEM;
3223
3224 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
3225 if (!tuples)
3226 return -ENOMEM;
3227
3228 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
3229 tuples, ntuples);
3230 if (ret)
3231 return ret;
3232
3233 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
3234 regit->offset = tuples[i];
3235 regit->value = tuples[i + 1];
3236 }
3237
3238 devm_kfree(dev, tuples);
3239
3240 smmu->impl_def_attach_registers = regs;
3241 smmu->num_impl_def_attach_registers = ntuples / 2;
3242
3243 return 0;
3244}
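/*
 * Devicetree sketch of the property parsed above (offsets and values are
 * purely illustrative): each pair is <register-offset value> relative to
 * the global register space, replayed at reset time by the arch_ops
 * device_reset hook.
 *
 *	attach-impl-defs = <0x6000 0x270>,
 *			   <0x6060 0x1055>;
 *
 * would be read back here as two arm_smmu_impl_def_reg entries.
 */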
3245
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003246
3247static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003248{
3249 const char *cname;
3250 struct property *prop;
3251 int i;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003252 struct device *dev = pwr->dev;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003253
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003254 pwr->num_clocks =
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003255 of_property_count_strings(dev->of_node, "clock-names");
3256
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003257 if (pwr->num_clocks < 1) {
3258 pwr->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003259 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07003260 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003261
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003262 pwr->clocks = devm_kzalloc(
3263 dev, sizeof(*pwr->clocks) * pwr->num_clocks,
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003264 GFP_KERNEL);
3265
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003266 if (!pwr->clocks)
3267 return -ENOMEM;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003268
3269 i = 0;
3270 of_property_for_each_string(dev->of_node, "clock-names",
3271 prop, cname) {
3272 struct clk *c = devm_clk_get(dev, cname);
3273
3274 if (IS_ERR(c)) {
3275 dev_err(dev, "Couldn't get clock: %s",
3276 cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07003277 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003278 }
3279
3280 if (clk_get_rate(c) == 0) {
3281 long rate = clk_round_rate(c, 1000);
3282
3283 clk_set_rate(c, rate);
3284 }
3285
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003286 pwr->clocks[i] = c;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003287
3288 ++i;
3289 }
3290 return 0;
3291}
3292
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003293static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003294{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003295 const char *cname;
3296 struct property *prop;
3297 int i, ret = 0;
3298 struct device *dev = pwr->dev;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003299
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003300 pwr->num_gdscs =
3301 of_property_count_strings(dev->of_node, "qcom,regulator-names");
3302
3303 if (pwr->num_gdscs < 1) {
3304 pwr->num_gdscs = 0;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003305 return 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003306 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003307
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003308 pwr->gdscs = devm_kzalloc(
3309 dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
3310
3311 if (!pwr->gdscs)
3312 return -ENOMEM;
3313
3314 i = 0;
3315 of_property_for_each_string(dev->of_node, "qcom,regulator-names",
3316 prop, cname)
3317 pwr->gdscs[i].supply = cname;
3318
3319 ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
3320 return ret;
3321}
3322
3323static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
3324{
3325 struct device *dev = pwr->dev;
3326
3327 /* We don't want the bus APIs to print an error message */
3328 if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
3329 dev_dbg(dev, "No bus scaling info\n");
3330 return 0;
3331 }
3332
3333 pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
3334 if (!pwr->bus_dt_data) {
3335 dev_err(dev, "Unable to read bus-scaling from devicetree\n");
3336 return -EINVAL;
3337 }
3338
3339 pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
3340 if (!pwr->bus_client) {
3341 dev_err(dev, "Bus client registration failed\n");
3342 msm_bus_cl_clear_pdata(pwr->bus_dt_data);
3343 return -EINVAL;
3344 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003345
3346 return 0;
3347}
3348
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003349/*
3350 * Cleanup done by devm. Any non-devm resources must clean up themselves.
3351 */
3352static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
3353 struct platform_device *pdev)
Patrick Daly2764f952016-09-06 19:22:44 -07003354{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003355 struct arm_smmu_power_resources *pwr;
3356 int ret;
Patrick Daly2764f952016-09-06 19:22:44 -07003357
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003358 pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
3359 if (!pwr)
3360 return ERR_PTR(-ENOMEM);
Patrick Daly2764f952016-09-06 19:22:44 -07003361
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003362 pwr->dev = &pdev->dev;
3363 pwr->pdev = pdev;
3364 mutex_init(&pwr->power_lock);
3365 spin_lock_init(&pwr->clock_refs_lock);
Patrick Daly2764f952016-09-06 19:22:44 -07003366
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003367 ret = arm_smmu_init_clocks(pwr);
3368 if (ret)
3369 return ERR_PTR(ret);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003370
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003371 ret = arm_smmu_init_regulators(pwr);
3372 if (ret)
3373 return ERR_PTR(ret);
3374
3375 ret = arm_smmu_init_bus_scaling(pwr);
3376 if (ret)
3377 return ERR_PTR(ret);
3378
3379 return pwr;
Patrick Daly2764f952016-09-06 19:22:44 -07003380}
3381
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003382/*
3383 * Bus APIs are not devm-safe.
3384 */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003385static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003386{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003387 msm_bus_scale_unregister_client(pwr->bus_client);
3388 msm_bus_cl_clear_pdata(pwr->bus_dt_data);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003389}
3390
Will Deacon45ae7cf2013-06-24 18:31:25 +01003391static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
3392{
3393 unsigned long size;
3394 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
3395 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003396 bool cttw_dt, cttw_reg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003397
Mitchel Humpherysba822582015-10-20 11:37:41 -07003398 dev_dbg(smmu->dev, "probing hardware configuration...\n");
3399 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01003400 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003401
3402 /* ID0 */
3403 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01003404
3405 /* Restrict available stages based on module parameter */
3406 if (force_stage == 1)
3407 id &= ~(ID0_S2TS | ID0_NTS);
3408 else if (force_stage == 2)
3409 id &= ~(ID0_S1TS | ID0_NTS);
3410
Will Deacon45ae7cf2013-06-24 18:31:25 +01003411 if (id & ID0_S1TS) {
3412 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003413 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003414 }
3415
3416 if (id & ID0_S2TS) {
3417 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003418 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003419 }
3420
3421 if (id & ID0_NTS) {
3422 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003423 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003424 }
3425
3426 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01003427 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003428 dev_err(smmu->dev, "\tno translation support!\n");
3429 return -ENODEV;
3430 }
3431
Robin Murphyb7862e32016-04-13 18:13:03 +01003432 if ((id & ID0_S1TS) &&
3433 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003434 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003435 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003436 }
3437
Robin Murphybae2c2d2015-07-29 19:46:05 +01003438 /*
3439 * In order for DMA API calls to work properly, we must defer to what
3440 * the DT says about coherency, regardless of what the hardware claims.
3441 * Fortunately, this also opens up a workaround for systems where the
3442 * ID register value has ended up configured incorrectly.
3443 */
3444 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
3445 cttw_reg = !!(id & ID0_CTTW);
3446 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003447 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003448 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003449 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01003450 cttw_dt ? "" : "non-");
3451 if (cttw_dt != cttw_reg)
3452 dev_notice(smmu->dev,
3453 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003454
Robin Murphy53867802016-09-12 17:13:48 +01003455 /* Max. number of entries we have for stream matching/indexing */
3456 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
3457 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003458 if (id & ID0_SMS) {
Robin Murphy53867802016-09-12 17:13:48 +01003459 u32 smr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003460
3461 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy53867802016-09-12 17:13:48 +01003462 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
3463 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003464 dev_err(smmu->dev,
3465 "stream-matching supported, but no SMRs present!\n");
3466 return -ENODEV;
3467 }
3468
Robin Murphy53867802016-09-12 17:13:48 +01003469 /*
3470 * SMR.ID bits may not be preserved if the corresponding MASK
3471 * bits are set, so check each one separately. We can reject
3472 * masters later if they try to claim IDs outside these masks.
3473 */
3474 smr = smmu->streamid_mask << SMR_ID_SHIFT;
3475 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
3476 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
3477 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003478
Robin Murphy53867802016-09-12 17:13:48 +01003479 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
3480 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
3481 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
3482 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
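 /*
 * Worked example (illustrative value): if the all-ones write to SMR.ID
 * reads back as 0x7fff, streamid_mask becomes 0x7fff and the
 * implementation provides 15 usable ID bits; a master that later claims
 * SID 0x8001 can then be rejected as out of range.
 */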
Dhaval Patel031d7462015-05-09 14:47:29 -07003483
Robin Murphy468f4942016-09-12 17:13:49 +01003484 /* Zero-initialised to mark as invalid */
3485 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
3486 GFP_KERNEL);
3487 if (!smmu->smrs)
3488 return -ENOMEM;
3489
Robin Murphy53867802016-09-12 17:13:48 +01003490 dev_notice(smmu->dev,
3491 "\tstream matching with %lu register groups, mask 0x%x",
3492 size, smmu->smr_mask_mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003493 }
Robin Murphy53867802016-09-12 17:13:48 +01003494 smmu->num_mapping_groups = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003495
Robin Murphy7602b872016-04-28 17:12:09 +01003496 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
3497 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
3498 if (!(id & ID0_PTFS_NO_AARCH32S))
3499 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
3500 }
3501
Will Deacon45ae7cf2013-06-24 18:31:25 +01003502 /* ID1 */
3503 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01003504 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003505
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003506 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00003507 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01003508 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003509 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07003510 dev_warn(smmu->dev,
3511 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
3512 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003513
Will Deacon518f7132014-11-14 17:17:54 +00003514 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003515 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
3516 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
3517 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
3518 return -ENODEV;
3519 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07003520 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01003521 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01003522 /*
3523 * Cavium CN88xx erratum #27704.
3524 * Ensure ASID and VMID allocation is unique across all SMMUs in
3525 * the system.
3526 */
3527 if (smmu->model == CAVIUM_SMMUV2) {
3528 smmu->cavium_id_base =
3529 atomic_add_return(smmu->num_context_banks,
3530 &cavium_smmu_context_count);
3531 smmu->cavium_id_base -= smmu->num_context_banks;
3532 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003533
3534 /* ID2 */
3535 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
3536 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003537 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003538
Will Deacon518f7132014-11-14 17:17:54 +00003539 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003540 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003541 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003542
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003543 if (id & ID2_VMID16)
3544 smmu->features |= ARM_SMMU_FEAT_VMID16;
3545
Robin Murphyf1d84542015-03-04 16:41:05 +00003546 /*
3547 * What the page table walker can address actually depends on which
3548 * descriptor format is in use, but since a) we don't know that yet,
3549 * and b) it can vary per context bank, this will have to do...
3550 */
3551 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
3552 dev_warn(smmu->dev,
3553 "failed to set DMA mask for table walker\n");
3554
Robin Murphyb7862e32016-04-13 18:13:03 +01003555 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00003556 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01003557 if (smmu->version == ARM_SMMU_V1_64K)
3558 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003559 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003560 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00003561 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00003562 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01003563 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00003564 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01003565 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00003566 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01003567 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003568 }
3569
Robin Murphy7602b872016-04-28 17:12:09 +01003570 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01003571 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01003572 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01003573 if (smmu->features &
3574 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01003575 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01003576 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01003577 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01003578 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01003579 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01003580
Robin Murphyd5466352016-05-09 17:20:09 +01003581 if (arm_smmu_ops.pgsize_bitmap == -1UL)
3582 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
3583 else
3584 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003585 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01003586 smmu->pgsize_bitmap);
3587
Will Deacon518f7132014-11-14 17:17:54 +00003588
Will Deacon28d60072014-09-01 16:24:48 +01003589 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003590 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
3591 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003592
3593 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003594 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
3595 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003596
Will Deacon45ae7cf2013-06-24 18:31:25 +01003597 return 0;
3598}
3599
Patrick Dalyd7476202016-09-08 18:23:28 -07003600static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
3601{
3602 if (!smmu->arch_ops)
3603 return 0;
3604 if (!smmu->arch_ops->init)
3605 return 0;
3606 return smmu->arch_ops->init(smmu);
3607}
3608
3609static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
3610{
3611 if (!smmu->arch_ops)
3612 return;
3613 if (!smmu->arch_ops->device_reset)
3614 return;
3615 return smmu->arch_ops->device_reset(smmu);
3616}
3617
Robin Murphy67b65a32016-04-13 18:12:57 +01003618struct arm_smmu_match_data {
3619 enum arm_smmu_arch_version version;
3620 enum arm_smmu_implementation model;
Patrick Dalyd7476202016-09-08 18:23:28 -07003621 struct arm_smmu_arch_ops *arch_ops;
Robin Murphy67b65a32016-04-13 18:12:57 +01003622};
3623
Patrick Dalyd7476202016-09-08 18:23:28 -07003624#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops) \
3625static struct arm_smmu_match_data name = { \
3626.version = ver, \
3627.model = imp, \
3628.arch_ops = ops, \
3629} \
Robin Murphy67b65a32016-04-13 18:12:57 +01003630
Patrick Daly1f8a2882016-09-12 17:32:05 -07003631struct arm_smmu_arch_ops qsmmuv500_arch_ops;
3632
Patrick Dalyd7476202016-09-08 18:23:28 -07003633ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
3634ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
3635ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
3636ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
3637ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003638ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
Patrick Daly1f8a2882016-09-12 17:32:05 -07003639ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
3640 &qsmmuv500_arch_ops);
Robin Murphy67b65a32016-04-13 18:12:57 +01003641
Joerg Roedel09b52692014-10-02 12:24:45 +02003642static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01003643 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
3644 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
3645 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01003646 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003647 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01003648 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07003649 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Patrick Daly1f8a2882016-09-12 17:32:05 -07003650 { .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
Robin Murphy09360402014-08-28 17:51:59 +01003651 { },
3652};
3653MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
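/*
 * Devicetree sketch matching the table above and the probe code below
 * (unit address, interrupt numbers and clock names are illustrative only):
 *
 *	smmu@1500000 {
 *		compatible = "qcom,smmu-v2";
 *		reg = <0x1500000 0x10000>;
 *		#global-interrupts = <1>;
 *		interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
 *			     <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
 *		clock-names = "iface_clk", "bus_clk";
 *		clocks = <&gcc IFACE_CLK>, <&gcc BUS_CLK>;
 *	};
 *
 * The first interrupt is consumed as the global fault IRQ; any remaining
 * interrupts are counted as context-bank IRQs by arm_smmu_device_dt_probe().
 */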
3654
Patrick Daly1f8a2882016-09-12 17:32:05 -07003655static int qsmmuv500_tbu_register(struct device *dev, void *data);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003656static int arm_smmu_device_dt_probe(struct platform_device *pdev)
3657{
Robin Murphy09360402014-08-28 17:51:59 +01003658 const struct of_device_id *of_id;
Robin Murphy67b65a32016-04-13 18:12:57 +01003659 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003660 struct resource *res;
3661 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003662 struct device *dev = &pdev->dev;
3663 struct rb_node *node;
Mitchel Humpherysc6dd1ed2014-08-04 16:45:53 -07003664 int num_irqs, i, err, num_masters;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003665
3666 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
3667 if (!smmu) {
3668 dev_err(dev, "failed to allocate arm_smmu_device\n");
3669 return -ENOMEM;
3670 }
3671 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08003672 spin_lock_init(&smmu->atos_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07003673 idr_init(&smmu->asid_idr);
3674 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003675
Robin Murphy09360402014-08-28 17:51:59 +01003676 of_id = of_match_node(arm_smmu_of_match, dev->of_node);
Robin Murphy67b65a32016-04-13 18:12:57 +01003677 data = of_id->data;
3678 smmu->version = data->version;
3679 smmu->model = data->model;
Patrick Dalyd7476202016-09-08 18:23:28 -07003680 smmu->arch_ops = data->arch_ops;
Robin Murphy09360402014-08-28 17:51:59 +01003681
Will Deacon45ae7cf2013-06-24 18:31:25 +01003682 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01003683 smmu->base = devm_ioremap_resource(dev, res);
3684 if (IS_ERR(smmu->base))
3685 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003686 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003687
3688 if (of_property_read_u32(dev->of_node, "#global-interrupts",
3689 &smmu->num_global_irqs)) {
3690 dev_err(dev, "missing #global-interrupts property\n");
3691 return -ENODEV;
3692 }
3693
3694 num_irqs = 0;
3695 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
3696 num_irqs++;
3697 if (num_irqs > smmu->num_global_irqs)
3698 smmu->num_context_irqs++;
3699 }
3700
Andreas Herrmann44a08de2013-10-01 13:39:07 +01003701 if (!smmu->num_context_irqs) {
3702 dev_err(dev, "found %d interrupts but expected at least %d\n",
3703 num_irqs, smmu->num_global_irqs + 1);
3704 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003705 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003706
3707 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
3708 GFP_KERNEL);
3709 if (!smmu->irqs) {
3710 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
3711 return -ENOMEM;
3712 }
3713
3714 for (i = 0; i < num_irqs; ++i) {
3715 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07003716
Will Deacon45ae7cf2013-06-24 18:31:25 +01003717 if (irq < 0) {
3718 dev_err(dev, "failed to get irq index %d\n", i);
3719 return -ENODEV;
3720 }
3721 smmu->irqs[i] = irq;
3722 }
3723
Dhaval Patel031d7462015-05-09 14:47:29 -07003724 parse_driver_options(smmu);
3725
Olav Haugan3c8766d2014-08-22 17:12:32 -07003726
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003727 smmu->pwr = arm_smmu_init_power_resources(pdev);
3728 if (IS_ERR(smmu->pwr))
3729 return PTR_ERR(smmu->pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003730
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003731 err = arm_smmu_power_on(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07003732 if (err)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003733 goto out_exit_power_resources;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003734
3735 err = arm_smmu_device_cfg_probe(smmu);
3736 if (err)
3737 goto out_power_off;
3738
Will Deacon45ae7cf2013-06-24 18:31:25 +01003739 i = 0;
3740 smmu->masters = RB_ROOT;
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02003741
Mitchel Humpherysc6dd1ed2014-08-04 16:45:53 -07003742 err = arm_smmu_parse_iommus_properties(smmu, &num_masters);
3743 if (err)
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02003744 goto out_put_masters;
3745
Mitchel Humpherysba822582015-10-20 11:37:41 -07003746 dev_dbg(dev, "registered %d master devices\n", num_masters);
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02003747
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003748 err = arm_smmu_parse_impl_def_registers(smmu);
3749 if (err)
3750 goto out_put_masters;
3751
Robin Murphyb7862e32016-04-13 18:13:03 +01003752 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01003753 smmu->num_context_banks != smmu->num_context_irqs) {
3754 dev_err(dev,
Mitchel Humpherys73230ce2014-12-11 17:18:02 -08003755 "found %d context interrupt(s) but %d context banks; assuming %d context interrupts\n",
3756 smmu->num_context_irqs, smmu->num_context_banks,
3757 smmu->num_context_banks);
3758 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003759 }
3760
Will Deacon45ae7cf2013-06-24 18:31:25 +01003761 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08003762 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
3763 NULL, arm_smmu_global_fault,
3764 IRQF_ONESHOT | IRQF_SHARED,
3765 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003766 if (err) {
3767 dev_err(dev, "failed to request global IRQ %d (%u)\n",
3768 i, smmu->irqs[i]);
Peng Fanbee14002016-07-04 17:38:22 +08003769 goto out_put_masters;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003770 }
3771 }
3772
3773 INIT_LIST_HEAD(&smmu->list);
3774 spin_lock(&arm_smmu_devices_lock);
3775 list_add(&smmu->list, &arm_smmu_devices);
3776 spin_unlock(&arm_smmu_devices_lock);
Will Deaconfd90cec2013-08-21 13:56:34 +01003777
Patrick Dalyd7476202016-09-08 18:23:28 -07003778 err = arm_smmu_arch_init(smmu);
3779 if (err)
3780 goto out_put_masters;
3781
Will Deaconfd90cec2013-08-21 13:56:34 +01003782 arm_smmu_device_reset(smmu);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003783 arm_smmu_power_off(smmu->pwr);
Patrick Dalyd7476202016-09-08 18:23:28 -07003784
Will Deacon45ae7cf2013-06-24 18:31:25 +01003785 return 0;
3786
Will Deacon45ae7cf2013-06-24 18:31:25 +01003787out_put_masters:
3788 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07003789 struct arm_smmu_master *master
3790 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003791 of_node_put(master->of_node);
3792 }
3793
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003794out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003795 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003796
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003797out_exit_power_resources:
3798 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003799
Will Deacon45ae7cf2013-06-24 18:31:25 +01003800 return err;
3801}
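
/*
 * Illustrative device-tree fragment for the properties consumed by the probe
 * routine above. This is only a sketch: the compatible string, addresses and
 * interrupt specifiers are placeholders, not taken from any real platform.
 *
 *	smmu@d00000 {
 *		compatible = "arm,smmu-v2";
 *		reg = <0xd00000 0x10000>;
 *		#global-interrupts = <1>;
 *		interrupts = <0 73 4>,		// global fault
 *			     <0 74 4>,		// context bank 0
 *			     <0 75 4>;		// context bank 1
 *	};
 */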
3802
3803static int arm_smmu_device_remove(struct platform_device *pdev)
3804{
Will Deacon45ae7cf2013-06-24 18:31:25 +01003805 struct device *dev = &pdev->dev;
3806 struct arm_smmu_device *curr, *smmu = NULL;
3807 struct rb_node *node;
3808
3809 spin_lock(&arm_smmu_devices_lock);
3810 list_for_each_entry(curr, &arm_smmu_devices, list) {
3811 if (curr->dev == dev) {
3812 smmu = curr;
3813 list_del(&smmu->list);
3814 break;
3815 }
3816 }
3817 spin_unlock(&arm_smmu_devices_lock);
3818
3819 if (!smmu)
3820 return -ENODEV;
3821
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003822 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003823 return -EINVAL;
3824
Will Deacon45ae7cf2013-06-24 18:31:25 +01003825 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07003826 struct arm_smmu_master *master
3827 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003828 of_node_put(master->of_node);
3829 }
3830
Will Deaconecfadb62013-07-31 19:21:28 +01003831 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Will Deacon45ae7cf2013-06-24 18:31:25 +01003832 dev_err(dev, "removing device with active domains!\n");
3833
Patrick Dalyc190d932016-08-30 17:23:28 -07003834 idr_destroy(&smmu->asid_idr);
3835
Will Deacon45ae7cf2013-06-24 18:31:25 +01003836 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07003837 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003838 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003839
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003840 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07003841
Will Deacon45ae7cf2013-06-24 18:31:25 +01003842 return 0;
3843}
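
/*
 * arm_smmu_device_remove() mirrors probe in reverse: it powers the SMMU on
 * just long enough to write sCR0_CLIENTPD (disabling the client ports), then
 * powers off and releases the power resources acquired at probe time.
 */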
3844
Will Deacon45ae7cf2013-06-24 18:31:25 +01003845static struct platform_driver arm_smmu_driver = {
3846 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003847 .name = "arm-smmu",
3848 .of_match_table = of_match_ptr(arm_smmu_of_match),
3849 },
3850 .probe = arm_smmu_device_dt_probe,
3851 .remove = arm_smmu_device_remove,
3852};
3853
3854static int __init arm_smmu_init(void)
3855{
Thierry Reding0e7d37a2014-11-07 15:26:18 +00003856 struct device_node *np;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003857 int ret;
3858
Thierry Reding0e7d37a2014-11-07 15:26:18 +00003859 /*
3860 * Play nice with systems that don't have an ARM SMMU by checking that
3861 * an ARM SMMU exists in the system before proceeding with the driver
3862 * and IOMMU bus operation registration.
3863 */
3864 np = of_find_matching_node(NULL, arm_smmu_of_match);
3865 if (!np)
3866 return 0;
3867
3868 of_node_put(np);
3869
Will Deacon45ae7cf2013-06-24 18:31:25 +01003870 ret = platform_driver_register(&arm_smmu_driver);
3871 if (ret)
3872 return ret;
3873
3874 /* Oh, for a proper bus abstraction */
Dan Carpenter6614ee72013-08-21 09:34:20 +01003875 if (!iommu_present(&platform_bus_type))
Will Deacon45ae7cf2013-06-24 18:31:25 +01003876 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
3877
Will Deacond123cf82014-02-04 22:17:53 +00003878#ifdef CONFIG_ARM_AMBA
Dan Carpenter6614ee72013-08-21 09:34:20 +01003879 if (!iommu_present(&amba_bustype))
Will Deacon45ae7cf2013-06-24 18:31:25 +01003880 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
Will Deacond123cf82014-02-04 22:17:53 +00003881#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01003882
Will Deacona9a1b0b2014-05-01 18:05:08 +01003883#ifdef CONFIG_PCI
Wei Chen112c8982016-06-13 17:20:17 +08003884 if (!iommu_present(&pci_bus_type)) {
3885 pci_request_acs();
Will Deacona9a1b0b2014-05-01 18:05:08 +01003886 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
Wei Chen112c8982016-06-13 17:20:17 +08003887 }
Will Deacona9a1b0b2014-05-01 18:05:08 +01003888#endif
3889
Will Deacon45ae7cf2013-06-24 18:31:25 +01003890 return 0;
3891}
3892
3893static void __exit arm_smmu_exit(void)
3894{
3895 return platform_driver_unregister(&arm_smmu_driver);
3896}
3897
Andreas Herrmannb1950b22013-10-01 13:39:05 +01003898subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003899module_exit(arm_smmu_exit);
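
/*
 * Registering at subsys_initcall() level rather than as a plain module_init()
 * presumably ensures that, for built-in kernels, the SMMU driver and the bus
 * iommu ops set up above are in place before client device drivers start
 * probing and setting up DMA.
 */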
3900
Patrick Daly1f8a2882016-09-12 17:32:05 -07003901#define DEBUG_SID_HALT_REG 0x0
3902#define DEBUG_SID_HALT_VAL (0x1 << 16)
3903
3904#define DEBUG_SR_HALT_ACK_REG 0x20
3905#define DEBUG_SR_HALT_ACK_VAL (0x1 << 1)
3906
3907#define TBU_DBG_TIMEOUT_US 30000
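
/*
 * The offsets above are implementation-defined QSMMUV500 TBU debug registers
 * (not architected SMMU registers); they are applied to the region mapped
 * from each TBU's "base" resource in qsmmuv500_tbu_probe() below.
 */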
3908
3909struct qsmmuv500_tbu_device {
3910 struct list_head list;
3911 struct device *dev;
3912 struct arm_smmu_device *smmu;
3913 void __iomem *base;
3914 void __iomem *status_reg;
3915
3916 struct arm_smmu_power_resources *pwr;
3917
3918 /* Protects halt count */
3919 spinlock_t halt_lock;
3920 u32 halt_count;
3921};
3922
3923static int qsmmuv500_tbu_power_on_all(struct arm_smmu_device *smmu)
3924{
3925 struct qsmmuv500_tbu_device *tbu;
3926 struct list_head *list = smmu->archdata;
3927 int ret = 0;
3928
3929 list_for_each_entry(tbu, list, list) {
3930 ret = arm_smmu_power_on(tbu->pwr);
3931 if (ret)
3932 break;
3933 }
3934 if (!ret)
3935 return 0;
3936
3937 list_for_each_entry_continue_reverse(tbu, list, list) {
3938 arm_smmu_power_off(tbu->pwr);
3939 }
3940 return ret;
3941}
3942
3943static void qsmmuv500_tbu_power_off_all(struct arm_smmu_device *smmu)
3944{
3945 struct qsmmuv500_tbu_device *tbu;
3946 struct list_head *list = smmu->archdata;
3947
3948 list_for_each_entry_reverse(tbu, list, list) {
3949 arm_smmu_power_off(tbu->pwr);
3950 }
3951}
3952
3953static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu)
3954{
3955 unsigned long flags;
3956 u32 val;
3957 void __iomem *base;
3958
3959 spin_lock_irqsave(&tbu->halt_lock, flags);
3960 if (tbu->halt_count) {
3961 tbu->halt_count++;
3962 spin_unlock_irqrestore(&tbu->halt_lock, flags);
3963 return 0;
3964 }
3965
3966 base = tbu->base;
3967 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
3968 val |= DEBUG_SID_HALT_VAL;
3969 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
3970
3971 if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG,
3972 val, (val & DEBUG_SR_HALT_ACK_VAL),
3973 0, TBU_DBG_TIMEOUT_US)) {
3974 dev_err(tbu->dev, "Couldn't halt TBU!\n");
3975 spin_unlock_irqrestore(&tbu->halt_lock, flags);
3976 return -ETIMEDOUT;
3977 }
3978
3979 tbu->halt_count = 1;
3980 spin_unlock_irqrestore(&tbu->halt_lock, flags);
3981 return 0;
3982}
3983
3984static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
3985{
3986 unsigned long flags;
3987 u32 val;
3988 void __iomem *base;
3989
3990 spin_lock_irqsave(&tbu->halt_lock, flags);
3991 if (!tbu->halt_count) {
 3992 WARN(1, "%s: resume without a matching halt (halt_count is 0)", dev_name(tbu->dev));
3993 spin_unlock_irqrestore(&tbu->halt_lock, flags);
3994 return;
3995
3996 } else if (tbu->halt_count > 1) {
3997 tbu->halt_count--;
3998 spin_unlock_irqrestore(&tbu->halt_lock, flags);
3999 return;
4000 }
4001
4002 base = tbu->base;
4003 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
4004 val &= ~DEBUG_SID_HALT_VAL;
4005 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
4006
4007 tbu->halt_count = 0;
4008 spin_unlock_irqrestore(&tbu->halt_lock, flags);
4009}
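
/*
 * qsmmuv500_tbu_halt() and qsmmuv500_tbu_resume() form a reference-counted
 * pair: only the first halt sets DEBUG_SID_HALT_VAL and polls for the
 * acknowledge bit, and only the final matching resume clears it, so nested
 * callers can bracket their register accesses without knowing who halted the
 * TBU first. A sketch of the expected calling pattern:
 *
 *	if (!qsmmuv500_tbu_halt(tbu)) {
 *		// ... access TBU debug registers ...
 *		qsmmuv500_tbu_resume(tbu);
 *	}
 */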
4010
4011static int qsmmuv500_halt_all(struct arm_smmu_device *smmu)
4012{
4013 struct qsmmuv500_tbu_device *tbu;
4014 struct list_head *list = smmu->archdata;
4015 int ret = 0;
4016
4017 list_for_each_entry(tbu, list, list) {
4018 ret = qsmmuv500_tbu_halt(tbu);
4019 if (ret)
4020 break;
4021 }
4022
4023 if (!ret)
4024 return 0;
4025
4026 list_for_each_entry_continue_reverse(tbu, list, list) {
4027 qsmmuv500_tbu_resume(tbu);
4028 }
4029 return ret;
4030}
4031
4032static void qsmmuv500_resume_all(struct arm_smmu_device *smmu)
4033{
4034 struct qsmmuv500_tbu_device *tbu;
4035 struct list_head *list = smmu->archdata;
4036
4037 list_for_each_entry(tbu, list, list) {
4038 qsmmuv500_tbu_resume(tbu);
4039 }
4040}
4041
4042static void qsmmuv500_device_reset(struct arm_smmu_device *smmu)
4043{
4044 int i, ret;
4045 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
4046
4047 ret = qsmmuv500_tbu_power_on_all(smmu);
4048 if (ret)
4049 return;
4050
4051 /* Program implementation defined registers */
4052 qsmmuv500_halt_all(smmu);
4053 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
4054 writel_relaxed(regs[i].value,
4055 ARM_SMMU_GR0(smmu) + regs[i].offset);
4056 qsmmuv500_resume_all(smmu);
4057 qsmmuv500_tbu_power_off_all(smmu);
4058}
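
/*
 * qsmmuv500_device_reset() halts every TBU while the implementation-defined
 * attach registers are (re)programmed and resumes them afterwards, presumably
 * so that no client transactions are in flight while those registers change;
 * the TBU power votes only need to be held for the duration of this sequence.
 */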
4059
4060static int qsmmuv500_tbu_register(struct device *dev, void *data)
4061{
4062 struct arm_smmu_device *smmu = data;
4063 struct qsmmuv500_tbu_device *tbu;
4064 struct list_head *list = smmu->archdata;
4065
4066 if (!dev->driver) {
 4067 dev_err(dev, "TBU failed to probe; QSMMUV500 cannot continue\n");
4068 return -EINVAL;
4069 }
4070
4071 tbu = dev_get_drvdata(dev);
4072
4073 INIT_LIST_HEAD(&tbu->list);
4074 tbu->smmu = smmu;
4075 list_add(&tbu->list, list);
4076 return 0;
4077}
4078
4079static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
4080{
4081 struct device *dev = smmu->dev;
4082 struct list_head *list;
4083 int ret;
4084
4085 list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
4086 if (!list)
4087 return -ENOMEM;
4088
4089 INIT_LIST_HEAD(list);
4090 smmu->archdata = list;
4091
4092 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
4093 if (ret)
4094 return ret;
4095
4096 /* Attempt to register child devices */
4097 ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
4098 if (ret)
4099 return -EINVAL;
4100
4101 return 0;
4102}
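
/*
 * qsmmuv500_arch_init() relies on of_platform_populate() to create a platform
 * device for every TBU child node of the SMMU and then walks those children
 * with qsmmuv500_tbu_register(); a child whose driver has not bound (for
 * instance because qsmmuv500_tbu_probe() failed) makes arch init fail with
 * -EINVAL rather than continuing with a partial TBU list.
 */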
4103
4104struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
4105 .init = qsmmuv500_arch_init,
4106 .device_reset = qsmmuv500_device_reset,
4107};
4108
4109static const struct of_device_id qsmmuv500_tbu_of_match[] = {
4110 {.compatible = "qcom,qsmmuv500-tbu"},
4111 {}
4112};
4113
4114static int qsmmuv500_tbu_probe(struct platform_device *pdev)
4115{
4116 struct resource *res;
4117 struct device *dev = &pdev->dev;
4118 struct qsmmuv500_tbu_device *tbu;
4119
4120 tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
4121 if (!tbu)
4122 return -ENOMEM;
4123
4124 INIT_LIST_HEAD(&tbu->list);
4125 tbu->dev = dev;
4126 spin_lock_init(&tbu->halt_lock);
4127
4128 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
4129 tbu->base = devm_ioremap_resource(dev, res);
4130 if (IS_ERR(tbu->base))
4131 return PTR_ERR(tbu->base);
4132
4133 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "status-reg");
4134 tbu->status_reg = devm_ioremap_resource(dev, res);
4135 if (IS_ERR(tbu->status_reg))
4136 return PTR_ERR(tbu->status_reg);
4137
4138 tbu->pwr = arm_smmu_init_power_resources(pdev);
4139 if (IS_ERR(tbu->pwr))
4140 return PTR_ERR(tbu->pwr);
4141
4142 dev_set_drvdata(dev, tbu);
4143 return 0;
4144}
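
/*
 * Illustrative child node consumed by qsmmuv500_tbu_probe(): the driver
 * matches on "qcom,qsmmuv500-tbu" and expects the two named MEM resources
 * read above. Addresses and sizes here are made up for the example.
 *
 *	tbu@1000 {
 *		compatible = "qcom,qsmmuv500-tbu";
 *		reg = <0x1000 0x1000>, <0x2000 0x8>;
 *		reg-names = "base", "status-reg";
 *	};
 */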
4145
4146static struct platform_driver qsmmuv500_tbu_driver = {
4147 .driver = {
4148 .name = "qsmmuv500-tbu",
4149 .of_match_table = of_match_ptr(qsmmuv500_tbu_of_match),
4150 },
4151 .probe = qsmmuv500_tbu_probe,
4152};
4153
4154static int __init qsmmuv500_tbu_init(void)
4155{
4156 return platform_driver_register(&qsmmuv500_tbu_driver);
4157}
4158subsys_initcall(qsmmuv500_tbu_init);
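
/*
 * The TBU driver is also registered at subsys_initcall() level, which
 * presumably lets the TBU child devices bind before qsmmuv500_arch_init()
 * checks dev->driver in qsmmuv500_tbu_register() during SMMU probe.
 */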
4159
Will Deacon45ae7cf2013-06-24 18:31:25 +01004160MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
4161MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
4162MODULE_LICENSE("GPL v2");