/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/msm-bus.h>
#include <dt-bindings/msm/msm-bus-ids.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		500000	/* 500ms */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FSRRESTORE		0x5c
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define TLBSTATUS_SACTIVE		(1 << 0)
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

/* Definitions for implementation-defined registers */
#define ACTLR_QCOM_OSH_SHIFT		28
#define ACTLR_QCOM_OSH			1

#define ACTLR_QCOM_ISH_SHIFT		29
#define ACTLR_QCOM_ISH			1

#define ACTLR_QCOM_NSH_SHIFT		30
#define ACTLR_QCOM_NSH			1

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define IMPL_DEF1_MICRO_MMU_CTRL	0
#define MICRO_MMU_CTRL_LOCAL_HALT_REQ	(1 << 2)
#define MICRO_MMU_CTRL_IDLE		(1 << 3)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_smr {
	u8 idx;
	u16 mask;
	u16 id;
};

struct arm_smmu_master_cfg {
	int num_streamids;
	u16 streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr *smrs;
};

struct arm_smmu_master {
	struct device_node *of_node;
	struct rb_node node;
	struct arm_smmu_master_cfg cfg;
};

struct arm_smmu_device {
	struct device *dev;

	void __iomem *base;
	unsigned long size;
	unsigned long pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32 features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
#define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
	u32 options;
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;

	u32 num_context_banks;
	u32 num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t irptndx;

	u32 num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long va_size;
	unsigned long ipa_size;
	unsigned long pa_size;
	unsigned long pgsize_bitmap;

	u32 num_global_irqs;
	u32 num_context_irqs;
	unsigned int *irqs;

	struct list_head list;
	struct rb_root masters;

	u32 cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg *impl_def_attach_registers;
	unsigned int num_impl_def_attach_registers;

	int num_clocks;
	struct clk **clocks;

	struct regulator *gdsc;

	struct msm_bus_client_handle *bus_client;
	char *bus_client_name;

	/* Protects power_count */
	struct mutex power_lock;
	int power_count;
	/* Protects clock_refs_count */
	spinlock_t clock_refs_lock;
	int clock_refs_count;

	spinlock_t atos_lock;

	/* protects idr */
	struct mutex idr_mutex;
	struct idr asid_idr;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8 cbndx;
	u8 irptndx;
	u32 cbar;
	u32 procid;
	u16 asid;
	enum arm_smmu_context_fmt fmt;
};
#define INVALID_IRPTNDX			0xff
#define INVALID_CBNDX			0xff
#define INVALID_ASID			0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg)	((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg)	((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_pte_info {
	void *virt_addr;
	size_t size;
	struct list_head entry;
};

struct arm_smmu_domain {
	struct arm_smmu_device *smmu;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	spinlock_t pgtbl_lock;
	struct arm_smmu_cfg cfg;
	enum arm_smmu_domain_stage stage;
	struct mutex init_mutex; /* Protects smmu pointer */
	u32 attributes;
	u32 secure_vmid;
	struct list_head pte_info_list;
	struct list_head unassign_list;
	struct mutex assign_lock;
	struct list_head secure_pool_list;
	struct iommu_domain domain;
};

struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ 0, NULL},
};

static int arm_smmu_halt(struct arm_smmu_device *smmu);
static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu);
static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu);
static void arm_smmu_resume(struct arm_smmu_device *smmu);
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
	struct iommu_domain *domain, dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_dbg(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

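/* True if the domain was created with the DOMAIN_ATTR_DYNAMIC attribute set */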
static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
{
	return (smmu_domain->secure_vmid != VMID_INVAL);
}

static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_lock(&smmu_domain->assign_lock);
}

static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_unlock(&smmu_domain->assign_lock);
}

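/* PCI masters are keyed by the OF node of their root bus host controller */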
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

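/* Look up a registered master by its OF node in this SMMU's rb-tree of masters */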
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

struct iommus_entry {
	struct list_head list;
	struct device_node *node;
	u16 streamids[MAX_MASTER_STREAMIDS];
	int num_sids;
};

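/* Validate a master's stream IDs and add it to the SMMU's master rb-tree */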
static int register_smmu_master(struct arm_smmu_device *smmu,
				struct iommus_entry *entry)
{
	int i;
	struct arm_smmu_master *master;
	struct device *dev = smmu->dev;

	master = find_smmu_master(smmu, entry->node);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			entry->node->name);
		return -EBUSY;
	}

	if (entry->num_sids > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, entry->node->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node = entry->node;
	master->cfg.num_streamids = entry->num_sids;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = entry->streamids[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				entry->node->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

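/*
 * Walk every DT node carrying an "iommus" property and register those that
 * reference this SMMU instance as masters.
 */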
static int arm_smmu_parse_iommus_properties(struct arm_smmu_device *smmu,
					int *num_masters)
{
	struct of_phandle_args iommuspec;
	struct device_node *master;

	*num_masters = 0;

	for_each_node_with_property(master, "iommus") {
		int arg_ind = 0;
		struct iommus_entry *entry, *n;
		LIST_HEAD(iommus);

		while (!of_parse_phandle_with_args(
				master, "iommus", "#iommu-cells",
				arg_ind, &iommuspec)) {
			if (iommuspec.np != smmu->dev->of_node) {
				arg_ind++;
				continue;
			}

			list_for_each_entry(entry, &iommus, list)
				if (entry->node == master)
					break;
			if (&entry->list == &iommus) {
				entry = devm_kzalloc(smmu->dev, sizeof(*entry),
						GFP_KERNEL);
				if (!entry)
					return -ENOMEM;
				entry->node = master;
				list_add(&entry->list, &iommus);
			}
			switch (iommuspec.args_count) {
			case 0:
				/*
				 * For pci-e devices the SIDs are provided
				 * at device attach time.
				 */
				break;
			case 1:
				entry->num_sids++;
				entry->streamids[entry->num_sids - 1]
					= iommuspec.args[0];
				break;
			default:
				dev_err(smmu->dev, "iommus property has wrong #iommu-cells");
				return -EINVAL;
			}
			arg_ind++;
		}

		list_for_each_entry_safe(entry, n, &iommus, list) {
			int rc = register_smmu_master(smmu, entry);

			if (rc) {
				dev_err(smmu->dev, "Couldn't register %s\n",
					entry->node->name);
			} else {
				(*num_masters)++;
			}
			list_del(&entry->list);
			devm_kfree(smmu->dev, entry);
		}
	}

	return 0;
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

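/* Atomically claim the first free index in [start, end) of the given bitmap */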
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

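/* Prepare (but do not enable) all SMMU clocks; may sleep, so not atomic-safe */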
static int arm_smmu_prepare_clocks(struct arm_smmu_device *smmu)
{
	int i, ret = 0;

	for (i = 0; i < smmu->num_clocks; ++i) {
		ret = clk_prepare(smmu->clocks[i]);
		if (ret) {
			dev_err(smmu->dev, "Couldn't prepare clock #%d\n", i);
			while (i--)
				clk_unprepare(smmu->clocks[i]);
			break;
		}
	}
	return ret;
}

static void arm_smmu_unprepare_clocks(struct arm_smmu_device *smmu)
{
	int i;

	for (i = smmu->num_clocks; i; --i)
		clk_unprepare(smmu->clocks[i - 1]);
}

/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
static int arm_smmu_enable_clocks_atomic(struct arm_smmu_device *smmu)
{
	int i, ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&smmu->clock_refs_lock, flags);
	if (smmu->clock_refs_count > 0) {
		smmu->clock_refs_count++;
		spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
		return 0;
	}

	for (i = 0; i < smmu->num_clocks; ++i) {
		ret = clk_enable(smmu->clocks[i]);
		if (ret) {
			dev_err(smmu->dev, "Couldn't enable clock #%d\n", i);
			while (i--)
				clk_disable(smmu->clocks[i]);
			break;
		}
	}

	if (!ret)
		smmu->clock_refs_count++;

	spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
	return ret;
}

/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
static void arm_smmu_disable_clocks_atomic(struct arm_smmu_device *smmu)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&smmu->clock_refs_lock, flags);
	WARN_ON(smmu->clock_refs_count == 0);
	if (smmu->clock_refs_count > 1) {
		smmu->clock_refs_count--;
		spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
		return;
	}

	for (i = smmu->num_clocks; i; --i)
		clk_disable(smmu->clocks[i - 1]);

	smmu->clock_refs_count--;
	spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
}

static int arm_smmu_enable_regulators(struct arm_smmu_device *smmu)
{
	if (!smmu->gdsc)
		return 0;

	return regulator_enable(smmu->gdsc);
}

static int arm_smmu_disable_regulators(struct arm_smmu_device *smmu)
{
	if (!smmu->gdsc)
		return 0;

	return regulator_disable(smmu->gdsc);
}

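/* Vote (and below, unvote) bus bandwidth for SMMU access via the msm-bus client */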
static int arm_smmu_request_bus(struct arm_smmu_device *smmu)
{
	if (!smmu->bus_client)
		return 0;
	return msm_bus_scale_update_bw(smmu->bus_client, 0, 1000);
}

static int arm_smmu_unrequest_bus(struct arm_smmu_device *smmu)
{
	if (!smmu->bus_client)
		return 0;
	return msm_bus_scale_update_bw(smmu->bus_client, 0, 0);
}

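/* Power-on path for sleepable context: regulator, bus vote, then clock prepare */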
static int arm_smmu_power_on_slow(struct arm_smmu_device *smmu)
{
	int ret;

	mutex_lock(&smmu->power_lock);
	if (smmu->power_count > 0) {
		smmu->power_count += 1;
		mutex_unlock(&smmu->power_lock);
		return 0;
	}

	ret = arm_smmu_enable_regulators(smmu);
	if (ret)
		goto out_unlock;

	ret = arm_smmu_request_bus(smmu);
	if (ret)
		goto out_disable_regulators;

	ret = arm_smmu_prepare_clocks(smmu);
	if (ret)
		goto out_disable_bus;

	smmu->power_count += 1;
	mutex_unlock(&smmu->power_lock);
	return 0;

out_disable_bus:
	arm_smmu_unrequest_bus(smmu);
out_disable_regulators:
	arm_smmu_disable_regulators(smmu);
out_unlock:
	mutex_unlock(&smmu->power_lock);
	return ret;
}

static void arm_smmu_power_off_slow(struct arm_smmu_device *smmu)
{
	mutex_lock(&smmu->power_lock);
	smmu->power_count--;
	WARN_ON(smmu->power_count < 0);

	if (smmu->power_count > 0) {
		mutex_unlock(&smmu->power_lock);
		return;
	}

	arm_smmu_unprepare_clocks(smmu);
	arm_smmu_unrequest_bus(smmu);
	arm_smmu_disable_regulators(smmu);

	mutex_unlock(&smmu->power_lock);
}

static int arm_smmu_power_on(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_power_on_slow(smmu);
	if (ret)
		return ret;

	ret = arm_smmu_enable_clocks_atomic(smmu);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	arm_smmu_power_off_slow(smmu);
	return ret;
}

static void arm_smmu_power_off(struct arm_smmu_device *smmu)
{
	arm_smmu_disable_clocks_atomic(smmu);
	arm_smmu_power_off_slow(smmu);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static int arm_smmu_domain_power_on(struct iommu_domain *domain,
				struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain)
		return arm_smmu_enable_clocks_atomic(smmu);

	return arm_smmu_power_on(smmu);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static void arm_smmu_domain_power_off(struct iommu_domain *domain,
				struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain) {
		arm_smmu_disable_clocks_atomic(smmu);
		return;
	}

	arm_smmu_power_off(smmu);
}

/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
				int cbndx)
{
	void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
	u32 val;

	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
				      !(val & TLBSTATUS_SACTIVE),
				      0, TLB_LOOP_TIMEOUT))
		dev_err(smmu->dev, "TLBSYNC timeout!\n");
}

static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
}

/* Must be called with clocks/regulators enabled */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
		__arm_smmu_tlb_sync(smmu);
	}
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

struct arm_smmu_secure_pool_chunk {
	void *addr;
	size_t size;
	struct list_head list;
};

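/*
 * Reuse a previously assigned page-table chunk of the requested size from
 * the domain's secure pool, if one is available.
 */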
static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
					size_t size)
{
	struct arm_smmu_secure_pool_chunk *it;

	list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
		if (it->size == size) {
			void *addr = it->addr;

			list_del(&it->list);
			kfree(it);
			return addr;
		}
	}

	return NULL;
}

static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
				     void *addr, size_t size)
{
	struct arm_smmu_secure_pool_chunk *chunk;

	chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
	if (!chunk)
		return -ENOMEM;

	chunk->addr = addr;
	chunk->size = size;
	memset(addr, 0, size);
	list_add(&chunk->list, &smmu_domain->secure_pool_list);

	return 0;
}

static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_secure_pool_chunk *it, *i;

	list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
		arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
		/* pages will be freed later (after being unassigned) */
		kfree(it);
	}
}

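/*
 * Page-table page allocator hook: for secure domains, prefer recycling
 * chunks from the secure pool and prepare (assign) freshly allocated pages.
 */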
static void *arm_smmu_alloc_pages_exact(void *cookie,
					size_t size, gfp_t gfp_mask)
{
	int ret;
	void *page;
	struct arm_smmu_domain *smmu_domain = cookie;

	if (!arm_smmu_is_domain_secure(smmu_domain))
		return alloc_pages_exact(size, gfp_mask);

	page = arm_smmu_secure_pool_remove(smmu_domain, size);
	if (page)
		return page;

	page = alloc_pages_exact(size, gfp_mask);
	if (page) {
		ret = arm_smmu_prepare_pgtable(page, cookie);
		if (ret) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return page;
}

static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	if (!arm_smmu_is_domain_secure(smmu_domain)) {
		free_pages_exact(virt, size);
		return;
	}

	if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
		arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
	.alloc_pages_exact = arm_smmu_alloc_pages_exact,
	.free_pages_exact = arm_smmu_free_pages_exact,
};

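/*
 * On an unhandled context fault, halt the SMMU and retry the translation
 * via ATOS to help distinguish a stale TLB entry from a genuinely bad mapping.
 */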
static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
					 dma_addr_t iova, u32 fsr)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu;
	void __iomem *cb_base;
	u64 sctlr, sctlr_orig;
	phys_addr_t phys;

	smmu = smmu_domain->smmu;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	arm_smmu_halt_nowait(smmu);

	writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);

	arm_smmu_wait_for_halt(smmu);

	/* clear FSR to allow ATOS to log any faults */
	writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* disable stall mode momentarily */
	sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
	sctlr = sctlr_orig & ~SCTLR_CFCFG;
	writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);

	phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);

	if (!phys) {
		dev_err(smmu->dev,
			"ATOS failed. Will issue a TLBIALL and try again...\n");
		arm_smmu_tlb_inv_context(smmu_domain);
		phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);
		if (phys)
			dev_err(smmu->dev,
				"ATOS succeeded this time. Maybe we missed a TLB invalidation while messing with page tables earlier??\n");
		else
			dev_err(smmu->dev,
				"ATOS still failed. If the page tables look good (check the software table walk) then hardware might be misbehaving.\n");
	}

	/* restore SCTLR */
	writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);

	arm_smmu_resume(smmu);

	return phys;
}

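/*
 * Per-context-bank fault handler: report the fault to the client driver,
 * then clear FSR and retry/terminate any stalled transaction unless the
 * client returned -EBUSY to keep the fault outstanding.
 */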
Will Deacon45ae7cf2013-06-24 18:31:25 +01001263static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
1264{
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001265 int flags, ret, tmp;
Patrick Daly5ba28112016-08-30 19:18:52 -07001266 u32 fsr, fsynr, resume;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001267 unsigned long iova;
1268 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +01001269 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001270 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1271 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001272 void __iomem *cb_base;
Shalaj Jain04059c52015-03-03 13:34:59 -08001273 void __iomem *gr1_base;
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001274 bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001275 phys_addr_t phys_soft;
Shalaj Jain04059c52015-03-03 13:34:59 -08001276 u32 frsynra;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001277 bool non_fatal_fault = !!(smmu_domain->attributes &
1278 DOMAIN_ATTR_NON_FATAL_FAULTS);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001279
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001280 static DEFINE_RATELIMIT_STATE(_rs,
1281 DEFAULT_RATELIMIT_INTERVAL,
1282 DEFAULT_RATELIMIT_BURST);
1283
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001284 ret = arm_smmu_power_on(smmu);
1285 if (ret)
1286 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001287
Shalaj Jain04059c52015-03-03 13:34:59 -08001288 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001289 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001290 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
1291
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001292 if (!(fsr & FSR_FAULT)) {
1293 ret = IRQ_NONE;
1294 goto out_power_off;
1295 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001296
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001297 if (fatal_asf && (fsr & FSR_ASF)) {
1298 dev_err(smmu->dev,
1299 "Took an address size fault. Refusing to recover.\n");
1300 BUG();
1301 }
1302
Will Deacon45ae7cf2013-06-24 18:31:25 +01001303 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
Patrick Daly5ba28112016-08-30 19:18:52 -07001304 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001305 if (fsr & FSR_TF)
1306 flags |= IOMMU_FAULT_TRANSLATION;
1307 if (fsr & FSR_PF)
1308 flags |= IOMMU_FAULT_PERMISSION;
Mitchel Humpherysdd75e332015-08-19 11:02:33 -07001309 if (fsr & FSR_EF)
1310 flags |= IOMMU_FAULT_EXTERNAL;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001311 if (fsr & FSR_SS)
1312 flags |= IOMMU_FAULT_TRANSACTION_STALLED;
Patrick Daly5ba28112016-08-30 19:18:52 -07001313
Robin Murphyf9a05f02016-04-13 18:13:01 +01001314 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001315 phys_soft = arm_smmu_iova_to_phys(domain, iova);
Shalaj Jain04059c52015-03-03 13:34:59 -08001316 frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
1317 frsynra &= CBFRSYNRA_SID_MASK;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001318 tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
1319 if (!tmp || (tmp == -EBUSY)) {
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001320 dev_dbg(smmu->dev,
1321 "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1322 iova, fsr, fsynr, cfg->cbndx);
1323 dev_dbg(smmu->dev,
1324 "soft iova-to-phys=%pa\n", &phys_soft);
Patrick Daly5ba28112016-08-30 19:18:52 -07001325 ret = IRQ_HANDLED;
Shrenuj Bansald5083c02015-09-18 14:59:09 -07001326 resume = RESUME_TERMINATE;
Patrick Daly5ba28112016-08-30 19:18:52 -07001327 } else {
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001328 phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
1329 fsr);
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001330 if (__ratelimit(&_rs)) {
1331 dev_err(smmu->dev,
1332 "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1333 iova, fsr, fsynr, cfg->cbndx);
1334 dev_err(smmu->dev, "FAR = %016lx\n",
1335 (unsigned long)iova);
1336 dev_err(smmu->dev,
1337 "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
1338 fsr,
1339 (fsr & 0x02) ? "TF " : "",
1340 (fsr & 0x04) ? "AFF " : "",
1341 (fsr & 0x08) ? "PF " : "",
1342 (fsr & 0x10) ? "EF " : "",
1343 (fsr & 0x20) ? "TLBMCF " : "",
1344 (fsr & 0x40) ? "TLBLKF " : "",
1345 (fsr & 0x80) ? "MHF " : "",
1346 (fsr & 0x40000000) ? "SS " : "",
1347 (fsr & 0x80000000) ? "MULTI " : "");
1348 dev_err(smmu->dev,
1349 "soft iova-to-phys=%pa\n", &phys_soft);
Mitchel Humpherysd03b65d2015-11-05 11:50:29 -08001350 if (!phys_soft)
1351 dev_err(smmu->dev,
1352 "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
1353 dev_name(smmu->dev));
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001354 dev_err(smmu->dev,
1355 "hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
1356 dev_err(smmu->dev, "SID=0x%x\n", frsynra);
1357 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001358 ret = IRQ_NONE;
1359 resume = RESUME_TERMINATE;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001360 if (!non_fatal_fault) {
1361 dev_err(smmu->dev,
1362 "Unhandled arm-smmu context fault!\n");
1363 BUG();
1364 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001365 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001366
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001367 /*
1368 * If the client returns -EBUSY, do not clear FSR and do not RESUME
1369 * if stalled. This is required to keep the IOMMU client stalled on
1370 * the outstanding fault. This gives the client a chance to take any
1371 * debug action and then terminate the stalled transaction.
1372 * So, the sequence in case of stall on fault should be:
1373 * 1) Do not clear FSR or write to RESUME here
1374 * 2) Client takes any debug action
1375 * 3) Client terminates the stalled transaction and resumes the IOMMU
1376 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1377 * not before so that the fault remains outstanding. This ensures
1378 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1379 * need to be terminated.
1380 */
1381 if (tmp != -EBUSY) {
1382 /* Clear the faulting FSR */
1383 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001384
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001385 /*
1386 * Barrier required to ensure that the FSR is cleared
1387 * before resuming SMMU operation
1388 */
1389 wmb();
1390
1391 /* Retry or terminate any stalled transactions */
1392 if (fsr & FSR_SS)
1393 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1394 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001395
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001396out_power_off:
1397 arm_smmu_power_off(smmu);
1398
Patrick Daly5ba28112016-08-30 19:18:52 -07001399 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001400}
1401
1402static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1403{
1404 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1405 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001406 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001407
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001408 if (arm_smmu_power_on(smmu))
1409 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001410
Will Deacon45ae7cf2013-06-24 18:31:25 +01001411 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1412 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1413 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1414 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1415
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001416 if (!gfsr) {
1417 arm_smmu_power_off(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001418 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001419 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001420
Will Deacon45ae7cf2013-06-24 18:31:25 +01001421 dev_err_ratelimited(smmu->dev,
1422 "Unexpected global fault, this could be serious\n");
1423 dev_err_ratelimited(smmu->dev,
1424 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1425 gfsr, gfsynr0, gfsynr1, gfsynr2);
1426
1427 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001428 arm_smmu_power_off(smmu);
Will Deaconadaba322013-07-31 19:21:26 +01001429 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001430}
1431
Will Deacon518f7132014-11-14 17:17:54 +00001432static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1433 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001434{
1435 u32 reg;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001436 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001437 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +01001438 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1439 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001440 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001441
Will Deacon45ae7cf2013-06-24 18:31:25 +01001442 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001443 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1444 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001445
Will Deacon4a1c93c2015-03-04 12:21:03 +00001446 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001447 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1448 reg = CBA2R_RW64_64BIT;
1449 else
1450 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001451 /* 16-bit VMIDs live in CBA2R */
1452 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001453 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001454
Will Deacon4a1c93c2015-03-04 12:21:03 +00001455 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
1456 }
1457
Will Deacon45ae7cf2013-06-24 18:31:25 +01001458 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001459 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001460 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001461 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001462
Will Deacon57ca90f2014-02-06 14:59:05 +00001463 /*
1464 * Use the weakest shareability/memory types, so they are
1465 * overridden by the ttbcr/pte.
1466 */
1467 if (stage1) {
1468 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1469 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001470 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1471 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001472 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001473 }
Will Deacon44680ee2014-06-25 11:29:12 +01001474 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001475
Will Deacon518f7132014-11-14 17:17:54 +00001476 /* TTBRs */
1477 if (stage1) {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001478 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
Will Deacon45ae7cf2013-06-24 18:31:25 +01001479
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001480 reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001481 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001482
1483 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001484 reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001485 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
Will Deacon518f7132014-11-14 17:17:54 +00001486 } else {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001487 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001488 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +00001489 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001490
Will Deacon518f7132014-11-14 17:17:54 +00001491 /* TTBCR */
1492 if (stage1) {
1493 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1494 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
1495 if (smmu->version > ARM_SMMU_V1) {
1496 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
Will Deacon5dc56162015-05-08 17:44:22 +01001497 reg |= TTBCR2_SEP_UPSTREAM;
Will Deacon518f7132014-11-14 17:17:54 +00001498 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001499 }
1500 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001501 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
1502 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001503 }
1504
Will Deacon518f7132014-11-14 17:17:54 +00001505 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001506 if (stage1) {
Will Deacon518f7132014-11-14 17:17:54 +00001507 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
Will Deacon45ae7cf2013-06-24 18:31:25 +01001508 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Will Deacon518f7132014-11-14 17:17:54 +00001509 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1510 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001511 }
1512
Will Deacon45ae7cf2013-06-24 18:31:25 +01001513 /* SCTLR */
Patrick Dalye62d3362016-03-15 18:58:28 -07001514 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_EAE_SBOP;
1515
1516 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) ||
1517 !stage1)
1518 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001519 if (stage1)
1520 reg |= SCTLR_S1_ASIDPNE;
1521#ifdef __BIG_ENDIAN
1522 reg |= SCTLR_E;
1523#endif
Will Deacon25724842013-08-21 13:49:53 +01001524 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001525}
1526
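/*
 * Pick an ASID for this context. Non-dynamic domains derive it directly
 * from the context bank index (cbndx + 1); dynamic domains allocate a
 * unique value from the SMMU-wide IDR, starting above the range used by
 * the context banks themselves.
 */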
Patrick Dalyc190d932016-08-30 17:23:28 -07001527static int arm_smmu_init_asid(struct iommu_domain *domain,
1528 struct arm_smmu_device *smmu)
1529{
1530 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1531 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1532 bool dynamic = is_dynamic_domain(domain);
1533 int ret;
1534
1535 if (!dynamic) {
1536 cfg->asid = cfg->cbndx + 1;
1537 } else {
1538 mutex_lock(&smmu->idr_mutex);
1539 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1540 smmu->num_context_banks + 2,
1541 MAX_ASID + 1, GFP_KERNEL);
1542
1543 mutex_unlock(&smmu->idr_mutex);
1544 if (ret < 0) {
1545 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1546 ret);
1547 return ret;
1548 }
1549 cfg->asid = ret;
1550 }
1551 return 0;
1552}
1553
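/* Release an IDR-allocated ASID; statically derived ASIDs are left alone. */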
1554static void arm_smmu_free_asid(struct iommu_domain *domain)
1555{
1556 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1557 struct arm_smmu_device *smmu = smmu_domain->smmu;
1558 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1559 bool dynamic = is_dynamic_domain(domain);
1560
1561 if (cfg->asid == INVALID_ASID || !dynamic)
1562 return;
1563
1564 mutex_lock(&smmu->idr_mutex);
1565 idr_remove(&smmu->asid_idr, cfg->asid);
1566 mutex_unlock(&smmu->idr_mutex);
1567}
1568
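/*
 * Finalise a domain on a particular SMMU: choose the translation stage and
 * context format, allocate a context bank (unless the domain is dynamic),
 * build the io-pgtable and, for non-dynamic domains, initialise the context
 * bank registers and request the context fault interrupt.
 */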
Will Deacon45ae7cf2013-06-24 18:31:25 +01001569static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Will Deacon44680ee2014-06-25 11:29:12 +01001570 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001571{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001572 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001573 unsigned long ias, oas;
1574 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001575 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001576 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001577 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001578 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyc190d932016-08-30 17:23:28 -07001579 bool dynamic;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001580
Will Deacon518f7132014-11-14 17:17:54 +00001581 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001582 if (smmu_domain->smmu)
1583 goto out_unlock;
1584
Patrick Dalyc190d932016-08-30 17:23:28 -07001585 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1586 smmu_domain->cfg.asid = INVALID_ASID;
1587
Robin Murphy98006992016-04-20 14:53:33 +01001588 /* We're bypassing these SIDs, so don't allocate an actual context */
1589 if (domain->type == IOMMU_DOMAIN_DMA) {
1590 smmu_domain->smmu = smmu;
1591 goto out_unlock;
1592 }
1593
Patrick Dalyc190d932016-08-30 17:23:28 -07001594 dynamic = is_dynamic_domain(domain);
1595 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1596 dev_err(smmu->dev, "dynamic domains not supported\n");
1597 ret = -EPERM;
1598 goto out_unlock;
1599 }
1600
Will Deaconc752ce42014-06-25 22:46:31 +01001601 /*
1602 * Mapping the requested stage onto what we support is surprisingly
1603 * complicated, mainly because the spec allows S1+S2 SMMUs without
1604 * support for nested translation. That means we end up with the
1605 * following table:
1606 *
1607 * Requested Supported Actual
1608 * S1 N S1
1609 * S1 S1+S2 S1
1610 * S1 S2 S2
1611 * S1 S1 S1
1612 * N N N
1613 * N S1+S2 S2
1614 * N S2 S2
1615 * N S1 S1
1616 *
1617 * Note that you can't actually request stage-2 mappings.
1618 */
1619 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1620 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1621 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1622 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1623
Robin Murphy7602b872016-04-28 17:12:09 +01001624 /*
1625 * Choosing a suitable context format is even more fiddly. Until we
1626 * grow some way for the caller to express a preference, and/or move
1627 * the decision into the io-pgtable code where it arguably belongs,
1628 * just aim for the closest thing to the rest of the system, and hope
1629 * that the hardware isn't esoteric enough that we can't assume AArch64
1630 * support to be a superset of AArch32 support...
1631 */
1632 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1633 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
1634 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1635 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1636 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1637 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1638 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1639
1640 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1641 ret = -EINVAL;
1642 goto out_unlock;
1643 }
1644
Will Deaconc752ce42014-06-25 22:46:31 +01001645 switch (smmu_domain->stage) {
1646 case ARM_SMMU_DOMAIN_S1:
1647 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1648 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001649 ias = smmu->va_size;
1650 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001651 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001652 fmt = ARM_64_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001653 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001654 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001655 ias = min(ias, 32UL);
1656 oas = min(oas, 40UL);
1657 }
Will Deaconc752ce42014-06-25 22:46:31 +01001658 break;
1659 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001660 /*
1661 * We will likely want to change this if/when KVM gets
1662 * involved.
1663 */
Will Deaconc752ce42014-06-25 22:46:31 +01001664 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001665 cfg->cbar = CBAR_TYPE_S2_TRANS;
1666 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001667 ias = smmu->ipa_size;
1668 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001669 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001670 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001671 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001672 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001673 ias = min(ias, 40UL);
1674 oas = min(oas, 40UL);
1675 }
Will Deaconc752ce42014-06-25 22:46:31 +01001676 break;
1677 default:
1678 ret = -EINVAL;
1679 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001680 }
1681
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001682 if (is_fast)
1683 fmt = ARM_V8L_FAST;
1684
1685
Patrick Dalyc190d932016-08-30 17:23:28 -07001686 /* Dynamic domains must set cbndx through domain attribute */
1687 if (!dynamic) {
1688 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
Will Deacon45ae7cf2013-06-24 18:31:25 +01001689 smmu->num_context_banks);
Patrick Dalyc190d932016-08-30 17:23:28 -07001690 if (ret < 0)
1691 goto out_unlock;
1692 cfg->cbndx = ret;
1693 }
Robin Murphyb7862e32016-04-13 18:13:03 +01001694 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001695 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1696 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001697 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001698 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001699 }
1700
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001701 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
Robin Murphyd5466352016-05-09 17:20:09 +01001702 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +00001703 .ias = ias,
1704 .oas = oas,
1705 .tlb = &arm_smmu_gather_ops,
Robin Murphy2df7a252015-07-29 19:46:06 +01001706 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +00001707 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001708
Will Deacon518f7132014-11-14 17:17:54 +00001709 smmu_domain->smmu = smmu;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001710 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1711 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001712 if (!pgtbl_ops) {
1713 ret = -ENOMEM;
1714 goto out_clear_smmu;
1715 }
1716
Patrick Dalyc11d1082016-09-01 15:52:44 -07001717 /*
1718 * assign any page table memory that might have been allocated
1719 * during alloc_io_pgtable_ops
1720 */
Patrick Dalye271f212016-10-04 13:24:49 -07001721 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001722 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001723 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001724
Robin Murphyd5466352016-05-09 17:20:09 +01001725 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001726 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Will Deacon518f7132014-11-14 17:17:54 +00001727
Patrick Dalyc190d932016-08-30 17:23:28 -07001728	/* Assign an ASID */
1729 ret = arm_smmu_init_asid(domain, smmu);
1730 if (ret)
1731 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001732
Patrick Dalyc190d932016-08-30 17:23:28 -07001733 if (!dynamic) {
1734 /* Initialise the context bank with our page table cfg */
1735 arm_smmu_init_context_bank(smmu_domain,
1736 &smmu_domain->pgtbl_cfg);
1737
1738 /*
1739 * Request context fault interrupt. Do this last to avoid the
1740 * handler seeing a half-initialised domain state.
1741 */
1742 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1743 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001744 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1745 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001746 if (ret < 0) {
1747 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1748 cfg->irptndx, irq);
1749 cfg->irptndx = INVALID_IRPTNDX;
1750 goto out_clear_smmu;
1751 }
1752 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001753 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001754 }
Will Deacon518f7132014-11-14 17:17:54 +00001755 mutex_unlock(&smmu_domain->init_mutex);
1756
1757 /* Publish page table ops for map/unmap */
1758 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001759 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001760
Will Deacon518f7132014-11-14 17:17:54 +00001761out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001762 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001763 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001764out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001765 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001766 return ret;
1767}
1768
1769static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1770{
Joerg Roedel1d672632015-03-26 13:43:10 +01001771 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001772 struct arm_smmu_device *smmu = smmu_domain->smmu;
1773 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01001774 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001775 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07001776 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001777 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001778
Robin Murphy98006992016-04-20 14:53:33 +01001779 if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001780 return;
1781
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001782 ret = arm_smmu_power_on(smmu);
1783 if (ret) {
1784		WARN_ONCE(ret, "Whoops, powering on smmu %p failed. Leaking context bank\n",
1785 smmu);
1786 return;
1787 }
1788
Patrick Dalyc190d932016-08-30 17:23:28 -07001789 dynamic = is_dynamic_domain(domain);
1790 if (dynamic) {
1791 arm_smmu_free_asid(domain);
1792 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001793 arm_smmu_power_off(smmu);
Patrick Dalye271f212016-10-04 13:24:49 -07001794 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001795 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001796 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001797 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001798 return;
1799 }
1800
Will Deacon518f7132014-11-14 17:17:54 +00001801 /*
1802 * Disable the context bank and free the page tables before freeing
1803 * it.
1804 */
Will Deacon44680ee2014-06-25 11:29:12 +01001805 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001806 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01001807
Will Deacon44680ee2014-06-25 11:29:12 +01001808 if (cfg->irptndx != INVALID_IRPTNDX) {
1809 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08001810 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001811 }
1812
Markus Elfring44830b02015-11-06 18:32:41 +01001813 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07001814 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001815 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001816 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001817 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001818 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001819
1820 arm_smmu_power_off(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001821}
1822
Joerg Roedel1d672632015-03-26 13:43:10 +01001823static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001824{
1825 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001826
Patrick Daly09801312016-08-29 17:02:52 -07001827 /* Do not support DOMAIN_DMA for now */
1828 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01001829 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001830 /*
1831 * Allocate the domain and initialise some of its data structures.
1832 * We can't really do anything meaningful until we've added a
1833 * master.
1834 */
1835 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1836 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001837 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001838
Robin Murphy9adb9592016-01-26 18:06:36 +00001839 if (type == IOMMU_DOMAIN_DMA &&
1840 iommu_get_dma_cookie(&smmu_domain->domain)) {
1841 kfree(smmu_domain);
1842 return NULL;
1843 }
1844
Will Deacon518f7132014-11-14 17:17:54 +00001845 mutex_init(&smmu_domain->init_mutex);
1846 spin_lock_init(&smmu_domain->pgtbl_lock);
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06001847 smmu_domain->cfg.cbndx = INVALID_CBNDX;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001848 smmu_domain->secure_vmid = VMID_INVAL;
1849 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
1850 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07001851 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001852 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Joerg Roedel1d672632015-03-26 13:43:10 +01001853
1854 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001855}
1856
Joerg Roedel1d672632015-03-26 13:43:10 +01001857static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001858{
Joerg Roedel1d672632015-03-26 13:43:10 +01001859 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01001860
1861 /*
1862 * Free the domain resources. We assume that all devices have
1863 * already been detached.
1864 */
Robin Murphy9adb9592016-01-26 18:06:36 +00001865 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001866 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001867 kfree(smmu_domain);
1868}
1869
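/*
 * Allocate one Stream Match Register per stream ID of this master and
 * program each with an exact-match entry (mask 0). Stream-indexing SMMUs
 * have no SMRs, so this is a no-op for them.
 */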
1870static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001871 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001872{
1873 int i;
1874 struct arm_smmu_smr *smrs;
1875 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1876
1877 if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
1878 return 0;
1879
Will Deacona9a1b0b2014-05-01 18:05:08 +01001880 if (cfg->smrs)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001881 return -EEXIST;
1882
Mitchel Humpherys29073202014-07-08 09:52:18 -07001883 smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001884 if (!smrs) {
Will Deacona9a1b0b2014-05-01 18:05:08 +01001885 dev_err(smmu->dev, "failed to allocate %d SMRs\n",
1886 cfg->num_streamids);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001887 return -ENOMEM;
1888 }
1889
Will Deacon44680ee2014-06-25 11:29:12 +01001890 /* Allocate the SMRs on the SMMU */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001891 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001892 int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
1893 smmu->num_mapping_groups);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001894 if (idx < 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001895 dev_err(smmu->dev, "failed to allocate free SMR\n");
1896 goto err_free_smrs;
1897 }
1898
1899 smrs[i] = (struct arm_smmu_smr) {
1900 .idx = idx,
1901 .mask = 0, /* We don't currently share SMRs */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001902 .id = cfg->streamids[i],
Will Deacon45ae7cf2013-06-24 18:31:25 +01001903 };
1904 }
1905
1906 /* It worked! Now, poke the actual hardware */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001907 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001908 u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
1909 smrs[i].mask << SMR_MASK_SHIFT;
1910 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
1911 }
1912
Will Deacona9a1b0b2014-05-01 18:05:08 +01001913 cfg->smrs = smrs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001914 return 0;
1915
1916err_free_smrs:
1917 while (--i >= 0)
1918 __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
1919 kfree(smrs);
1920 return -ENOSPC;
1921}
1922
1923static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001924 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001925{
1926 int i;
1927 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001928 struct arm_smmu_smr *smrs = cfg->smrs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001929
Will Deacon43b412b2014-07-15 11:22:24 +01001930 if (!smrs)
1931 return;
1932
Will Deacon45ae7cf2013-06-24 18:31:25 +01001933 /* Invalidate the SMRs before freeing back to the allocator */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001934 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001935 u8 idx = smrs[i].idx;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001936
Will Deacon45ae7cf2013-06-24 18:31:25 +01001937 writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
1938 __arm_smmu_free_bitmap(smmu->smr_map, idx);
1939 }
1940
Will Deacona9a1b0b2014-05-01 18:05:08 +01001941 cfg->smrs = NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001942 kfree(smrs);
1943}
1944
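/*
 * Route the master's stream IDs to this domain's context bank: configure
 * the SMRs (where present) and set the corresponding S2CRs to translate
 * into the domain's context bank.
 */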
Will Deacon45ae7cf2013-06-24 18:31:25 +01001945static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001946 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001947{
1948 int i, ret;
Will Deacon44680ee2014-06-25 11:29:12 +01001949 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001950 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1951
Will Deacon5f634952016-04-20 14:53:32 +01001952 /*
1953 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
1954 * for all devices behind the SMMU. Note that we need to take
1955	 * care configuring SMRs for devices that are both a platform_device
1956	 * and a PCI device (i.e. a PCI host controller).
1957 */
1958 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
1959 return 0;
1960
Will Deacon8f68f8e2014-07-15 11:27:08 +01001961 /* Devices in an IOMMU group may already be configured */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001962 ret = arm_smmu_master_configure_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001963 if (ret)
Will Deacon8f68f8e2014-07-15 11:27:08 +01001964 return ret == -EEXIST ? 0 : ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001965
Will Deacona9a1b0b2014-05-01 18:05:08 +01001966 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001967 u32 idx, s2cr;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001968
Will Deacona9a1b0b2014-05-01 18:05:08 +01001969 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Patrick Dalyf4930442016-06-27 20:50:14 -07001970 s2cr = S2CR_TYPE_TRANS |
Will Deacon44680ee2014-06-25 11:29:12 +01001971 (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001972 writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
1973 }
1974
1975 return 0;
1976}
1977
1978static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001979 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001980{
Will Deacon43b412b2014-07-15 11:22:24 +01001981 int i;
Will Deacon44680ee2014-06-25 11:29:12 +01001982 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon43b412b2014-07-15 11:22:24 +01001983 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001984
Will Deacon8f68f8e2014-07-15 11:27:08 +01001985 /* An IOMMU group is torn down by the first device to be removed */
1986 if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
1987 return;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001988
1989 /*
1990 * We *must* clear the S2CR first, because freeing the SMR means
1991 * that it can be re-allocated immediately.
1992 */
Will Deacon43b412b2014-07-15 11:22:24 +01001993 for (i = 0; i < cfg->num_streamids; ++i) {
1994 u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Robin Murphy25a1c962016-02-10 14:25:33 +00001995 u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
Will Deacon43b412b2014-07-15 11:22:24 +01001996
Robin Murphy25a1c962016-02-10 14:25:33 +00001997 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
Will Deacon43b412b2014-07-15 11:22:24 +01001998 }
1999
Will Deacona9a1b0b2014-05-01 18:05:08 +01002000 arm_smmu_master_free_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002001}
2002
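/*
 * Detach a device from its domain: point its stream IDs back at bypass or
 * fault (depending on disable_bypass) and drop the extra power vote held
 * for atomic domains. Dynamic domains never own stream IDs, so there is
 * nothing to undo for them.
 */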
Patrick Daly09801312016-08-29 17:02:52 -07002003static void arm_smmu_detach_dev(struct iommu_domain *domain,
2004 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002005{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002006 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07002007 struct arm_smmu_device *smmu = smmu_domain->smmu;
2008 struct arm_smmu_master_cfg *cfg;
2009 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07002010 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07002011
2012 if (dynamic)
2013 return;
2014
2015 cfg = find_smmu_master_cfg(dev);
2016 if (!cfg)
2017 return;
2018
2019 if (!smmu) {
2020 dev_err(dev, "Domain not attached; cannot detach!\n");
2021 return;
2022 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002023
2024 dev->archdata.iommu = NULL;
2025 arm_smmu_domain_remove_master(smmu_domain, cfg);
Patrick Daly8befb662016-08-17 20:03:28 -07002026
2027 /* Remove additional vote for atomic power */
2028 if (atomic_domain) {
2029 WARN_ON(arm_smmu_enable_clocks_atomic(smmu));
2030 arm_smmu_power_off(smmu);
2031 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002032}
2033
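/*
 * For secure domains, hand newly allocated page-table pages over to the
 * secure VMID (read-only) while keeping HLOS read/write access, via the
 * hypervisor assign call. No-op for non-secure domains.
 */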
Patrick Dalye271f212016-10-04 13:24:49 -07002034static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07002035{
Patrick Dalye271f212016-10-04 13:24:49 -07002036 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002037 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2038 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
2039 int source_vmid = VMID_HLOS;
2040 struct arm_smmu_pte_info *pte_info, *temp;
2041
Patrick Dalye271f212016-10-04 13:24:49 -07002042 if (!arm_smmu_is_domain_secure(smmu_domain))
2043 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002044
Patrick Dalye271f212016-10-04 13:24:49 -07002045 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002046 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2047 PAGE_SIZE, &source_vmid, 1,
2048 dest_vmids, dest_perms, 2);
2049 if (WARN_ON(ret))
2050 break;
2051 }
2052
2053 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2054 entry) {
2055 list_del(&pte_info->entry);
2056 kfree(pte_info);
2057 }
Patrick Dalye271f212016-10-04 13:24:49 -07002058 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002059}
2060
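/*
 * Return page-table pages queued on the unassign list to HLOS-only
 * ownership and free them. Only meaningful for secure domains.
 */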
2061static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2062{
2063 int ret;
2064 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002065 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002066 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2067 struct arm_smmu_pte_info *pte_info, *temp;
2068
Patrick Dalye271f212016-10-04 13:24:49 -07002069 if (!arm_smmu_is_domain_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002070 return;
2071
2072 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2073 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2074 PAGE_SIZE, source_vmlist, 2,
2075 &dest_vmids, &dest_perms, 1);
2076 if (WARN_ON(ret))
2077 break;
2078 free_pages_exact(pte_info->virt_addr, pte_info->size);
2079 }
2080
2081 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2082 entry) {
2083 list_del(&pte_info->entry);
2084 kfree(pte_info);
2085 }
2086}
2087
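/*
 * io-pgtable callback: queue a page-table page for unassignment from the
 * secure VMID. The hypervisor call and the actual free are deferred to
 * arm_smmu_unassign_table().
 */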
2088static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2089{
2090 struct arm_smmu_domain *smmu_domain = cookie;
2091 struct arm_smmu_pte_info *pte_info;
2092
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002093 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002094
2095 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2096 if (!pte_info)
2097 return;
2098
2099 pte_info->virt_addr = addr;
2100 pte_info->size = size;
2101 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2102}
2103
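/*
 * io-pgtable callback: record a freshly allocated page-table page so that
 * arm_smmu_assign_table() can later share it with the secure VMID.
 */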
2104static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2105{
2106 struct arm_smmu_domain *smmu_domain = cookie;
2107 struct arm_smmu_pte_info *pte_info;
2108
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002109 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002110
2111 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2112 if (!pte_info)
2113 return -ENOMEM;
2114 pte_info->virt_addr = addr;
2115 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2116 return 0;
2117}
2118
Will Deacon45ae7cf2013-06-24 18:31:25 +01002119static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2120{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002121 int ret;
Joerg Roedel1d672632015-03-26 13:43:10 +01002122 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002123 struct arm_smmu_device *smmu;
Will Deacona9a1b0b2014-05-01 18:05:08 +01002124 struct arm_smmu_master_cfg *cfg;
Patrick Daly8befb662016-08-17 20:03:28 -07002125 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002126
Will Deacon8f68f8e2014-07-15 11:27:08 +01002127 smmu = find_smmu_for_device(dev);
Will Deacon44680ee2014-06-25 11:29:12 +01002128 if (!smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002129 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2130 return -ENXIO;
2131 }
2132
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002133 /* Enable Clocks and Power */
2134 ret = arm_smmu_power_on(smmu);
2135 if (ret)
2136 return ret;
2137
Patrick Daly8befb662016-08-17 20:03:28 -07002138 /*
2139 * Keep an additional vote for non-atomic power until domain is
2140 * detached
2141 */
2142 if (atomic_domain) {
2143 ret = arm_smmu_power_on(smmu);
2144 if (ret)
2145 goto out_power_off;
2146
2147 arm_smmu_disable_clocks_atomic(smmu);
2148 }
2149
Will Deacon518f7132014-11-14 17:17:54 +00002150 /* Ensure that the domain is finalised */
2151 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002152 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002153 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002154
Patrick Dalyc190d932016-08-30 17:23:28 -07002155 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002156 if (is_dynamic_domain(domain)) {
2157 ret = 0;
2158 goto out_power_off;
2159 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002160
Will Deacon45ae7cf2013-06-24 18:31:25 +01002161 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002162 * Sanity check the domain. We don't support domains across
2163 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002164 */
Will Deacon518f7132014-11-14 17:17:54 +00002165 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002166 dev_err(dev,
2167 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002168 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002169 ret = -EINVAL;
2170 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002171 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002172
2173 /* Looks ok, so add the device to the domain */
Will Deacon8f68f8e2014-07-15 11:27:08 +01002174 cfg = find_smmu_master_cfg(dev);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002175 if (!cfg) {
2176 ret = -ENODEV;
2177 goto out_power_off;
2178 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002179
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002180 /* Detach the dev from its current domain */
2181 if (dev->archdata.iommu)
Patrick Daly09801312016-08-29 17:02:52 -07002182 arm_smmu_detach_dev(dev->archdata.iommu, dev);
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002183
Will Deacon844e35b2014-07-17 11:23:51 +01002184 ret = arm_smmu_domain_add_master(smmu_domain, cfg);
2185 if (!ret)
2186 dev->archdata.iommu = domain;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002187
2188out_power_off:
2189 arm_smmu_power_off(smmu);
2190
Will Deacon45ae7cf2013-06-24 18:31:25 +01002191 return ret;
2192}
2193
Will Deacon45ae7cf2013-06-24 18:31:25 +01002194static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002195 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002196{
Will Deacon518f7132014-11-14 17:17:54 +00002197 int ret;
2198 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002199 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002200	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002201
Will Deacon518f7132014-11-14 17:17:54 +00002202 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002203 return -ENODEV;
2204
Patrick Dalye271f212016-10-04 13:24:49 -07002205 arm_smmu_secure_domain_lock(smmu_domain);
2206
Will Deacon518f7132014-11-14 17:17:54 +00002207 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2208 ret = ops->map(ops, iova, paddr, size, prot);
2209 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002210
2211 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002212 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002213
Will Deacon518f7132014-11-14 17:17:54 +00002214 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002215}
2216
2217static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2218 size_t size)
2219{
Will Deacon518f7132014-11-14 17:17:54 +00002220 size_t ret;
2221 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002222 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002223	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002224
Will Deacon518f7132014-11-14 17:17:54 +00002225 if (!ops)
2226 return 0;
2227
Patrick Daly8befb662016-08-17 20:03:28 -07002228 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002229 if (ret)
2230 return ret;
2231
Patrick Dalye271f212016-10-04 13:24:49 -07002232 arm_smmu_secure_domain_lock(smmu_domain);
2233
Will Deacon518f7132014-11-14 17:17:54 +00002234 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2235 ret = ops->unmap(ops, iova, size);
2236 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002237
Patrick Daly8befb662016-08-17 20:03:28 -07002238 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002239 /*
2240 * While splitting up block mappings, we might allocate page table
2241	 * memory during unmap, so the VMIDs need to be assigned to the
2242 * memory here as well.
2243 */
2244 arm_smmu_assign_table(smmu_domain);
2245	/* Also unassign any pages that were freed during unmap */
2246 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002247 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002248 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002249}
2250
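/*
 * Map a scatterlist through the io-pgtable map_sg helper under the
 * page-table lock. If the mapping fails part-way, whatever was mapped so
 * far is unmapped again before returning.
 */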
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002251static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2252 struct scatterlist *sg, unsigned int nents, int prot)
2253{
2254 int ret;
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002255 size_t size;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002256 unsigned long flags;
2257 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2258 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2259
2260 if (!ops)
2261 return -ENODEV;
2262
Patrick Daly8befb662016-08-17 20:03:28 -07002263 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002264 if (ret)
2265 return ret;
2266
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002267 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002268 ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002269 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002270
2271 if (!ret)
2272 arm_smmu_unmap(domain, iova, size);
2273
Patrick Daly8befb662016-08-17 20:03:28 -07002274 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002275 arm_smmu_assign_table(smmu_domain);
2276
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002277 return ret;
2278}
2279
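/*
 * Translate an IOVA via the hardware ATS1PR register rather than a software
 * table walk, optionally halting the SMMU around the operation. If the ATSR
 * poll times out, the software walk result is logged and 0 is returned.
 */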
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002280static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002281 dma_addr_t iova, bool do_halt)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002282{
Joerg Roedel1d672632015-03-26 13:43:10 +01002283 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002284 struct arm_smmu_device *smmu = smmu_domain->smmu;
2285 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2286	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2287 struct device *dev = smmu->dev;
2288 void __iomem *cb_base;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08002289 unsigned long flags;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002290 u32 tmp;
2291 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002292 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002293
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08002294 spin_lock_irqsave(&smmu->atos_lock, flags);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002295 if (do_halt && arm_smmu_halt(smmu)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002296 phys = 0;
2297 goto out_unlock;
2298 }
2299
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002300 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2301
Robin Murphy661d9622015-05-27 17:09:34 +01002302 /* ATS1 registers can only be written atomically */
2303 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002304 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002305 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2306 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002307 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002308
2309 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2310 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002311 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002312 dev_err(dev,
2313 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2314 &iova, &phys);
2315 phys = 0;
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002316 goto out_resume;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002317 }
2318
Robin Murphyf9a05f02016-04-13 18:13:01 +01002319 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002320 if (phys & CB_PAR_F) {
2321 dev_err(dev, "translation fault!\n");
2322 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002323 phys = 0;
2324 } else {
2325 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002326 }
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002327out_resume:
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002328 if (do_halt)
2329 arm_smmu_resume(smmu);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002330out_unlock:
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08002331 spin_unlock_irqrestore(&smmu->atos_lock, flags);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002332 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002333}
2334
Will Deacon45ae7cf2013-06-24 18:31:25 +01002335static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002336 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002337{
Will Deacon518f7132014-11-14 17:17:54 +00002338 phys_addr_t ret;
2339 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002340 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002341	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002342
Will Deacon518f7132014-11-14 17:17:54 +00002343 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002344 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002345
Will Deacon518f7132014-11-14 17:17:54 +00002346 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002347 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002348 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002349
Will Deacon518f7132014-11-14 17:17:54 +00002350 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002351}
2352
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002353/*
2354 * This function can sleep, and cannot be called from atomic context. Will
2355 * power on register block if required. This restriction does not apply to the
2356 * original iova_to_phys() op.
2357 */
2358static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2359 dma_addr_t iova)
2360{
2361 phys_addr_t ret = 0;
2362 unsigned long flags;
2363 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002364 int err;
2365
2366 err = arm_smmu_power_on(smmu_domain->smmu);
2367 if (err)
2368 return 0;
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002369
2370 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2371 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2372 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002373 ret = __arm_smmu_iova_to_phys_hard(domain, iova, true);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002374
2375 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2376
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002377 arm_smmu_power_off(smmu_domain->smmu);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002378 return ret;
2379}
2380
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002381static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
2382 struct iommu_domain *domain, dma_addr_t iova)
2383{
2384 return __arm_smmu_iova_to_phys_hard(domain, iova, false);
2385}
2386
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002387static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002388{
Will Deacond0948942014-06-24 17:30:10 +01002389 switch (cap) {
2390 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002391 /*
2392 * Return true here as the SMMU can always send out coherent
2393 * requests.
2394 */
2395 return true;
Will Deacond0948942014-06-24 17:30:10 +01002396 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002397 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002398 case IOMMU_CAP_NOEXEC:
2399 return true;
Will Deacond0948942014-06-24 17:30:10 +01002400 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002401 return false;
Will Deacond0948942014-06-24 17:30:10 +01002402 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002403}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002404
Will Deacona9a1b0b2014-05-01 18:05:08 +01002405static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
2406{
2407 *((u16 *)data) = alias;
2408 return 0; /* Continue walking */
Will Deacon45ae7cf2013-06-24 18:31:25 +01002409}
2410
Will Deacon8f68f8e2014-07-15 11:27:08 +01002411static void __arm_smmu_release_pci_iommudata(void *data)
2412{
2413 kfree(data);
2414}
2415
Joerg Roedelaf659932015-10-21 23:51:41 +02002416static int arm_smmu_init_pci_device(struct pci_dev *pdev,
2417 struct iommu_group *group)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002418{
Will Deacon03edb222015-01-19 14:27:33 +00002419 struct arm_smmu_master_cfg *cfg;
Joerg Roedelaf659932015-10-21 23:51:41 +02002420 u16 sid;
2421 int i;
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002422
Will Deacon03edb222015-01-19 14:27:33 +00002423 cfg = iommu_group_get_iommudata(group);
2424 if (!cfg) {
Will Deacona9a1b0b2014-05-01 18:05:08 +01002425 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
Joerg Roedelaf659932015-10-21 23:51:41 +02002426 if (!cfg)
2427 return -ENOMEM;
Will Deacona9a1b0b2014-05-01 18:05:08 +01002428
Will Deacon03edb222015-01-19 14:27:33 +00002429 iommu_group_set_iommudata(group, cfg,
2430 __arm_smmu_release_pci_iommudata);
Will Deacona9a1b0b2014-05-01 18:05:08 +01002431 }
2432
Joerg Roedelaf659932015-10-21 23:51:41 +02002433 if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
2434 return -ENOSPC;
Will Deacona9a1b0b2014-05-01 18:05:08 +01002435
Will Deacon03edb222015-01-19 14:27:33 +00002436 /*
2437 * Assume Stream ID == Requester ID for now.
2438 * We need a way to describe the ID mappings in FDT.
2439 */
2440 pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
2441 for (i = 0; i < cfg->num_streamids; ++i)
2442 if (cfg->streamids[i] == sid)
2443 break;
2444
2445 /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
2446 if (i == cfg->num_streamids)
2447 cfg->streamids[cfg->num_streamids++] = sid;
2448
2449 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002450}
2451
Joerg Roedelaf659932015-10-21 23:51:41 +02002452static int arm_smmu_init_platform_device(struct device *dev,
2453 struct iommu_group *group)
Will Deacon03edb222015-01-19 14:27:33 +00002454{
Will Deacon03edb222015-01-19 14:27:33 +00002455 struct arm_smmu_device *smmu = find_smmu_for_device(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02002456 struct arm_smmu_master *master;
Will Deacon03edb222015-01-19 14:27:33 +00002457
2458 if (!smmu)
2459 return -ENODEV;
2460
2461 master = find_smmu_master(smmu, dev->of_node);
2462 if (!master)
2463 return -ENODEV;
2464
Will Deacon03edb222015-01-19 14:27:33 +00002465 iommu_group_set_iommudata(group, &master->cfg, NULL);
Joerg Roedelaf659932015-10-21 23:51:41 +02002466
2467 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00002468}
2469
2470static int arm_smmu_add_device(struct device *dev)
2471{
Joerg Roedelaf659932015-10-21 23:51:41 +02002472 struct iommu_group *group;
Will Deacon03edb222015-01-19 14:27:33 +00002473
Joerg Roedelaf659932015-10-21 23:51:41 +02002474 group = iommu_group_get_for_dev(dev);
2475 if (IS_ERR(group))
2476 return PTR_ERR(group);
2477
Peng Fan9a4a9d82015-11-20 16:56:18 +08002478 iommu_group_put(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02002479 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00002480}
2481
Will Deacon45ae7cf2013-06-24 18:31:25 +01002482static void arm_smmu_remove_device(struct device *dev)
2483{
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002484 iommu_group_remove_device(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002485}
2486
Joerg Roedelaf659932015-10-21 23:51:41 +02002487static struct iommu_group *arm_smmu_device_group(struct device *dev)
2488{
2489 struct iommu_group *group;
2490 int ret;
2491
2492 if (dev_is_pci(dev))
2493 group = pci_device_group(dev);
2494 else
2495 group = generic_device_group(dev);
2496
2497 if (IS_ERR(group))
2498 return group;
2499
2500 if (dev_is_pci(dev))
2501 ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
2502 else
2503 ret = arm_smmu_init_platform_device(dev, group);
2504
2505 if (ret) {
2506 iommu_group_put(group);
2507 group = ERR_PTR(ret);
2508 }
2509
2510 return group;
2511}
2512
Will Deaconc752ce42014-06-25 22:46:31 +01002513static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
2514 enum iommu_attr attr, void *data)
2515{
Joerg Roedel1d672632015-03-26 13:43:10 +01002516 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002517 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01002518
2519 switch (attr) {
2520 case DOMAIN_ATTR_NESTING:
2521 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
2522 return 0;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08002523 case DOMAIN_ATTR_PT_BASE_ADDR:
2524 *((phys_addr_t *)data) =
2525 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2526 return 0;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002527 case DOMAIN_ATTR_CONTEXT_BANK:
2528 /* context bank index isn't valid until we are attached */
2529 if (smmu_domain->smmu == NULL)
2530 return -ENODEV;
2531
2532 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
2533 ret = 0;
2534 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002535 case DOMAIN_ATTR_TTBR0: {
2536 u64 val;
2537 struct arm_smmu_device *smmu = smmu_domain->smmu;
2538 /* not valid until we are attached */
2539 if (smmu == NULL)
2540 return -ENODEV;
2541
2542 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2543 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
2544 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
2545 << (TTBRn_ASID_SHIFT);
2546 *((u64 *)data) = val;
2547 ret = 0;
2548 break;
2549 }
2550 case DOMAIN_ATTR_CONTEXTIDR:
2551 /* not valid until attached */
2552 if (smmu_domain->smmu == NULL)
2553 return -ENODEV;
2554 *((u32 *)data) = smmu_domain->cfg.procid;
2555 ret = 0;
2556 break;
2557 case DOMAIN_ATTR_PROCID:
2558 *((u32 *)data) = smmu_domain->cfg.procid;
2559 ret = 0;
2560 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002561 case DOMAIN_ATTR_DYNAMIC:
2562 *((int *)data) = !!(smmu_domain->attributes
2563 & (1 << DOMAIN_ATTR_DYNAMIC));
2564 ret = 0;
2565 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002566 case DOMAIN_ATTR_NON_FATAL_FAULTS:
2567 *((int *)data) = !!(smmu_domain->attributes
2568 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
2569 ret = 0;
2570 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07002571 case DOMAIN_ATTR_S1_BYPASS:
2572 *((int *)data) = !!(smmu_domain->attributes
2573 & (1 << DOMAIN_ATTR_S1_BYPASS));
2574 ret = 0;
2575 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002576 case DOMAIN_ATTR_SECURE_VMID:
2577 *((int *)data) = smmu_domain->secure_vmid;
2578 ret = 0;
2579 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08002580 case DOMAIN_ATTR_PGTBL_INFO: {
2581 struct iommu_pgtbl_info *info = data;
2582
2583 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
2584 ret = -ENODEV;
2585 break;
2586 }
2587 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
2588 ret = 0;
2589 break;
2590 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002591 case DOMAIN_ATTR_FAST:
2592 *((int *)data) = !!(smmu_domain->attributes
2593 & (1 << DOMAIN_ATTR_FAST));
2594 ret = 0;
2595 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002596 default:
2597 return -ENODEV;
2598 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002599 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002600}
2601
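/*
 * Domain attribute updates. Attributes that affect context-bank configuration
 * (PROCID, DYNAMIC, CONTEXT_BANK, S1_BYPASS, ATOMIC) can only be changed while
 * the domain is detached. A purely illustrative caller sequence for setting up
 * a dynamic domain (everything other than the attribute names below is
 * hypothetical) might be:
 *
 *	int dynamic = 1;
 *	unsigned int cbndx = base_cbndx;	// from the base domain's
 *						// DOMAIN_ATTR_CONTEXT_BANK
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_DYNAMIC, &dynamic);
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_CONTEXT_BANK, &cbndx);
 *	iommu_attach_device(domain, dev);	// cbndx is validated at attach
 */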
2602static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
2603 enum iommu_attr attr, void *data)
2604{
Will Deacon518f7132014-11-14 17:17:54 +00002605 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01002606 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01002607
Will Deacon518f7132014-11-14 17:17:54 +00002608 mutex_lock(&smmu_domain->init_mutex);
2609
Will Deaconc752ce42014-06-25 22:46:31 +01002610 switch (attr) {
2611 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00002612 if (smmu_domain->smmu) {
2613 ret = -EPERM;
2614 goto out_unlock;
2615 }
2616
Will Deaconc752ce42014-06-25 22:46:31 +01002617 if (*(int *)data)
2618 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2619 else
2620 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2621
Will Deacon518f7132014-11-14 17:17:54 +00002622 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002623 case DOMAIN_ATTR_PROCID:
2624 if (smmu_domain->smmu != NULL) {
2625 dev_err(smmu_domain->smmu->dev,
2626 "cannot change procid attribute while attached\n");
2627 ret = -EBUSY;
2628 break;
2629 }
2630 smmu_domain->cfg.procid = *((u32 *)data);
2631 ret = 0;
2632 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002633 case DOMAIN_ATTR_DYNAMIC: {
2634 int dynamic = *((int *)data);
2635
2636 if (smmu_domain->smmu != NULL) {
2637 dev_err(smmu_domain->smmu->dev,
2638 "cannot change dynamic attribute while attached\n");
2639 ret = -EBUSY;
2640 break;
2641 }
2642
2643 if (dynamic)
2644 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
2645 else
2646 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
2647 ret = 0;
2648 break;
2649 }
2650 case DOMAIN_ATTR_CONTEXT_BANK:
2651 /* context bank can't be set while attached */
2652 if (smmu_domain->smmu != NULL) {
2653 ret = -EBUSY;
2654 break;
2655 }
2656 /* ... and it can only be set for dynamic contexts. */
2657 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
2658 ret = -EINVAL;
2659 break;
2660 }
2661
2662 /* this will be validated during attach */
2663 smmu_domain->cfg.cbndx = *((unsigned int *)data);
2664 ret = 0;
2665 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002666 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
2667 u32 non_fatal_faults = *((int *)data);
2668
2669 if (non_fatal_faults)
2670 smmu_domain->attributes |=
2671 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
2672 else
2673 smmu_domain->attributes &=
2674 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
2675 ret = 0;
2676 break;
2677 }
Patrick Dalye62d3362016-03-15 18:58:28 -07002678 case DOMAIN_ATTR_S1_BYPASS: {
2679 int bypass = *((int *)data);
2680
2681 /* bypass can't be changed while attached */
2682 if (smmu_domain->smmu != NULL) {
2683 ret = -EBUSY;
2684 break;
2685 }
2686 if (bypass)
2687 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
2688 else
2689 smmu_domain->attributes &=
2690 ~(1 << DOMAIN_ATTR_S1_BYPASS);
2691
2692 ret = 0;
2693 break;
2694 }
Patrick Daly8befb662016-08-17 20:03:28 -07002695 case DOMAIN_ATTR_ATOMIC:
2696 {
2697 int atomic_ctx = *((int *)data);
2698
2699 /* can't be changed while attached */
2700 if (smmu_domain->smmu != NULL) {
2701 ret = -EBUSY;
2702 break;
2703 }
2704 if (atomic_ctx)
2705 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
2706 else
2707 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
2708 break;
2709 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002710 case DOMAIN_ATTR_SECURE_VMID:
2711 if (smmu_domain->secure_vmid != VMID_INVAL) {
2712 ret = -ENODEV;
2713 WARN(1, "secure vmid already set!");
2714 break;
2715 }
2716 smmu_domain->secure_vmid = *((int *)data);
2717 break;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002718 case DOMAIN_ATTR_FAST:
2719 if (*((int *)data))
2720 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
2721 ret = 0;
2722 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002723 default:
Will Deacon518f7132014-11-14 17:17:54 +00002724 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01002725 }
Will Deacon518f7132014-11-14 17:17:54 +00002726
2727out_unlock:
2728 mutex_unlock(&smmu_domain->init_mutex);
2729 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002730}
2731
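/*
 * Synthesise a context fault by writing the requested fault status bits to
 * FSRRESTORE on the domain's context bank. The SMMU must be powered up for
 * the register write, and we then sleep briefly so the resulting interrupt
 * has a chance to fire and be handled.
 */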
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002732static void arm_smmu_trigger_fault(struct iommu_domain *domain,
2733 unsigned long flags)
2734{
2735 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2736 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2737 struct arm_smmu_device *smmu;
2738 void __iomem *cb_base;
2739
2740 if (!smmu_domain->smmu) {
2741 pr_err("Can't trigger faults on non-attached domains\n");
2742 return;
2743 }
2744
2745 smmu = smmu_domain->smmu;
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002746 if (arm_smmu_power_on(smmu))
2747 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002748
2749 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2750 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
2751 flags, cfg->cbndx);
2752 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07002753 /* give the interrupt time to fire... */
2754 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002755
2756 arm_smmu_power_off(smmu);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002757}
2758
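/*
 * Debug helpers: read/write a register within this domain's context bank.
 * The offset is bounded to SZ_4K (one context-bank page), and the domain
 * must currently be attached so that cfg->cbndx is valid.
 */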
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002759static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
2760 unsigned long offset)
2761{
2762 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2763 struct arm_smmu_device *smmu;
2764 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2765 void __iomem *cb_base;
2766 unsigned long val;
2767
2768 if (offset >= SZ_4K) {
2769 pr_err("Invalid offset: 0x%lx\n", offset);
2770 return 0;
2771 }
2772
2773 smmu = smmu_domain->smmu;
2774 if (!smmu) {
2775 WARN(1, "Can't read registers of a detached domain\n");
2776 val = 0;
2777 return val;
2778 }
2779
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002780 if (arm_smmu_power_on(smmu))
2781 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002782
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002783 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2784 val = readl_relaxed(cb_base + offset);
2785
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002786 arm_smmu_power_off(smmu);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002787 return val;
2788}
2789
2790static void arm_smmu_reg_write(struct iommu_domain *domain,
2791 unsigned long offset, unsigned long val)
2792{
2793 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2794 struct arm_smmu_device *smmu;
2795 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2796 void __iomem *cb_base;
2797
2798 if (offset >= SZ_4K) {
2799 pr_err("Invalid offset: 0x%lx\n", offset);
2800 return;
2801 }
2802
2803 smmu = smmu_domain->smmu;
2804 if (!smmu) {
2805		WARN(1, "Can't write registers of a detached domain\n");
2806 return;
2807 }
2808
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002809 if (arm_smmu_power_on(smmu))
2810 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002811
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002812 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2813 writel_relaxed(val, cb_base + offset);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002814
2815 arm_smmu_power_off(smmu);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002816}
2817
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08002818static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
2819{
2820 arm_smmu_tlb_inv_context(to_smmu_domain(domain));
2821}
2822
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002823static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
2824{
2825 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2826
2827 return arm_smmu_power_on(smmu_domain->smmu);
2828}
2829
2830static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
2831{
2832 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2833
2834 arm_smmu_power_off(smmu_domain->smmu);
2835}
2836
Will Deacon518f7132014-11-14 17:17:54 +00002837static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01002838 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01002839 .domain_alloc = arm_smmu_domain_alloc,
2840 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01002841 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07002842 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01002843 .map = arm_smmu_map,
2844 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002845 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01002846 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002847 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01002848 .add_device = arm_smmu_add_device,
2849 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02002850 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01002851 .domain_get_attr = arm_smmu_domain_get_attr,
2852 .domain_set_attr = arm_smmu_domain_set_attr,
Will Deacon518f7132014-11-14 17:17:54 +00002853 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002854 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002855 .reg_read = arm_smmu_reg_read,
2856 .reg_write = arm_smmu_reg_write,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08002857 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002858 .enable_config_clocks = arm_smmu_enable_config_clocks,
2859 .disable_config_clocks = arm_smmu_disable_config_clocks,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002860};
2861
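/*
 * Implementation-defined halt/resume: request a local halt via the
 * implementation-defined MICRO_MMU_CTRL register and, optionally, poll for
 * the IDLE bit, giving up after roughly 30ms. Used to quiesce the SMMU
 * around implementation-defined register programming.
 */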
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002862static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002863{
2864 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002865 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002866
2867 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
2868 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
2869 0, 30000)) {
2870 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
2871 return -EBUSY;
2872 }
2873
2874 return 0;
2875}
2876
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002877static int __arm_smmu_halt(struct arm_smmu_device *smmu, bool wait)
2878{
2879 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
2880 u32 reg;
2881
2882 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2883 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
2884 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2885
2886 return wait ? arm_smmu_wait_for_halt(smmu) : 0;
2887}
2888
2889static int arm_smmu_halt(struct arm_smmu_device *smmu)
2890{
2891 return __arm_smmu_halt(smmu, true);
2892}
2893
2894static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu)
2895{
2896 return __arm_smmu_halt(smmu, false);
2897}
2898
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002899static void arm_smmu_resume(struct arm_smmu_device *smmu)
2900{
2901 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
2902 u32 reg;
2903
2904 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2905 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
2906 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2907}
2908
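/*
 * Apply the implementation-defined register writes collected from the
 * "attach-impl-defs" DT property (see arm_smmu_parse_impl_def_registers()
 * below), with the SMMU halted around the writes.
 */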
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002909static void arm_smmu_impl_def_programming(struct arm_smmu_device *smmu)
2910{
2911 int i;
2912 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
2913
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002914 arm_smmu_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002915 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
2916 writel_relaxed(regs[i].value,
2917 ARM_SMMU_GR0(smmu) + regs[i].offset);
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002918 arm_smmu_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002919}
2920
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08002921static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002922{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08002923 int i;
2924 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002925 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002926 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002927
Peng Fan3ca37122016-05-03 21:50:30 +08002928 /*
2929	 * Before clearing ARM_MMU500_ACTLR_CPRE, the CACHE_LOCK bit of
2930	 * ACR must be cleared first; note that CACHE_LOCK is only
2931	 * present in MMU-500 r2 onwards.
2932 */
2933 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
2934 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
2935 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
2936 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
2937 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
2938 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
2939 }
2940
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002941 /* Make sure all context banks are disabled and clear CB_FSR */
2942 for (i = 0; i < smmu->num_context_banks; ++i) {
2943 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
2944 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
2945 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002946 /*
2947 * Disable MMU-500's not-particularly-beneficial next-page
2948 * prefetcher for the sake of errata #841119 and #826419.
2949 */
2950 if (smmu->model == ARM_MMU500) {
2951 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
2952 reg &= ~ARM_MMU500_ACTLR_CPRE;
2953 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
2954 }
Patrick Dalyf0d4e212016-06-20 15:50:14 -07002955
2956 if (smmu->model == QCOM_SMMUV2) {
2957 reg = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
2958 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
2959 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
2960 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
2961 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002962 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08002963}
2964
2965static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
2966{
2967 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
2968 int i = 0;
2969 u32 reg;
2970
2971 /* clear global FSR */
2972 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
2973 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
2974
2975 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
2976 /*
2977 * Mark all SMRn as invalid and all S2CRn as bypass unless
2978 * overridden
2979 */
2980 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
2981 for (i = 0; i < smmu->num_mapping_groups; ++i) {
2982 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
2983 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
2984 }
2985
2986 arm_smmu_context_bank_reset(smmu);
2987 }
Will Deacon1463fe42013-07-31 19:21:27 +01002988
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002989 /* Program implementation defined registers */
2990 arm_smmu_impl_def_programming(smmu);
2991
Will Deacon45ae7cf2013-06-24 18:31:25 +01002992 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01002993 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
2994 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
2995
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00002996 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002997
Will Deacon45ae7cf2013-06-24 18:31:25 +01002998 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002999 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003000
3001 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003002 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003003
Robin Murphy25a1c962016-02-10 14:25:33 +00003004 /* Enable client access, handling unmatched streams as appropriate */
3005 reg &= ~sCR0_CLIENTPD;
3006 if (disable_bypass)
3007 reg |= sCR0_USFCFG;
3008 else
3009 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003010
3011 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003012 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003013
3014 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003015 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003016
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003017 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3018 reg |= sCR0_VMID16EN;
3019
Will Deacon45ae7cf2013-06-24 18:31:25 +01003020 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003021 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003022 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003023}
3024
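/*
 * Translate the address-size encodings found in the SMMU ID registers into
 * bit widths: for example, a field value of 2 means 40-bit addressing, while
 * values of 5 and above map to 48 bits.
 */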
3025static int arm_smmu_id_size_to_bits(int size)
3026{
3027 switch (size) {
3028 case 0:
3029 return 32;
3030 case 1:
3031 return 36;
3032 case 2:
3033 return 40;
3034 case 3:
3035 return 42;
3036 case 4:
3037 return 44;
3038 case 5:
3039 default:
3040 return 48;
3041 }
3042}
3043
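/*
 * Parse the optional "attach-impl-defs" DT property: a flat list of
 * <offset value> u32 pairs, each naming an implementation-defined register
 * (as an offset from the SMMU global register space) and the value to
 * program into it during device reset. A purely illustrative entry (the
 * offsets and values here are hypothetical, not taken from any real device
 * tree) might look like:
 *
 *	attach-impl-defs = <0x6000 0x270>,
 *			   <0x6800 0x45f>;
 */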
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003044static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
3045{
3046 struct device *dev = smmu->dev;
3047 int i, ntuples, ret;
3048 u32 *tuples;
3049 struct arm_smmu_impl_def_reg *regs, *regit;
3050
3051 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
3052 return 0;
3053
3054 ntuples /= sizeof(u32);
3055 if (ntuples % 2) {
3056 dev_err(dev,
3057 "Invalid number of attach-impl-defs registers: %d\n",
3058 ntuples);
3059 return -EINVAL;
3060 }
3061
3062 regs = devm_kmalloc(
3063 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
3064 GFP_KERNEL);
3065 if (!regs)
3066 return -ENOMEM;
3067
3068 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
3069 if (!tuples)
3070 return -ENOMEM;
3071
3072 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
3073 tuples, ntuples);
3074 if (ret)
3075 return ret;
3076
3077 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
3078 regit->offset = tuples[i];
3079 regit->value = tuples[i + 1];
3080 }
3081
3082 devm_kfree(dev, tuples);
3083
3084 smmu->impl_def_attach_registers = regs;
3085 smmu->num_impl_def_attach_registers = ntuples / 2;
3086
3087 return 0;
3088}
3089
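/*
 * Pick up the SMMU's clocks from the "clock-names"/"clocks" DT properties.
 * Clocks reporting a zero rate are set to the rounded rate of a nominal
 * 1kHz request before first use.
 */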
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003090static int arm_smmu_init_clocks(struct arm_smmu_device *smmu)
3091{
3092 const char *cname;
3093 struct property *prop;
3094 int i;
3095 struct device *dev = smmu->dev;
3096
3097 smmu->num_clocks =
3098 of_property_count_strings(dev->of_node, "clock-names");
3099
Patrick Dalyf0c58e12016-10-12 22:15:36 -07003100 if (smmu->num_clocks < 1) {
3101 smmu->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003102 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07003103 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003104
3105 smmu->clocks = devm_kzalloc(
3106 dev, sizeof(*smmu->clocks) * smmu->num_clocks,
3107 GFP_KERNEL);
3108
3109 if (!smmu->clocks) {
3110 dev_err(dev,
3111 "Failed to allocate memory for clocks\n");
3112		return -ENOMEM;
3113 }
3114
3115 i = 0;
3116 of_property_for_each_string(dev->of_node, "clock-names",
3117 prop, cname) {
3118 struct clk *c = devm_clk_get(dev, cname);
3119
3120 if (IS_ERR(c)) {
3121			dev_err(dev, "Couldn't get clock: %s\n",
3122 cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07003123 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003124 }
3125
3126 if (clk_get_rate(c) == 0) {
3127 long rate = clk_round_rate(c, 1000);
3128
3129 clk_set_rate(c, rate);
3130 }
3131
3132 smmu->clocks[i] = c;
3133
3134 ++i;
3135 }
3136 return 0;
3137}
3138
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003139static int arm_smmu_init_regulators(struct arm_smmu_device *smmu)
3140{
3141 struct device *dev = smmu->dev;
3142
3143 if (!of_get_property(dev->of_node, "vdd-supply", NULL))
3144 return 0;
3145
3146 smmu->gdsc = devm_regulator_get(dev, "vdd");
3147 if (IS_ERR(smmu->gdsc))
3148 return PTR_ERR(smmu->gdsc);
3149
3150 return 0;
3151}
3152
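/*
 * Optional bus-scaling support: if the "qcom,bus-master-id" DT property is
 * present, register a bus client from that master to MSM_BUS_SLAVE_EBI_CH0;
 * without the property, bus scaling is simply skipped.
 */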
Patrick Daly2764f952016-09-06 19:22:44 -07003153static int arm_smmu_init_bus_scaling(struct platform_device *pdev,
3154 struct arm_smmu_device *smmu)
3155{
3156 u32 master_id;
3157
3158 if (of_property_read_u32(pdev->dev.of_node, "qcom,bus-master-id",
3159 &master_id)) {
3160 dev_dbg(smmu->dev, "No bus scaling info\n");
3161 return 0;
3162 }
3163
3164 smmu->bus_client_name = devm_kasprintf(
3165 smmu->dev, GFP_KERNEL, "smmu-bus-client-%s",
3166 dev_name(smmu->dev));
3167
3168 if (!smmu->bus_client_name)
3169 return -ENOMEM;
3170
3171 smmu->bus_client = msm_bus_scale_register(
3172 master_id, MSM_BUS_SLAVE_EBI_CH0, smmu->bus_client_name, true);
3173	if (IS_ERR(smmu->bus_client)) {
3174 int ret = PTR_ERR(smmu->bus_client);
3175
3176 if (ret != -EPROBE_DEFER)
3177 dev_err(smmu->dev, "Bus client registration failed\n");
3178 return ret;
3179 }
3180
3181 return 0;
3182}
3183
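/*
 * Probe the hardware configuration from the ID registers: supported
 * translation stages, stream matching vs. indexing, context bank and SMR
 * counts, address sizes and page-table formats. The results feed both the
 * per-SMMU feature flags and the global arm_smmu_ops.pgsize_bitmap.
 */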
Will Deacon45ae7cf2013-06-24 18:31:25 +01003184static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
3185{
3186 unsigned long size;
3187 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
3188 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003189 bool cttw_dt, cttw_reg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003190
Mitchel Humpherysba822582015-10-20 11:37:41 -07003191 dev_dbg(smmu->dev, "probing hardware configuration...\n");
3192 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01003193 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003194
3195 /* ID0 */
3196 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01003197
3198 /* Restrict available stages based on module parameter */
3199 if (force_stage == 1)
3200 id &= ~(ID0_S2TS | ID0_NTS);
3201 else if (force_stage == 2)
3202 id &= ~(ID0_S1TS | ID0_NTS);
3203
Will Deacon45ae7cf2013-06-24 18:31:25 +01003204 if (id & ID0_S1TS) {
3205 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003206 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003207 }
3208
3209 if (id & ID0_S2TS) {
3210 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003211 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003212 }
3213
3214 if (id & ID0_NTS) {
3215 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003216 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003217 }
3218
3219 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01003220 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003221 dev_err(smmu->dev, "\tno translation support!\n");
3222 return -ENODEV;
3223 }
3224
Robin Murphyb7862e32016-04-13 18:13:03 +01003225 if ((id & ID0_S1TS) &&
3226 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003227 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003228 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003229 }
3230
Robin Murphybae2c2d2015-07-29 19:46:05 +01003231 /*
3232 * In order for DMA API calls to work properly, we must defer to what
3233 * the DT says about coherency, regardless of what the hardware claims.
3234 * Fortunately, this also opens up a workaround for systems where the
3235 * ID register value has ended up configured incorrectly.
3236 */
3237 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
3238 cttw_reg = !!(id & ID0_CTTW);
3239 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003240 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003241 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003242 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01003243 cttw_dt ? "" : "non-");
3244 if (cttw_dt != cttw_reg)
3245 dev_notice(smmu->dev,
3246 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003247
3248 if (id & ID0_SMS) {
3249 u32 smr, sid, mask;
3250
3251 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
3252 smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
3253 ID0_NUMSMRG_MASK;
3254 if (smmu->num_mapping_groups == 0) {
3255 dev_err(smmu->dev,
3256 "stream-matching supported, but no SMRs present!\n");
3257 return -ENODEV;
3258 }
3259
Dhaval Patel031d7462015-05-09 14:47:29 -07003260 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
3261 smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
3262 smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
3263 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
3264 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
Will Deacon45ae7cf2013-06-24 18:31:25 +01003265
Dhaval Patel031d7462015-05-09 14:47:29 -07003266 mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
3267 sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
3268 if ((mask & sid) != sid) {
3269 dev_err(smmu->dev,
3270 "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
3271 mask, sid);
3272 return -ENODEV;
3273 }
3274
Mitchel Humpherysba822582015-10-20 11:37:41 -07003275 dev_dbg(smmu->dev,
Dhaval Patel031d7462015-05-09 14:47:29 -07003276 "\tstream matching with %u register groups, mask 0x%x",
3277 smmu->num_mapping_groups, mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003278 }
Olav Haugan3c8766d2014-08-22 17:12:32 -07003279 } else {
3280 smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
3281 ID0_NUMSIDB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003282 }
3283
Robin Murphy7602b872016-04-28 17:12:09 +01003284 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
3285 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
3286 if (!(id & ID0_PTFS_NO_AARCH32S))
3287 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
3288 }
3289
Will Deacon45ae7cf2013-06-24 18:31:25 +01003290 /* ID1 */
3291 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01003292 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003293
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003294 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00003295 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01003296 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003297 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07003298 dev_warn(smmu->dev,
3299 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
3300 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003301
Will Deacon518f7132014-11-14 17:17:54 +00003302 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003303 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
3304 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
3305 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
3306 return -ENODEV;
3307 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07003308 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01003309 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01003310 /*
3311 * Cavium CN88xx erratum #27704.
3312 * Ensure ASID and VMID allocation is unique across all SMMUs in
3313 * the system.
3314 */
3315 if (smmu->model == CAVIUM_SMMUV2) {
3316 smmu->cavium_id_base =
3317 atomic_add_return(smmu->num_context_banks,
3318 &cavium_smmu_context_count);
3319 smmu->cavium_id_base -= smmu->num_context_banks;
3320 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003321
3322 /* ID2 */
3323 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
3324 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003325 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003326
Will Deacon518f7132014-11-14 17:17:54 +00003327 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003328 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003329 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003330
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003331 if (id & ID2_VMID16)
3332 smmu->features |= ARM_SMMU_FEAT_VMID16;
3333
Robin Murphyf1d84542015-03-04 16:41:05 +00003334 /*
3335 * What the page table walker can address actually depends on which
3336 * descriptor format is in use, but since a) we don't know that yet,
3337 * and b) it can vary per context bank, this will have to do...
3338 */
3339 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
3340 dev_warn(smmu->dev,
3341 "failed to set DMA mask for table walker\n");
3342
Robin Murphyb7862e32016-04-13 18:13:03 +01003343 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00003344 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01003345 if (smmu->version == ARM_SMMU_V1_64K)
3346 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003347 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003348 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00003349 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00003350 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01003351 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00003352 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01003353 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00003354 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01003355 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003356 }
3357
Robin Murphy7602b872016-04-28 17:12:09 +01003358 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01003359 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01003360 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01003361 if (smmu->features &
3362 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01003363 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01003364 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01003365 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01003366 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01003367 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01003368
Robin Murphyd5466352016-05-09 17:20:09 +01003369 if (arm_smmu_ops.pgsize_bitmap == -1UL)
3370 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
3371 else
3372 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003373 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01003374 smmu->pgsize_bitmap);
3375
Will Deacon518f7132014-11-14 17:17:54 +00003376
Will Deacon28d60072014-09-01 16:24:48 +01003377 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003378 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
3379 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003380
3381 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003382 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
3383 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003384
Will Deacon45ae7cf2013-06-24 18:31:25 +01003385 return 0;
3386}
3387
Robin Murphy67b65a32016-04-13 18:12:57 +01003388struct arm_smmu_match_data {
3389 enum arm_smmu_arch_version version;
3390 enum arm_smmu_implementation model;
3391};
3392
3393#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
3394static struct arm_smmu_match_data name = { .version = ver, .model = imp }
3395
3396ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
3397ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01003398ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003399ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01003400ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Patrick Dalyf0d4e212016-06-20 15:50:14 -07003401ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01003402
Joerg Roedel09b52692014-10-02 12:24:45 +02003403static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01003404 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
3405 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
3406 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01003407 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003408 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01003409 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07003410 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01003411 { },
3412};
3413MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
3414
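/*
 * Platform probe: map the registers, gather interrupts, clocks, regulators
 * and bus-scaling information from the DT, power the SMMU on long enough to
 * probe its configuration and reset it, then register the global fault
 * handlers and add the instance to the arm_smmu_devices list.
 */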
Will Deacon45ae7cf2013-06-24 18:31:25 +01003415static int arm_smmu_device_dt_probe(struct platform_device *pdev)
3416{
Robin Murphy09360402014-08-28 17:51:59 +01003417 const struct of_device_id *of_id;
Robin Murphy67b65a32016-04-13 18:12:57 +01003418 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003419 struct resource *res;
3420 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003421 struct device *dev = &pdev->dev;
3422 struct rb_node *node;
Mitchel Humpherysc6dd1ed2014-08-04 16:45:53 -07003423 int num_irqs, i, err, num_masters;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003424
3425 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
3426 if (!smmu) {
3427 dev_err(dev, "failed to allocate arm_smmu_device\n");
3428 return -ENOMEM;
3429 }
3430 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08003431 spin_lock_init(&smmu->atos_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003432 mutex_init(&smmu->power_lock);
Patrick Daly8befb662016-08-17 20:03:28 -07003433 spin_lock_init(&smmu->clock_refs_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07003434 idr_init(&smmu->asid_idr);
3435 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003436
Robin Murphy09360402014-08-28 17:51:59 +01003437 of_id = of_match_node(arm_smmu_of_match, dev->of_node);
Robin Murphy67b65a32016-04-13 18:12:57 +01003438 data = of_id->data;
3439 smmu->version = data->version;
3440 smmu->model = data->model;
Robin Murphy09360402014-08-28 17:51:59 +01003441
Will Deacon45ae7cf2013-06-24 18:31:25 +01003442 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01003443 smmu->base = devm_ioremap_resource(dev, res);
3444 if (IS_ERR(smmu->base))
3445 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003446 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003447
3448 if (of_property_read_u32(dev->of_node, "#global-interrupts",
3449 &smmu->num_global_irqs)) {
3450 dev_err(dev, "missing #global-interrupts property\n");
3451 return -ENODEV;
3452 }
3453
3454 num_irqs = 0;
3455 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
3456 num_irqs++;
3457 if (num_irqs > smmu->num_global_irqs)
3458 smmu->num_context_irqs++;
3459 }
3460
Andreas Herrmann44a08de2013-10-01 13:39:07 +01003461 if (!smmu->num_context_irqs) {
3462 dev_err(dev, "found %d interrupts but expected at least %d\n",
3463 num_irqs, smmu->num_global_irqs + 1);
3464 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003465 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003466
3467 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
3468 GFP_KERNEL);
3469 if (!smmu->irqs) {
3470 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
3471 return -ENOMEM;
3472 }
3473
3474 for (i = 0; i < num_irqs; ++i) {
3475 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07003476
Will Deacon45ae7cf2013-06-24 18:31:25 +01003477 if (irq < 0) {
3478 dev_err(dev, "failed to get irq index %d\n", i);
3479 return -ENODEV;
3480 }
3481 smmu->irqs[i] = irq;
3482 }
3483
Dhaval Patel031d7462015-05-09 14:47:29 -07003484 parse_driver_options(smmu);
3485
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003486 err = arm_smmu_init_clocks(smmu);
Olav Haugan3c8766d2014-08-22 17:12:32 -07003487 if (err)
3488 return err;
3489
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003490 err = arm_smmu_init_regulators(smmu);
3491 if (err)
3492 return err;
3493
Patrick Daly2764f952016-09-06 19:22:44 -07003494 err = arm_smmu_init_bus_scaling(pdev, smmu);
3495 if (err)
3496 return err;
3497
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003498 err = arm_smmu_power_on(smmu);
3499 if (err)
3500 return err;
3501
3502 err = arm_smmu_device_cfg_probe(smmu);
3503 if (err)
3504 goto out_power_off;
3505
Will Deacon45ae7cf2013-06-24 18:31:25 +01003506 i = 0;
3507 smmu->masters = RB_ROOT;
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02003508
Mitchel Humpherysc6dd1ed2014-08-04 16:45:53 -07003509 err = arm_smmu_parse_iommus_properties(smmu, &num_masters);
3510 if (err)
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02003511 goto out_put_masters;
3512
Mitchel Humpherysba822582015-10-20 11:37:41 -07003513 dev_dbg(dev, "registered %d master devices\n", num_masters);
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02003514
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003515 err = arm_smmu_parse_impl_def_registers(smmu);
3516 if (err)
3517 goto out_put_masters;
3518
Robin Murphyb7862e32016-04-13 18:13:03 +01003519 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01003520 smmu->num_context_banks != smmu->num_context_irqs) {
3521 dev_err(dev,
Mitchel Humpherys73230ce2014-12-11 17:18:02 -08003522 "found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
3523 smmu->num_context_irqs, smmu->num_context_banks,
3524 smmu->num_context_banks);
3525 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003526 }
3527
Will Deacon45ae7cf2013-06-24 18:31:25 +01003528 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08003529 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
3530 NULL, arm_smmu_global_fault,
3531 IRQF_ONESHOT | IRQF_SHARED,
3532 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003533 if (err) {
3534 dev_err(dev, "failed to request global IRQ %d (%u)\n",
3535 i, smmu->irqs[i]);
Peng Fanbee14002016-07-04 17:38:22 +08003536 goto out_put_masters;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003537 }
3538 }
3539
3540 INIT_LIST_HEAD(&smmu->list);
3541 spin_lock(&arm_smmu_devices_lock);
3542 list_add(&smmu->list, &arm_smmu_devices);
3543 spin_unlock(&arm_smmu_devices_lock);
Will Deaconfd90cec2013-08-21 13:56:34 +01003544
3545 arm_smmu_device_reset(smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003546 arm_smmu_power_off(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003547 return 0;
3548
Will Deacon45ae7cf2013-06-24 18:31:25 +01003549out_put_masters:
3550 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07003551 struct arm_smmu_master *master
3552 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003553 of_node_put(master->of_node);
3554 }
3555
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003556out_power_off:
3557 arm_smmu_power_off(smmu);
3558
Will Deacon45ae7cf2013-06-24 18:31:25 +01003559 return err;
3560}
3561
3562static int arm_smmu_device_remove(struct platform_device *pdev)
3563{
3564 int i;
3565 struct device *dev = &pdev->dev;
3566 struct arm_smmu_device *curr, *smmu = NULL;
3567 struct rb_node *node;
3568
3569 spin_lock(&arm_smmu_devices_lock);
3570 list_for_each_entry(curr, &arm_smmu_devices, list) {
3571 if (curr->dev == dev) {
3572 smmu = curr;
3573 list_del(&smmu->list);
3574 break;
3575 }
3576 }
3577 spin_unlock(&arm_smmu_devices_lock);
3578
3579 if (!smmu)
3580 return -ENODEV;
3581
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003582 if (arm_smmu_power_on(smmu))
3583 return -EINVAL;
3584
Will Deacon45ae7cf2013-06-24 18:31:25 +01003585 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07003586 struct arm_smmu_master *master
3587 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003588 of_node_put(master->of_node);
3589 }
3590
Will Deaconecfadb62013-07-31 19:21:28 +01003591 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Will Deacon45ae7cf2013-06-24 18:31:25 +01003592 dev_err(dev, "removing device with active domains!\n");
3593
3594 for (i = 0; i < smmu->num_global_irqs; ++i)
Peng Fanbee14002016-07-04 17:38:22 +08003595 devm_free_irq(smmu->dev, smmu->irqs[i], smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003596
Patrick Dalyc190d932016-08-30 17:23:28 -07003597 idr_destroy(&smmu->asid_idr);
3598
Will Deacon45ae7cf2013-06-24 18:31:25 +01003599 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07003600 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003601 arm_smmu_power_off(smmu);
3602
Patrick Daly2764f952016-09-06 19:22:44 -07003603 msm_bus_scale_unregister(smmu->bus_client);
3604
Will Deacon45ae7cf2013-06-24 18:31:25 +01003605 return 0;
3606}
3607
Will Deacon45ae7cf2013-06-24 18:31:25 +01003608static struct platform_driver arm_smmu_driver = {
3609 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003610 .name = "arm-smmu",
3611 .of_match_table = of_match_ptr(arm_smmu_of_match),
3612 },
3613 .probe = arm_smmu_device_dt_probe,
3614 .remove = arm_smmu_device_remove,
3615};
3616
3617static int __init arm_smmu_init(void)
3618{
Thierry Reding0e7d37a2014-11-07 15:26:18 +00003619 struct device_node *np;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003620 int ret;
3621
Thierry Reding0e7d37a2014-11-07 15:26:18 +00003622 /*
3623 * Play nice with systems that don't have an ARM SMMU by checking that
3624 * an ARM SMMU exists in the system before proceeding with the driver
3625 * and IOMMU bus operation registration.
3626 */
3627 np = of_find_matching_node(NULL, arm_smmu_of_match);
3628 if (!np)
3629 return 0;
3630
3631 of_node_put(np);
3632
Will Deacon45ae7cf2013-06-24 18:31:25 +01003633 ret = platform_driver_register(&arm_smmu_driver);
3634 if (ret)
3635 return ret;
3636
3637 /* Oh, for a proper bus abstraction */
Dan Carpenter6614ee72013-08-21 09:34:20 +01003638 if (!iommu_present(&platform_bus_type))
Will Deacon45ae7cf2013-06-24 18:31:25 +01003639 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
3640
Will Deacond123cf82014-02-04 22:17:53 +00003641#ifdef CONFIG_ARM_AMBA
Dan Carpenter6614ee72013-08-21 09:34:20 +01003642 if (!iommu_present(&amba_bustype))
Will Deacon45ae7cf2013-06-24 18:31:25 +01003643 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
Will Deacond123cf82014-02-04 22:17:53 +00003644#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01003645
Will Deacona9a1b0b2014-05-01 18:05:08 +01003646#ifdef CONFIG_PCI
Wei Chen112c8982016-06-13 17:20:17 +08003647 if (!iommu_present(&pci_bus_type)) {
3648 pci_request_acs();
Will Deacona9a1b0b2014-05-01 18:05:08 +01003649 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
Wei Chen112c8982016-06-13 17:20:17 +08003650 }
Will Deacona9a1b0b2014-05-01 18:05:08 +01003651#endif
3652
Will Deacon45ae7cf2013-06-24 18:31:25 +01003653 return 0;
3654}
3655
3656static void __exit arm_smmu_exit(void)
3657{
3658 return platform_driver_unregister(&arm_smmu_driver);
3659}
3660
Andreas Herrmannb1950b22013-10-01 13:39:05 +01003661subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003662module_exit(arm_smmu_exit);
3663
3664MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
3665MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
3666MODULE_LICENSE("GPL v2");