/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msm-bus.h>
#include <dt-bindings/msm/msm-bus-ids.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

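/*
 * Usage sketch (mirroring arm_smmu_global_fault() below): with the
 * "calxeda,smmu-secure-config-access" option set, global register reads
 * go through the secure alias, e.g.
 *
 *	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
 *	u32 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
 *
 * reads nsGFSR at offset 0x448 instead of sGFSR at 0x48.
 */
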
/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

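/*
 * Usage sketch (taken from arm_smmu_tlb_inv_range_nosync() below): a
 * stage-2 invalidation writes the 64-bit address with
 *
 *	smmu_write_atomic_lq(iova, reg);
 *
 * which is a single writeq_relaxed() on 64-bit kernels and a
 * writel_relaxed() of the low word on 32-bit ones.
 */
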
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		500000	/* 500ms */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FSRRESTORE		0x5c
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define TLBSTATUS_SACTIVE		(1 << 0)
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

/* Definitions for implementation-defined registers */
#define ACTLR_QCOM_OSH_SHIFT		28
#define ACTLR_QCOM_OSH			1

#define ACTLR_QCOM_ISH_SHIFT		29
#define ACTLR_QCOM_ISH			1

#define ACTLR_QCOM_NSH_SHIFT		30
#define ACTLR_QCOM_NSH			1

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define IMPL_DEF1_MICRO_MMU_CTRL	0
#define MICRO_MMU_CTRL_LOCAL_HALT_REQ	(1 << 2)
#define MICRO_MMU_CTRL_IDLE		(1 << 3)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
#define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;

	u32				cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
	unsigned int			num_impl_def_attach_registers;

	int				num_clocks;
	struct clk			**clocks;

	struct regulator		*gdsc;

	struct msm_bus_client_handle	*bus_client;
	char				*bus_client_name;

	/* Protects power_count */
	struct mutex			power_lock;
	int				power_count;
	/* Protects clock_refs_count */
	spinlock_t			clock_refs_lock;
	int				clock_refs_count;

	spinlock_t			atos_lock;

	/* protects idr */
	struct mutex			idr_mutex;
	struct idr			asid_idr;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	u32				procid;
	u16				asid;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff
#define INVALID_CBNDX			0xff
#define INVALID_ASID			0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID			0xff

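/*
 * A note on the macros below: the ASID comes straight from the context
 * bank configuration, while the VMID is derived from the context bank
 * index plus cavium_id_base. The offset is believed to matter only on
 * Cavium SMMUv2 implementations, which share one ASID/VMID namespace
 * across SMMU instances; elsewhere cavium_id_base stays zero.
 */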
#define ARM_SMMU_CB_ASID(smmu, cfg) ((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	struct io_pgtable_cfg		pgtbl_cfg;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	u32				attributes;
	struct iommu_domain		domain;
};

struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ 0, NULL},
};

static int arm_smmu_halt(struct arm_smmu_device *smmu);
static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu);
static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu);
static void arm_smmu_resume(struct arm_smmu_device *smmu);
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
	struct iommu_domain *domain, dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct arm_smmu_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

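/*
 * Allocate a free index from the bitmap without an external lock: if
 * another CPU claims the bit between find_next_zero_bit() and
 * test_and_set_bit(), the loop simply searches again.
 */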
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static int arm_smmu_prepare_clocks(struct arm_smmu_device *smmu)
{
	int i, ret = 0;

	for (i = 0; i < smmu->num_clocks; ++i) {
		ret = clk_prepare(smmu->clocks[i]);
		if (ret) {
			dev_err(smmu->dev, "Couldn't prepare clock #%d\n", i);
			while (i--)
				clk_unprepare(smmu->clocks[i]);
			break;
		}
	}
	return ret;
}

static void arm_smmu_unprepare_clocks(struct arm_smmu_device *smmu)
{
	int i;

	for (i = 0; i < smmu->num_clocks; ++i)
		clk_unprepare(smmu->clocks[i]);
}

/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
static int arm_smmu_enable_clocks_atomic(struct arm_smmu_device *smmu)
{
	int i, ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&smmu->clock_refs_lock, flags);
	if (smmu->clock_refs_count > 0) {
		smmu->clock_refs_count++;
		spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
		return 0;
	}

	for (i = 0; i < smmu->num_clocks; ++i) {
		ret = clk_enable(smmu->clocks[i]);
		if (ret) {
			dev_err(smmu->dev, "Couldn't enable clock #%d\n", i);
			while (i--)
				clk_disable(smmu->clocks[i]);
			break;
		}
	}

	if (!ret)
		smmu->clock_refs_count++;

	spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
	return ret;
}

/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
static void arm_smmu_disable_clocks_atomic(struct arm_smmu_device *smmu)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&smmu->clock_refs_lock, flags);
	WARN_ON(smmu->clock_refs_count == 0);
	if (smmu->clock_refs_count > 1) {
		smmu->clock_refs_count--;
		spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
		return;
	}

	for (i = 0; i < smmu->num_clocks; ++i)
		clk_disable(smmu->clocks[i]);

	smmu->clock_refs_count--;
	spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
}

static int arm_smmu_enable_regulators(struct arm_smmu_device *smmu)
{
	if (!smmu->gdsc)
		return 0;

	return regulator_enable(smmu->gdsc);
}

static int arm_smmu_disable_regulators(struct arm_smmu_device *smmu)
{
	if (!smmu->gdsc)
		return 0;

	return regulator_disable(smmu->gdsc);
}

static int arm_smmu_request_bus(struct arm_smmu_device *smmu)
{
	if (!smmu->bus_client)
		return 0;
	return msm_bus_scale_update_bw(smmu->bus_client, 0, 1000);
}

static int arm_smmu_unrequest_bus(struct arm_smmu_device *smmu)
{
	if (!smmu->bus_client)
		return 0;
	return msm_bus_scale_update_bw(smmu->bus_client, 0, 0);
}

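/*
 * Power management is split into a sleepable and an atomic half: the
 * "slow" routines below take a mutex and handle resources that may sleep
 * (regulator, bus bandwidth vote, clk_prepare()), while the
 * *_clocks_atomic() routines above only do the spinlock-protected
 * clk_enable()/clk_disable() refcounting and are safe in atomic context.
 */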
static int arm_smmu_power_on_slow(struct arm_smmu_device *smmu)
{
	int ret;

	mutex_lock(&smmu->power_lock);
	if (smmu->power_count > 0) {
		smmu->power_count += 1;
		mutex_unlock(&smmu->power_lock);
		return 0;
	}

	ret = arm_smmu_enable_regulators(smmu);
	if (ret)
		goto out_unlock;

	ret = arm_smmu_request_bus(smmu);
	if (ret)
		goto out_disable_regulators;

	ret = arm_smmu_prepare_clocks(smmu);
	if (ret)
		goto out_disable_bus;

	smmu->power_count += 1;
	mutex_unlock(&smmu->power_lock);
	return 0;

out_disable_bus:
	arm_smmu_unrequest_bus(smmu);
out_disable_regulators:
	arm_smmu_disable_regulators(smmu);
out_unlock:
	mutex_unlock(&smmu->power_lock);
	return ret;
}

static void arm_smmu_power_off_slow(struct arm_smmu_device *smmu)
{
	mutex_lock(&smmu->power_lock);
	smmu->power_count--;
	WARN_ON(smmu->power_count < 0);

	if (smmu->power_count > 0) {
		mutex_unlock(&smmu->power_lock);
		return;
	}

	arm_smmu_unprepare_clocks(smmu);
	arm_smmu_unrequest_bus(smmu);
	arm_smmu_disable_regulators(smmu);

	mutex_unlock(&smmu->power_lock);
}

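/*
 * Sketch of the intended pairing around register access (this is how the
 * fault handlers below use it):
 *
 *	ret = arm_smmu_power_on(smmu);
 *	if (ret)
 *		return IRQ_NONE;
 *	...access SMMU registers...
 *	arm_smmu_power_off(smmu);
 */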
static int arm_smmu_power_on(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_power_on_slow(smmu);
	if (ret)
		return ret;

	ret = arm_smmu_enable_clocks_atomic(smmu);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	arm_smmu_power_off_slow(smmu);
	return ret;
}

static void arm_smmu_power_off(struct arm_smmu_device *smmu)
{
	arm_smmu_disable_clocks_atomic(smmu);
	arm_smmu_power_off_slow(smmu);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static int arm_smmu_domain_power_on(struct iommu_domain *domain,
				struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain)
		return arm_smmu_enable_clocks_atomic(smmu);

	return arm_smmu_power_on(smmu);
}

/*
 * Must be used instead of arm_smmu_power_off if it may be called from
 * atomic context
 */
static void arm_smmu_domain_power_off(struct iommu_domain *domain,
				struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain) {
		arm_smmu_disable_clocks_atomic(smmu);
		return;
	}

	arm_smmu_power_off(smmu);
}

/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
				int cbndx)
{
	void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
	u32 val;

	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
				      !(val & TLBSTATUS_SACTIVE),
				      0, TLB_LOOP_TIMEOUT))
		dev_err(smmu->dev, "TLBSYNC timeout!\n");
}

static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
}

/* Must be called with clocks/regulators enabled */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
		__arm_smmu_tlb_sync(smmu);
	}
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

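/*
 * Summary of the recovery path below: on an unhandled context fault the
 * SMMU is halted, the stalled transaction is terminated, stall-on-fault
 * (SCTLR.CFCFG) is momentarily disabled, and the translation is retried
 * via ATOS -- once more after a TLBIALL -- to distinguish a stale TLB
 * entry from genuinely bad page tables.
 */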
static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
					 dma_addr_t iova, u32 fsr)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu;
	void __iomem *cb_base;
	u64 sctlr, sctlr_orig;
	phys_addr_t phys;

	smmu = smmu_domain->smmu;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	arm_smmu_halt_nowait(smmu);

	writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);

	arm_smmu_wait_for_halt(smmu);

	/* clear FSR to allow ATOS to log any faults */
	writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* disable stall mode momentarily */
	sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
	sctlr = sctlr_orig & ~SCTLR_CFCFG;
	writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);

	phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);

	if (!phys) {
		dev_err(smmu->dev,
			"ATOS failed. Will issue a TLBIALL and try again...\n");
		arm_smmu_tlb_inv_context(smmu_domain);
		phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);
		if (phys)
			dev_err(smmu->dev,
				"ATOS succeeded this time. Maybe we missed a TLB invalidation while messing with page tables earlier??\n");
		else
			dev_err(smmu->dev,
				"ATOS still failed. If the page tables look good (check the software table walk) then hardware might be misbehaving.\n");
	}

	/* restore SCTLR */
	writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);

	arm_smmu_resume(smmu);

	return phys;
}

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret, tmp;
	u32 fsr, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;
	void __iomem *gr1_base;
	bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
	phys_addr_t phys_soft;
	u32 frsynra;
	bool non_fatal_fault = !!(smmu_domain->attributes &
				  (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));

Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001078 static DEFINE_RATELIMIT_STATE(_rs,
1079 DEFAULT_RATELIMIT_INTERVAL,
1080 DEFAULT_RATELIMIT_BURST);
1081
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001082 ret = arm_smmu_power_on(smmu);
1083 if (ret)
1084 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001085
Shalaj Jain04059c52015-03-03 13:34:59 -08001086 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001087 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001088 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
1089
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001090 if (!(fsr & FSR_FAULT)) {
1091 ret = IRQ_NONE;
1092 goto out_power_off;
1093 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001094
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001095 if (fatal_asf && (fsr & FSR_ASF)) {
1096 dev_err(smmu->dev,
1097 "Took an address size fault. Refusing to recover.\n");
1098 BUG();
1099 }
1100
Will Deacon45ae7cf2013-06-24 18:31:25 +01001101 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
Patrick Daly5ba28112016-08-30 19:18:52 -07001102 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001103 if (fsr & FSR_TF)
1104 flags |= IOMMU_FAULT_TRANSLATION;
1105 if (fsr & FSR_PF)
1106 flags |= IOMMU_FAULT_PERMISSION;
Mitchel Humpherysdd75e332015-08-19 11:02:33 -07001107 if (fsr & FSR_EF)
1108 flags |= IOMMU_FAULT_EXTERNAL;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001109 if (fsr & FSR_SS)
1110 flags |= IOMMU_FAULT_TRANSACTION_STALLED;
Patrick Daly5ba28112016-08-30 19:18:52 -07001111
Robin Murphyf9a05f02016-04-13 18:13:01 +01001112 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001113 phys_soft = arm_smmu_iova_to_phys(domain, iova);
Shalaj Jain04059c52015-03-03 13:34:59 -08001114 frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
1115 frsynra &= CBFRSYNRA_SID_MASK;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001116 tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
1117 if (!tmp || (tmp == -EBUSY)) {
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001118 dev_dbg(smmu->dev,
1119 "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1120 iova, fsr, fsynr, cfg->cbndx);
1121 dev_dbg(smmu->dev,
1122 "soft iova-to-phys=%pa\n", &phys_soft);
Patrick Daly5ba28112016-08-30 19:18:52 -07001123 ret = IRQ_HANDLED;
Shrenuj Bansald5083c02015-09-18 14:59:09 -07001124 resume = RESUME_TERMINATE;
Patrick Daly5ba28112016-08-30 19:18:52 -07001125 } else {
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001126 phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
1127 fsr);
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001128 if (__ratelimit(&_rs)) {
1129 dev_err(smmu->dev,
1130 "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1131 iova, fsr, fsynr, cfg->cbndx);
1132 dev_err(smmu->dev, "FAR = %016lx\n",
1133 (unsigned long)iova);
1134 dev_err(smmu->dev,
1135 "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
1136 fsr,
1137 (fsr & 0x02) ? "TF " : "",
1138 (fsr & 0x04) ? "AFF " : "",
1139 (fsr & 0x08) ? "PF " : "",
1140 (fsr & 0x10) ? "EF " : "",
1141 (fsr & 0x20) ? "TLBMCF " : "",
1142 (fsr & 0x40) ? "TLBLKF " : "",
1143 (fsr & 0x80) ? "MHF " : "",
1144 (fsr & 0x40000000) ? "SS " : "",
1145 (fsr & 0x80000000) ? "MULTI " : "");
1146 dev_err(smmu->dev,
1147 "soft iova-to-phys=%pa\n", &phys_soft);
Mitchel Humpherysd03b65d2015-11-05 11:50:29 -08001148 if (!phys_soft)
1149 dev_err(smmu->dev,
1150 "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
1151 dev_name(smmu->dev));
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001152 dev_err(smmu->dev,
1153 "hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
1154 dev_err(smmu->dev, "SID=0x%x\n", frsynra);
1155 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001156 ret = IRQ_NONE;
1157 resume = RESUME_TERMINATE;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001158 if (!non_fatal_fault) {
1159 dev_err(smmu->dev,
1160 "Unhandled arm-smmu context fault!\n");
1161 BUG();
1162 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001163 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001164
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001165 /*
1166 * If the client returns -EBUSY, do not clear FSR and do not RESUME
1167 * if stalled. This is required to keep the IOMMU client stalled on
1168 * the outstanding fault. This gives the client a chance to take any
1169 * debug action and then terminate the stalled transaction.
1170 * So, the sequence in case of stall on fault should be:
1171 * 1) Do not clear FSR or write to RESUME here
1172 * 2) Client takes any debug action
1173 * 3) Client terminates the stalled transaction and resumes the IOMMU
1174 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1175 * not before so that the fault remains outstanding. This ensures
1176 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1177 * need to be terminated.
1178 */
1179 if (tmp != -EBUSY) {
1180 /* Clear the faulting FSR */
1181 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001182
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001183 /*
1184 * Barrier required to ensure that the FSR is cleared
1185 * before resuming SMMU operation
1186 */
1187 wmb();
1188
1189 /* Retry or terminate any stalled transactions */
1190 if (fsr & FSR_SS)
1191 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1192 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001193
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001194out_power_off:
1195 arm_smmu_power_off(smmu);
1196
Patrick Daly5ba28112016-08-30 19:18:52 -07001197 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001198}
1199
1200static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1201{
1202 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1203 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001204 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001205
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001206 if (arm_smmu_power_on(smmu))
1207 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001208
Will Deacon45ae7cf2013-06-24 18:31:25 +01001209 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1210 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1211 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1212 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1213
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001214 if (!gfsr) {
1215 arm_smmu_power_off(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001216 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001217 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001218
Will Deacon45ae7cf2013-06-24 18:31:25 +01001219 dev_err_ratelimited(smmu->dev,
1220 "Unexpected global fault, this could be serious\n");
1221 dev_err_ratelimited(smmu->dev,
1222 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1223 gfsr, gfsynr0, gfsynr1, gfsynr2);
1224
1225 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001226 arm_smmu_power_off(smmu);
Will Deaconadaba322013-07-31 19:21:26 +01001227 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001228}
1229
Will Deacon518f7132014-11-14 17:17:54 +00001230static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1231 struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_EAE_SBOP;

	if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) ||
	    !stage1)
		reg |= SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}
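
/*
 * A note on the TTBR writes above: for stage 1 the ASID is packed into
 * the upper bits of the 64-bit TTBR alongside the table base, i.e.
 * (sketch, assuming the LPAE TTBR layout used by this driver):
 *
 *	u64 ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
 *	ttbr |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << TTBRn_ASID_SHIFT;
 *
 * Stage 2 carries no ASID; its VMID is programmed through CBAR (or
 * CBA2R when 16-bit VMIDs are supported), which is why the VTTBR is
 * written unmodified.
 */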

static int arm_smmu_init_asid(struct iommu_domain *domain,
			      struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool dynamic = is_dynamic_domain(domain);
	int ret;

	if (!dynamic) {
		cfg->asid = cfg->cbndx + 1;
	} else {
		mutex_lock(&smmu->idr_mutex);
		ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
				       smmu->num_context_banks + 2,
				       MAX_ASID + 1, GFP_KERNEL);

		mutex_unlock(&smmu->idr_mutex);
		if (ret < 0) {
			dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
				ret);
			return ret;
		}
		cfg->asid = ret;
	}
	return 0;
}
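
/*
 * The ASID space implied by arm_smmu_init_asid(): non-dynamic domains
 * derive their ASID from the context bank index (cbndx + 1, i.e.
 * 1..num_context_banks), while dynamic domains take IDR-allocated
 * values from num_context_banks + 2 up to MAX_ASID, so the two ranges
 * cannot collide.
 */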

static void arm_smmu_free_asid(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool dynamic = is_dynamic_domain(domain);

	if (cfg->asid == INVALID_ASID || !dynamic)
		return;

	mutex_lock(&smmu->idr_mutex);
	idr_remove(&smmu->asid_idr, cfg->asid);
	mutex_unlock(&smmu->idr_mutex);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool dynamic;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
	smmu_domain->cfg.asid = INVALID_ASID;

	/* We're bypassing these SIDs, so don't allocate an actual context */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	dynamic = is_dynamic_domain(domain);
	if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
		dev_err(smmu->dev, "dynamic domains not supported\n");
		ret = -EPERM;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested Supported Actual
	 * S1        N         S1
	 * S1        S1+S2     S1
	 * S1        S2        S2
	 * S1        S1        S1
	 * N         N         N
	 * N         S1+S2     S2
	 * N         S2        S2
	 * N         S1        S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Dynamic domains must set cbndx through domain attribute */
	if (!dynamic) {
		ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
					      smmu->num_context_banks);
		if (ret < 0)
			goto out_unlock;
		cfg->cbndx = ret;
	}
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
					 smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;

	/* Assign an ASID */
	ret = arm_smmu_init_asid(domain, smmu);
	if (ret)
		goto out_clear_smmu;

	if (!dynamic) {
		/* Initialise the context bank with our page table cfg */
		arm_smmu_init_context_bank(smmu_domain,
					   &smmu_domain->pgtbl_cfg);

		/*
		 * Request context fault interrupt. Do this last to avoid the
		 * handler seeing a half-initialised domain state.
		 */
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
			arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
			"arm-smmu-context-fault", domain);
		if (ret < 0) {
			dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
				cfg->irptndx, irq);
			cfg->irptndx = INVALID_IRPTNDX;
			goto out_clear_smmu;
		}
	} else {
		cfg->irptndx = INVALID_IRPTNDX;
	}
	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	arm_smmu_destroy_domain_context(domain);
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
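
/*
 * The ordering in arm_smmu_init_domain_context() is deliberate: the
 * context bank and io-pgtable are configured before the context fault
 * IRQ is requested, so the fault handler can never observe a
 * half-initialised domain. Dynamic domains skip the context bank
 * programming and IRQ request altogether; they only consume the cbndx
 * supplied via the DOMAIN_ATTR_CONTEXT_BANK attribute.
 */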

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;
	bool dynamic;
	int ret;

	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
		return;

	ret = arm_smmu_power_on(smmu);
	if (ret) {
		WARN_ONCE(ret, "Whoops, powering on smmu %p failed. Leaking context bank\n",
			  smmu);
		return;
	}

	dynamic = is_dynamic_domain(domain);
	if (dynamic) {
		arm_smmu_free_asid(domain);
		free_io_pgtable_ops(smmu_domain->pgtbl_ops);
		arm_smmu_power_off(smmu);
		return;
	}

	/*
	 * Disable the context bank and free the page tables before freeing
	 * the context.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);

	arm_smmu_power_off(smmu);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	/* Do not support IOMMU_DOMAIN_DMA for now */
	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	/* Unreachable while IOMMU_DOMAIN_DMA is rejected above */
	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);
	smmu_domain->cfg.cbndx = INVALID_CBNDX;

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (idx < 0) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}
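
/*
 * SMR matching refresher (architectural behaviour, not anything added
 * by this function): an incoming StreamID hits an SMR when the ID bits
 * agree everywhere the mask is clear, roughly:
 *
 *	match = !((streamid ^ smr.id) & ~smr.mask);
 *
 * Since the mask is always programmed to 0 above, each SMR matches
 * exactly one StreamID.
 */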

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/*
	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
	 * for all devices behind the SMMU. Note that we need to take
	 * care configuring SMRs for devices that are both a
	 * platform_device and a PCI device (i.e. a PCI host controller).
	 */
	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
		return 0;

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx, s2cr;

		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		s2cr = S2CR_TYPE_TRANS |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}

static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;

		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}

static void arm_smmu_detach_dev(struct iommu_domain *domain,
				struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_master_cfg *cfg;
	int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (dynamic)
		return;

	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return;

	if (!smmu) {
		dev_err(dev, "Domain not attached; cannot detach!\n");
		return;
	}

	dev->archdata.iommu = NULL;
	arm_smmu_domain_remove_master(smmu_domain, cfg);

	/* Remove additional vote for atomic power */
	if (atomic_domain) {
		WARN_ON(arm_smmu_enable_clocks_atomic(smmu));
		arm_smmu_power_off(smmu);
	}
}
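
/*
 * Power vote pairing between arm_smmu_detach_dev() above and
 * arm_smmu_attach_dev() below (a summary of the code, not new
 * behaviour): attach takes a vote for the duration of the call, plus,
 * for atomic domains, an extra vote that it downgrades with
 * arm_smmu_disable_clocks_atomic(). Detach re-takes the atomic clock
 * vote and then drops the power vote, so the counts stay balanced
 * across the attach/detach pair.
 */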

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Enable clocks and power */
	ret = arm_smmu_power_on(smmu);
	if (ret)
		return ret;

	/*
	 * Keep an additional vote for non-atomic power until the domain is
	 * detached.
	 */
	if (atomic_domain) {
		ret = arm_smmu_power_on(smmu);
		if (ret)
			goto out_power_off;

		arm_smmu_disable_clocks_atomic(smmu);
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		goto out_power_off;

	/* Do not modify the SIDs, HW is still running */
	if (is_dynamic_domain(domain)) {
		ret = 0;
		goto out_power_off;
	}

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		ret = -EINVAL;
		goto out_power_off;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg) {
		ret = -ENODEV;
		goto out_power_off;
	}

	/* Detach the dev from its current domain */
	if (dev->archdata.iommu)
		arm_smmu_detach_dev(dev->archdata.iommu, dev);

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;

out_power_off:
	arm_smmu_power_off(smmu);

	return ret;
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
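
/*
 * Typical consumer flow through the core IOMMU API (hypothetical
 * device and addresses; iommu_map() dispatches here via arm_smmu_ops):
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *	iommu_attach_device(domain, dev);
 *	iommu_map(domain, 0x10000000, page_to_phys(page), SZ_4K,
 *		  IOMMU_READ | IOMMU_WRITE);
 */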

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	/* unmap returns the number of bytes unmapped, so report 0 on error */
	if (arm_smmu_domain_power_on(domain, smmu_domain->smmu))
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	arm_smmu_domain_power_off(domain, smmu_domain->smmu);
	return ret;
}

static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
			      struct scatterlist *sg, unsigned int nents, int prot)
{
	int ret;
	size_t size;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	/* map_sg returns the number of bytes mapped, so report 0 on error */
	if (!ops)
		return 0;

	if (arm_smmu_domain_power_on(domain, smmu_domain->smmu))
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	/* Undo any partial mapping if the page table update failed */
	if (!ret)
		arm_smmu_unmap(domain, iova, size);

	arm_smmu_domain_power_off(domain, smmu_domain->smmu);
	return ret;
}

static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
						dma_addr_t iova, bool do_halt)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	unsigned long flags;
	u32 tmp;
	u64 phys;
	unsigned long va;

	spin_lock_irqsave(&smmu->atos_lock, flags);
	if (do_halt && arm_smmu_halt(smmu)) {
		phys = 0;
		goto out_unlock;
	}

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		phys = ops->iova_to_phys(ops, iova);
		dev_err(dev,
			"iova to phys timed out on %pad. software table walk result=%pa.\n",
			&iova, &phys);
		phys = 0;
		goto out_resume;
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		phys = 0;
	} else {
		phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
	}
out_resume:
	if (do_halt)
		arm_smmu_resume(smmu);
out_unlock:
	spin_unlock_irqrestore(&smmu->atos_lock, flags);
	return phys;
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

/*
 * This function can sleep, and cannot be called from atomic context. It will
 * power on the register block if required. This restriction does not apply
 * to the original iova_to_phys() op.
 */
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	phys_addr_t ret = 0;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int err;

	err = arm_smmu_power_on(smmu_domain->smmu);
	if (err)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
		ret = __arm_smmu_iova_to_phys_hard(domain, iova, true);

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	arm_smmu_power_off(smmu_domain->smmu);
	return ret;
}

static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
	struct iommu_domain *domain, dma_addr_t iova)
{
	return __arm_smmu_iova_to_phys_hard(domain, iova, false);
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids)
		cfg->streamids[cfg->num_streamids++] = sid;

	return 0;
}

static int arm_smmu_init_platform_device(struct device *dev,
					 struct iommu_group *group)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master *master;

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	iommu_group_set_iommudata(group, &master->cfg, NULL);

	return 0;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int ret = 0;

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	case DOMAIN_ATTR_PT_BASE_ADDR:
		*((phys_addr_t *)data) =
			smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
		return 0;
	case DOMAIN_ATTR_CONTEXT_BANK:
		/* context bank index isn't valid until we are attached */
		if (smmu_domain->smmu == NULL)
			return -ENODEV;

		*((unsigned int *)data) = smmu_domain->cfg.cbndx;
		ret = 0;
		break;
	case DOMAIN_ATTR_TTBR0: {
		u64 val;
		struct arm_smmu_device *smmu = smmu_domain->smmu;

		/* not valid until we are attached */
		if (smmu == NULL)
			return -ENODEV;

		val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
		if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
			val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
				<< TTBRn_ASID_SHIFT;
		*((u64 *)data) = val;
		ret = 0;
		break;
	}
	case DOMAIN_ATTR_CONTEXTIDR:
		/* not valid until attached */
		if (smmu_domain->smmu == NULL)
			return -ENODEV;
		*((u32 *)data) = smmu_domain->cfg.procid;
		ret = 0;
		break;
	case DOMAIN_ATTR_PROCID:
		*((u32 *)data) = smmu_domain->cfg.procid;
		ret = 0;
		break;
	case DOMAIN_ATTR_DYNAMIC:
		*((int *)data) = !!(smmu_domain->attributes
				    & (1 << DOMAIN_ATTR_DYNAMIC));
		ret = 0;
		break;
	case DOMAIN_ATTR_NON_FATAL_FAULTS:
		*((int *)data) = !!(smmu_domain->attributes
				    & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
		ret = 0;
		break;
	case DOMAIN_ATTR_S1_BYPASS:
		*((int *)data) = !!(smmu_domain->attributes
				    & (1 << DOMAIN_ATTR_S1_BYPASS));
		ret = 0;
		break;
	default:
		return -ENODEV;
	}
	return ret;
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	case DOMAIN_ATTR_PROCID:
		if (smmu_domain->smmu != NULL) {
			dev_err(smmu_domain->smmu->dev,
				"cannot change procid attribute while attached\n");
			ret = -EBUSY;
			break;
		}
		smmu_domain->cfg.procid = *((u32 *)data);
		ret = 0;
		break;
	case DOMAIN_ATTR_DYNAMIC: {
		int dynamic = *((int *)data);

		if (smmu_domain->smmu != NULL) {
			dev_err(smmu_domain->smmu->dev,
				"cannot change dynamic attribute while attached\n");
			ret = -EBUSY;
			break;
		}

		if (dynamic)
			smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
		else
			smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
		ret = 0;
		break;
	}
	case DOMAIN_ATTR_CONTEXT_BANK:
		/* context bank can't be set while attached */
		if (smmu_domain->smmu != NULL) {
			ret = -EBUSY;
			break;
		}
		/* ... and it can only be set for dynamic contexts. */
		if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
			ret = -EINVAL;
			break;
		}

		/* this will be validated during attach */
		smmu_domain->cfg.cbndx = *((unsigned int *)data);
		ret = 0;
		break;
	case DOMAIN_ATTR_NON_FATAL_FAULTS: {
		u32 non_fatal_faults = *((int *)data);

		if (non_fatal_faults)
			smmu_domain->attributes |=
				1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
		else
			smmu_domain->attributes &=
				~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
		ret = 0;
		break;
	}
	case DOMAIN_ATTR_S1_BYPASS: {
		int bypass = *((int *)data);

		/* bypass can't be changed while attached */
		if (smmu_domain->smmu != NULL) {
			ret = -EBUSY;
			break;
		}
		if (bypass)
			smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
		else
			smmu_domain->attributes &=
				~(1 << DOMAIN_ATTR_S1_BYPASS);

		ret = 0;
		break;
	}
	case DOMAIN_ATTR_ATOMIC: {
		int atomic_ctx = *((int *)data);

		/* can't be changed while attached */
		if (smmu_domain->smmu != NULL) {
			ret = -EBUSY;
			break;
		}
		if (atomic_ctx)
			smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
		else
			smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
		break;
	}
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
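
/*
 * Example of the attribute handshake for a dynamic domain (hypothetical
 * caller; as enforced above, the attributes must be set before the
 * first attach):
 *
 *	int dynamic = 1;
 *	unsigned int cbndx = parent_cbndx;	// borrowed from the parent domain
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_DYNAMIC, &dynamic);
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_CONTEXT_BANK, &cbndx);
 *	iommu_attach_device(domain, dev);
 */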

static void arm_smmu_trigger_fault(struct iommu_domain *domain,
				   unsigned long flags)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu;
	void __iomem *cb_base;

	if (!smmu_domain->smmu) {
		pr_err("Can't trigger faults on non-attached domains\n");
		return;
	}

	smmu = smmu_domain->smmu;
	if (arm_smmu_power_on(smmu))
		return;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
		flags, cfg->cbndx);
	writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
	/* give the interrupt time to fire... */
	msleep(1000);

	arm_smmu_power_off(smmu);
}

static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
				       unsigned long offset)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	unsigned long val;

	if (offset >= SZ_4K) {
		pr_err("Invalid offset: 0x%lx\n", offset);
		return 0;
	}

	smmu = smmu_domain->smmu;
	if (!smmu) {
		WARN(1, "Can't read registers of a detached domain\n");
		return 0;
	}

	if (arm_smmu_power_on(smmu))
		return 0;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	val = readl_relaxed(cb_base + offset);

	arm_smmu_power_off(smmu);
	return val;
}

static void arm_smmu_reg_write(struct iommu_domain *domain,
			       unsigned long offset, unsigned long val)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;

	if (offset >= SZ_4K) {
		pr_err("Invalid offset: 0x%lx\n", offset);
		return;
	}

	smmu = smmu_domain->smmu;
	if (!smmu) {
		WARN(1, "Can't write registers of a detached domain\n");
		return;
	}

	if (arm_smmu_power_on(smmu))
		return;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(val, cb_base + offset);

	arm_smmu_power_off(smmu);
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.detach_dev		= arm_smmu_detach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= arm_smmu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.iova_to_phys_hard	= arm_smmu_iova_to_phys_hard,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
	.trigger_fault		= arm_smmu_trigger_fault,
	.reg_read		= arm_smmu_reg_read,
	.reg_write		= arm_smmu_reg_write,
};
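
/*
 * arm_smmu_ops is what the IOMMU core dispatches through once the
 * driver registers it for a bus type during probe, typically:
 *
 *	bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
 *	bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
 */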

static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu)
{
	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
	u32 tmp;

	if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
				      tmp, (tmp & MICRO_MMU_CTRL_IDLE),
				      0, 30000)) {
		dev_err(smmu->dev, "Couldn't halt SMMU!\n");
		return -EBUSY;
	}

	return 0;
}

static int __arm_smmu_halt(struct arm_smmu_device *smmu, bool wait)
{
	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
	u32 reg;

	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
	reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
	writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);

	return wait ? arm_smmu_wait_for_halt(smmu) : 0;
}

static int arm_smmu_halt(struct arm_smmu_device *smmu)
{
	return __arm_smmu_halt(smmu, true);
}

static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu)
{
	return __arm_smmu_halt(smmu, false);
}
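
/*
 * Halt/resume protocol recap: __arm_smmu_halt() requests a halt by
 * setting MICRO_MMU_CTRL_LOCAL_HALT_REQ and, in the waiting variant,
 * polls for MICRO_MMU_CTRL_IDLE. Every successful halt must be paired
 * with arm_smmu_resume() below, as the ATOS path and
 * arm_smmu_impl_def_programming() both do.
 */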

static void arm_smmu_resume(struct arm_smmu_device *smmu)
{
	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
	u32 reg;

	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
	reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
	writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
}

static void arm_smmu_impl_def_programming(struct arm_smmu_device *smmu)
{
	int i;
	struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;

	arm_smmu_halt(smmu);
	for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
		writel_relaxed(regs[i].value,
			       ARM_SMMU_GR0(smmu) + regs[i].offset);
	arm_smmu_resume(smmu);
}

static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
{
	int i;
	u32 reg, major;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;

	/*
	 * Before clearing ARM_MMU500_ACTLR_CPRE, we need to clear the
	 * CACHE_LOCK bit of ACR first. The CACHE_LOCK bit is only
	 * present in MMU-500r2 onwards.
	 */
	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
	major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
	if ((smmu->model == ARM_MMU500) && (major >= 2)) {
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}

		if (smmu->model == QCOM_SMMUV2) {
			reg = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
			      ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
			      ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}
}
2577
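/*
 * Bring the global SMMU state to a known-good baseline: clear the global
 * fault status, point every stream at bypass/fault as configured, reset
 * the context banks, apply implementation-defined programming and
 * invalidate the TLBs before enabling the SMMU via sCR0.
 */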
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	int i = 0;
	u32 reg;

	/* Clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
		/*
		 * Mark all SMRn as invalid and all S2CRn as bypass unless
		 * overridden
		 */
		reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
		for (i = 0; i < smmu->num_mapping_groups; ++i) {
			writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
			writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
		}

		arm_smmu_context_bank_reset(smmu);
	}

	/* Program implementation defined registers */
	arm_smmu_impl_def_programming(smmu);

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

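/*
 * Translate the 3-bit address-size fields found in the ID registers into
 * a number of usable address bits.
 */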
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

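/*
 * Parse the optional "attach-impl-defs" DT property into (offset, value)
 * pairs to be written into the global register space at attach time.
 * A purely illustrative (hypothetical) binding would look like:
 *
 *	attach-impl-defs = <0x6000 0x270>,
 *			   <0x6060 0x1055>;
 *
 * i.e. a flat list of register-offset/value tuples.
 */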
static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;
	int i, ntuples, ret;
	u32 *tuples;
	struct arm_smmu_impl_def_reg *regs, *regit;

	if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
		return 0;

	ntuples /= sizeof(u32);
	if (ntuples % 2) {
		dev_err(dev,
			"Invalid number of attach-impl-defs registers: %d\n",
			ntuples);
		return -EINVAL;
	}

	regs = devm_kmalloc(
		dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
		GFP_KERNEL);
	if (!regs)
		return -ENOMEM;

	tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
	if (!tuples)
		return -ENOMEM;

	ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
					 tuples, ntuples);
	if (ret)
		return ret;

	for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
		regit->offset = tuples[i];
		regit->value = tuples[i + 1];
	}

	devm_kfree(dev, tuples);

	smmu->impl_def_attach_registers = regs;
	smmu->num_impl_def_attach_registers = ntuples / 2;

	return 0;
}

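/*
 * Look up the clocks named in the "clock-names" DT property and cache them
 * for the power on/off paths. Clocks that report a rate of zero are nudged
 * to their lowest supported rate. An illustrative (hypothetical) binding:
 *
 *	clocks = <&gcc GCC_SMMU_CFG_CLK>, <&gcc GCC_SMMU_CORE_CLK>;
 *	clock-names = "iface_clk", "core_clk";
 */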
static int arm_smmu_init_clocks(struct arm_smmu_device *smmu)
{
	const char *cname;
	struct property *prop;
	int i;
	struct device *dev = smmu->dev;

	smmu->num_clocks =
		of_property_count_strings(dev->of_node, "clock-names");

	if (smmu->num_clocks < 1)
		return 0;

	smmu->clocks = devm_kzalloc(
		dev, sizeof(*smmu->clocks) * smmu->num_clocks,
		GFP_KERNEL);

	if (!smmu->clocks) {
		dev_err(dev,
			"Failed to allocate memory for clocks\n");
		return -ENOMEM;
	}

	i = 0;
	of_property_for_each_string(dev->of_node, "clock-names",
				prop, cname) {
		struct clk *c = devm_clk_get(dev, cname);

		if (IS_ERR(c)) {
			dev_err(dev, "Couldn't get clock: %s\n",
				cname);
			return PTR_ERR(c);
		}

		if (clk_get_rate(c) == 0) {
			long rate = clk_round_rate(c, 1000);

			clk_set_rate(c, rate);
		}

		smmu->clocks[i] = c;

		++i;
	}
	return 0;
}

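/*
 * Grab the optional "vdd" GDSC/regulator supplying the SMMU, e.g.
 * (illustrative):
 *
 *	vdd-supply = <&gdsc_smmu>;
 */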
static int arm_smmu_init_regulators(struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;

	if (!of_get_property(dev->of_node, "vdd-supply", NULL))
		return 0;

	smmu->gdsc = devm_regulator_get(dev, "vdd");
	if (IS_ERR(smmu->gdsc))
		return PTR_ERR(smmu->gdsc);

	return 0;
}

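/*
 * Register an MSM bus-scaling client so the driver can vote for bandwidth
 * while the SMMU is powered. The master ID comes from the
 * "qcom,bus-master-id" DT property, e.g. (illustrative):
 *
 *	qcom,bus-master-id = <MSM_BUS_MASTER_GRAPHICS_3D>;
 */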
static int arm_smmu_init_bus_scaling(struct platform_device *pdev,
				     struct arm_smmu_device *smmu)
{
	u32 master_id;

	if (of_property_read_u32(pdev->dev.of_node, "qcom,bus-master-id",
				 &master_id)) {
		dev_dbg(smmu->dev, "No bus scaling info\n");
		return 0;
	}

	smmu->bus_client_name = devm_kasprintf(
		smmu->dev, GFP_KERNEL, "smmu-bus-client-%s",
		dev_name(smmu->dev));

	if (!smmu->bus_client_name)
		return -ENOMEM;

	smmu->bus_client = msm_bus_scale_register(
		master_id, MSM_BUS_SLAVE_EBI_CH0, smmu->bus_client_name, true);
	if (IS_ERR(smmu->bus_client)) {
		int ret = PTR_ERR(smmu->bus_client);

		if (ret != -EPROBE_DEFER)
			dev_err(smmu->dev, "Bus client registration failed\n");
		return ret;
	}

	return 0;
}

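/*
 * Probe the ID registers to discover what this SMMU implementation
 * actually supports: translation stages, stream matching, address sizes,
 * page sizes and model-specific quirks.
 */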
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
			smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
		((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
			smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
			smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
			writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
			smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

			mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
			sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
			if ((mask & sid) != sid) {
				dev_err(smmu->dev,
					"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
					mask, sid);
				return -ENODEV;
			}

			dev_notice(smmu->dev,
				   "\tstream matching with %u register groups, mask 0x%x\n",
				   smmu->num_mapping_groups, mask);
		}
	} else {
		smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
					   ID0_NUMSIDB_MASK;
	}

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			"SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

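/*
 * Match-table data pairing an architecture version with an implementation
 * model, so the probe path can apply the right model-specific quirks.
 */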
struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_iterator it;
	struct arm_smmu_phandle_args *masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;
	spin_lock_init(&smmu->atos_lock);
	mutex_init(&smmu->power_lock);
	spin_lock_init(&smmu->clock_refs_lock);
	idr_init(&smmu->asid_idr);
	mutex_init(&smmu->idr_mutex);

	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	data = of_id->data;
	smmu->version = data->version;
	smmu->model = data->model;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	parse_driver_options(smmu);

	err = arm_smmu_init_clocks(smmu);
	if (err)
		return err;

	err = arm_smmu_init_regulators(smmu);
	if (err)
		return err;

	err = arm_smmu_init_bus_scaling(pdev, smmu);
	if (err)
		return err;

	err = arm_smmu_power_on(smmu);
	if (err)
		return err;

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		goto out_power_off;

	i = 0;
	smmu->masters = RB_ROOT;

	err = -ENOMEM;
	/* No need to zero the memory for masterspec */
	masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
	if (!masterspec)
		goto out_put_masters;

	of_for_each_phandle(&it, err, dev->of_node,
			    "mmu-masters", "#stream-id-cells", 0) {
		int count = of_phandle_iterator_args(&it, masterspec->args,
						     MAX_MASTER_STREAMIDS);
		masterspec->np		= of_node_get(it.node);
		masterspec->args_count	= count;

		err = register_smmu_master(smmu, dev, masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec->np->name);
			kfree(masterspec);
			goto out_put_masters;
		}

		i++;
	}

	dev_notice(dev, "registered %d master devices\n", i);

	kfree(masterspec);

	err = arm_smmu_parse_impl_def_registers(smmu);
	if (err)
		goto out_put_masters;

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
					NULL, arm_smmu_global_fault,
					IRQF_ONESHOT | IRQF_SHARED,
					"arm-smmu global fault", smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_put_masters;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	arm_smmu_power_off(smmu);
	return 0;

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

out_power_off:
	arm_smmu_power_off(smmu);

	return err;
}

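/*
 * Tear-down path: drop the device from the global list, release master
 * references, warn about any still-active domains, disable the SMMU and
 * undo the bus-scaling registration.
 */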
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	if (arm_smmu_power_on(smmu))
		return -EINVAL;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		devm_free_irq(smmu->dev, smmu->irqs[i], smmu);

	idr_destroy(&smmu->asid_idr);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	arm_smmu_power_off(smmu);

	msm_bus_scale_unregister(smmu->bus_client);

	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

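/*
 * Register the platform driver and hook arm_smmu_ops onto each bus type we
 * may serve, but only if an ARM SMMU node actually exists in the DT.
 */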
static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");