/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msm-bus.h>
#include <dt-bindings/msm/msm-bus-ids.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		500000	/* 500ms */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FSRRESTORE		0x5c
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define TLBSTATUS_SACTIVE		(1 << 0)
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

/* Definitions for implementation-defined registers */
#define ACTLR_QCOM_OSH_SHIFT		28
#define ACTLR_QCOM_OSH			1

#define ACTLR_QCOM_ISH_SHIFT		29
#define ACTLR_QCOM_ISH			1

#define ACTLR_QCOM_NSH_SHIFT		30
#define ACTLR_QCOM_NSH			1

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define IMPL_DEF1_MICRO_MMU_CTRL	0
#define MICRO_MMU_CTRL_LOCAL_HALT_REQ	(1 << 2)
#define MICRO_MMU_CTRL_IDLE		(1 << 3)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
#define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;

	u32				cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
	unsigned int			num_impl_def_attach_registers;

	int				num_clocks;
	struct clk			**clocks;

	struct regulator		*gdsc;

	struct msm_bus_client_handle	*bus_client;
	char				*bus_client_name;

	/* Protects power_count */
	struct mutex			power_lock;
	int				power_count;
	/* Protects clock_refs_count */
	spinlock_t			clock_refs_lock;
	int				clock_refs_count;

	spinlock_t			atos_lock;

	/* protects idr */
	struct mutex			idr_mutex;
	struct idr			asid_idr;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	u32				procid;
	u16				asid;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff
#define INVALID_CBNDX			0xff
#define INVALID_ASID			0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	struct io_pgtable_cfg		pgtbl_cfg;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	u32				attributes;
	struct iommu_domain		domain;
};

struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ 0, NULL},
};

static int arm_smmu_halt(struct arm_smmu_device *smmu);
static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu);
static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu);
static void arm_smmu_resume(struct arm_smmu_device *smmu);
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
	struct iommu_domain *domain, dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

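/*
 * Masters are kept in an rb-tree keyed on the raw device_node pointer
 * value, so the lookup and insertion below compare of_node pointers
 * rather than any property of the node itself.
 */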
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct arm_smmu_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

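/*
 * Lock-free bitmap allocator: retry find_next_zero_bit() until the
 * atomic test_and_set_bit() wins the race for a free index, or give
 * up with -ENOSPC once the map is exhausted.
 */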
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

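/*
 * Clock handling is split along the clk API's prepare/enable boundary:
 * clk_prepare()/clk_unprepare() may sleep and therefore live in the
 * mutex-protected "slow" power path, while clk_enable()/clk_disable()
 * are safe in atomic context and are reference-counted under
 * clock_refs_lock.
 */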
static int arm_smmu_prepare_clocks(struct arm_smmu_device *smmu)
{
	int i, ret = 0;

	for (i = 0; i < smmu->num_clocks; ++i) {
		ret = clk_prepare(smmu->clocks[i]);
		if (ret) {
			dev_err(smmu->dev, "Couldn't prepare clock #%d\n", i);
			while (i--)
				clk_unprepare(smmu->clocks[i]);
			break;
		}
	}
	return ret;
}

static void arm_smmu_unprepare_clocks(struct arm_smmu_device *smmu)
{
	int i;

	for (i = 0; i < smmu->num_clocks; ++i)
		clk_unprepare(smmu->clocks[i]);
}

/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
static int arm_smmu_enable_clocks_atomic(struct arm_smmu_device *smmu)
{
	int i, ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&smmu->clock_refs_lock, flags);
	if (smmu->clock_refs_count > 0) {
		smmu->clock_refs_count++;
		spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
		return 0;
	}

	for (i = 0; i < smmu->num_clocks; ++i) {
		ret = clk_enable(smmu->clocks[i]);
		if (ret) {
			dev_err(smmu->dev, "Couldn't enable clock #%d\n", i);
			while (i--)
				clk_disable(smmu->clocks[i]);
			break;
		}
	}

	if (!ret)
		smmu->clock_refs_count++;

	spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
	return ret;
}

/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
static void arm_smmu_disable_clocks_atomic(struct arm_smmu_device *smmu)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&smmu->clock_refs_lock, flags);
	WARN_ON(smmu->clock_refs_count == 0);
	if (smmu->clock_refs_count > 1) {
		smmu->clock_refs_count--;
		spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
		return;
	}

	for (i = 0; i < smmu->num_clocks; ++i)
		clk_disable(smmu->clocks[i]);

	smmu->clock_refs_count--;
	spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
}

static int arm_smmu_enable_regulators(struct arm_smmu_device *smmu)
{
	if (!smmu->gdsc)
		return 0;

	return regulator_enable(smmu->gdsc);
}

static int arm_smmu_disable_regulators(struct arm_smmu_device *smmu)
{
	if (!smmu->gdsc)
		return 0;

	return regulator_disable(smmu->gdsc);
}

static int arm_smmu_request_bus(struct arm_smmu_device *smmu)
{
	if (!smmu->bus_client)
		return 0;
	return msm_bus_scale_update_bw(smmu->bus_client, 0, 1000);
}

static int arm_smmu_unrequest_bus(struct arm_smmu_device *smmu)
{
	if (!smmu->bus_client)
		return 0;
	return msm_bus_scale_update_bw(smmu->bus_client, 0, 0);
}

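/*
 * Power-on ordering is regulators -> bus bandwidth vote -> clock
 * prepare; power-off undoes the same steps in reverse. power_count
 * reference-counts the slow path, so nested callers only pay for the
 * first "on" and the last "off".
 */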
static int arm_smmu_power_on_slow(struct arm_smmu_device *smmu)
{
	int ret;

	mutex_lock(&smmu->power_lock);
	if (smmu->power_count > 0) {
		smmu->power_count += 1;
		mutex_unlock(&smmu->power_lock);
		return 0;
	}

	ret = arm_smmu_enable_regulators(smmu);
	if (ret)
		goto out_unlock;

	ret = arm_smmu_request_bus(smmu);
	if (ret)
		goto out_disable_regulators;

	ret = arm_smmu_prepare_clocks(smmu);
	if (ret)
		goto out_disable_bus;

	smmu->power_count += 1;
	mutex_unlock(&smmu->power_lock);
	return 0;

out_disable_bus:
	arm_smmu_unrequest_bus(smmu);
out_disable_regulators:
	arm_smmu_disable_regulators(smmu);
out_unlock:
	mutex_unlock(&smmu->power_lock);
	return ret;
}

static void arm_smmu_power_off_slow(struct arm_smmu_device *smmu)
{
	mutex_lock(&smmu->power_lock);
	smmu->power_count--;
	WARN_ON(smmu->power_count < 0);

	if (smmu->power_count > 0) {
		mutex_unlock(&smmu->power_lock);
		return;
	}

	arm_smmu_unprepare_clocks(smmu);
	arm_smmu_unrequest_bus(smmu);
	arm_smmu_disable_regulators(smmu);

	mutex_unlock(&smmu->power_lock);
}

static int arm_smmu_power_on(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_power_on_slow(smmu);
	if (ret)
		return ret;

	ret = arm_smmu_enable_clocks_atomic(smmu);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	arm_smmu_power_off_slow(smmu);
	return ret;
}

static void arm_smmu_power_off(struct arm_smmu_device *smmu)
{
	arm_smmu_disable_clocks_atomic(smmu);
	arm_smmu_power_off_slow(smmu);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static int arm_smmu_domain_power_on(struct iommu_domain *domain,
				    struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain)
		return arm_smmu_enable_clocks_atomic(smmu);

	return arm_smmu_power_on(smmu);
}

/*
 * Must be used instead of arm_smmu_power_off if it may be called from
 * atomic context
 */
static void arm_smmu_domain_power_off(struct iommu_domain *domain,
				      struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain) {
		arm_smmu_disable_clocks_atomic(smmu);
		return;
	}

	arm_smmu_power_off(smmu);
}
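
/*
 * Minimal usage sketch: bracket any MMIO access to the SMMU with a
 * power on/off pair, e.g.
 *
 *	arm_smmu_power_on(smmu);
 *	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
 *	arm_smmu_power_off(smmu);
 *
 * The interrupt handlers below follow exactly this pattern.
 */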

/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
				 int cbndx)
{
	void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
	u32 val;

	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
				      !(val & TLBSTATUS_SACTIVE),
				      0, TLB_LOOP_TIMEOUT))
		dev_err(smmu->dev, "TLBSYNC timeout!\n");
}

static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
}

/* Must be called with clocks/regulators enabled */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
		__arm_smmu_tlb_sync(smmu);
	}
}

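/*
 * Range invalidation takes one of three forms: stage 1 by VA+ASID
 * (32-bit or 64-bit register writes, depending on the context format),
 * stage 2 by IPA on SMMUv2, or, failing those, invalidation of the
 * whole VMID.
 */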
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

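/*
 * Cross-check the software page tables against the hardware's view of
 * a faulting IOVA: halt the SMMU, terminate the stalled transaction,
 * momentarily drop stall mode (SCTLR.CFCFG) and run a hardware ATOS
 * translation, retrying once after a full TLB invalidation.
 */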
static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
					 dma_addr_t iova, u32 fsr)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu;
	void __iomem *cb_base;
	u64 sctlr, sctlr_orig;
	phys_addr_t phys;

	smmu = smmu_domain->smmu;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	arm_smmu_halt_nowait(smmu);

	writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);

	arm_smmu_wait_for_halt(smmu);

	/* clear FSR to allow ATOS to log any faults */
	writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* disable stall mode momentarily */
	sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
	sctlr = sctlr_orig & ~SCTLR_CFCFG;
	writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);

	phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);

	if (!phys) {
		dev_err(smmu->dev,
			"ATOS failed. Will issue a TLBIALL and try again...\n");
		arm_smmu_tlb_inv_context(smmu_domain);
		phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);
		if (phys)
			dev_err(smmu->dev,
				"ATOS succeeded this time. Maybe we missed a TLB invalidation while messing with page tables earlier??\n");
		else
			dev_err(smmu->dev,
				"ATOS still failed. If the page tables look good (check the software table walk) then hardware might be misbehaving.\n");
	}

	/* restore SCTLR */
	writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);

	arm_smmu_resume(smmu);

	return phys;
}

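/*
 * Context fault handler. Returns IRQ_HANDLED when a client's fault
 * handler claims the fault (or asks, via -EBUSY, to keep the
 * transaction stalled for inspection); otherwise the fault is
 * reported and, unless DOMAIN_ATTR_NON_FATAL_FAULTS is set on the
 * domain, the system BUG()s.
 */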
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret, tmp;
	u32 fsr, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;
	void __iomem *gr1_base;
	bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
	phys_addr_t phys_soft;
	u32 frsynra;
	bool non_fatal_fault = !!(smmu_domain->attributes &
				  (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));

	static DEFINE_RATELIMIT_STATE(_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	arm_smmu_power_on(smmu);

	gr1_base = ARM_SMMU_GR1(smmu);
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT)) {
		ret = IRQ_NONE;
		goto out_power_off;
	}

	if (fatal_asf && (fsr & FSR_ASF)) {
		dev_err(smmu->dev,
			"Took an address size fault. Refusing to recover.\n");
		BUG();
	}

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
	if (fsr & FSR_TF)
		flags |= IOMMU_FAULT_TRANSLATION;
	if (fsr & FSR_PF)
		flags |= IOMMU_FAULT_PERMISSION;
	if (fsr & FSR_EF)
		flags |= IOMMU_FAULT_EXTERNAL;
	if (fsr & FSR_SS)
		flags |= IOMMU_FAULT_TRANSACTION_STALLED;

	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	phys_soft = arm_smmu_iova_to_phys(domain, iova);
	frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
	frsynra &= CBFRSYNRA_SID_MASK;
	tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
	if (!tmp || (tmp == -EBUSY)) {
		dev_dbg(smmu->dev,
			"Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
			iova, fsr, fsynr, cfg->cbndx);
		dev_dbg(smmu->dev,
			"soft iova-to-phys=%pa\n", &phys_soft);
		ret = IRQ_HANDLED;
		resume = RESUME_TERMINATE;
	} else {
		phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
							      fsr);
		if (__ratelimit(&_rs)) {
			dev_err(smmu->dev,
				"Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
				iova, fsr, fsynr, cfg->cbndx);
			dev_err(smmu->dev, "FAR = %016lx\n",
				(unsigned long)iova);
			dev_err(smmu->dev,
				"FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
				fsr,
				(fsr & 0x02) ? "TF " : "",
				(fsr & 0x04) ? "AFF " : "",
				(fsr & 0x08) ? "PF " : "",
				(fsr & 0x10) ? "EF " : "",
				(fsr & 0x20) ? "TLBMCF " : "",
				(fsr & 0x40) ? "TLBLKF " : "",
				(fsr & 0x80) ? "MHF " : "",
				(fsr & 0x40000000) ? "SS " : "",
				(fsr & 0x80000000) ? "MULTI " : "");
			dev_err(smmu->dev,
				"soft iova-to-phys=%pa\n", &phys_soft);
			if (!phys_soft)
				dev_err(smmu->dev,
					"SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
					dev_name(smmu->dev));
			dev_err(smmu->dev,
				"hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
			dev_err(smmu->dev, "SID=0x%x\n", frsynra);
		}
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
		if (!non_fatal_fault) {
			dev_err(smmu->dev,
				"Unhandled arm-smmu context fault!\n");
			BUG();
		}
	}

	/*
	 * If the client returns -EBUSY, do not clear FSR and do not RESUME
	 * if stalled. This is required to keep the IOMMU client stalled on
	 * the outstanding fault. This gives the client a chance to take any
	 * debug action and then terminate the stalled transaction.
	 * So, the sequence in case of stall on fault should be:
	 * 1) Do not clear FSR or write to RESUME here
	 * 2) Client takes any debug action
	 * 3) Client terminates the stalled transaction and resumes the IOMMU
	 * 4) Client clears FSR. The FSR should only be cleared after 3) and
	 *    not before so that the fault remains outstanding. This ensures
	 *    SCTLR.HUPCF has the desired effect if subsequent transactions
	 *    also need to be terminated.
	 */
	if (tmp != -EBUSY) {
		/* Clear the faulting FSR */
		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);

		/*
		 * Barrier required to ensure that the FSR is cleared
		 * before resuming SMMU operation
		 */
		wmb();

		/* Retry or terminate any stalled transactions */
		if (fsr & FSR_SS)
			writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
	}

out_power_off:
	arm_smmu_power_off(smmu);

	return ret;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	arm_smmu_power_on(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr) {
		arm_smmu_power_off(smmu);
		return IRQ_NONE;
	}

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	arm_smmu_power_off(smmu);
	return IRQ_HANDLED;
}

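/*
 * Context bank programming order: CBA2R (register width and, where
 * supported, a 16-bit VMID), CBAR, the TTBRs with the ASID folded
 * into bits [63:48], TTBCR/TTBCR2, the stage 1 MAIRs, and finally
 * SCTLR to enable translation.
 */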
Will Deacon518f7132014-11-14 17:17:54 +00001227static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1228 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001229{
1230 u32 reg;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001231 u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_EAE_SBOP;

	if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) ||
							!stage1)
		reg |= SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

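/*
 * ASID numbering scheme, as implied by the allocation logic below: a static
 * domain derives its ASID directly from its context bank index, so each
 * context bank owns exactly one ASID and no bookkeeping is needed. A dynamic
 * domain shares the context bank of a parent domain, so it draws its ASID
 * from an IDR whose range starts above the per-context-bank values
 * (num_context_banks + 2 .. MAX_ASID), keeping the two namespaces disjoint.
 */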
static int arm_smmu_init_asid(struct iommu_domain *domain,
				struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool dynamic = is_dynamic_domain(domain);
	int ret;

	if (!dynamic) {
		cfg->asid = cfg->cbndx + 1;
	} else {
		mutex_lock(&smmu->idr_mutex);
		ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
					smmu->num_context_banks + 2,
					MAX_ASID + 1, GFP_KERNEL);

		mutex_unlock(&smmu->idr_mutex);
		if (ret < 0) {
			dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
				ret);
			return ret;
		}
		cfg->asid = ret;
	}
	return 0;
}

static void arm_smmu_free_asid(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool dynamic = is_dynamic_domain(domain);

	if (cfg->asid == INVALID_ASID || !dynamic)
		return;

	mutex_lock(&smmu->idr_mutex);
	idr_remove(&smmu->asid_idr, cfg->asid);
	mutex_unlock(&smmu->idr_mutex);
}

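/*
 * Illustrative only: a client of dynamic domains would typically mark the
 * domain dynamic and select the parent's context bank via attributes before
 * attaching, along these lines (error handling omitted):
 *
 *	int one = 1;
 *	struct iommu_domain *dyn = iommu_domain_alloc(dev->bus);
 *
 *	iommu_domain_set_attr(dyn, DOMAIN_ATTR_DYNAMIC, &one);
 *	iommu_domain_set_attr(dyn, DOMAIN_ATTR_CONTEXT_BANK, &parent_cbndx);
 *	iommu_attach_device(dyn, dev);
 *
 * Here "parent_cbndx" is assumed to come from a prior
 * DOMAIN_ATTR_CONTEXT_BANK query on an already-attached parent domain; the
 * attach path below finalises the dynamic domain without touching the
 * stream-to-context mapping.
 */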
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool dynamic;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
	smmu_domain->cfg.asid = INVALID_ASID;

	/* We're bypassing these SIDs, so don't allocate an actual context */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	dynamic = is_dynamic_domain(domain);
	if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
		dev_err(smmu->dev, "dynamic domains not supported\n");
		ret = -EPERM;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested	Supported	Actual
	 * S1		N		S1
	 * S1		S1+S2		S1
	 * S1		S2		S2
	 * S1		S1		S1
	 * N		N		N
	 * N		S1+S2		S2
	 * N		S2		S2
	 * N		S1		S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Dynamic domains must set cbndx through domain attribute */
	if (!dynamic) {
		ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
					      smmu->num_context_banks);
		if (ret < 0)
			goto out_unlock;
		cfg->cbndx = ret;
	}
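	/*
	 * SMMUv1 exposes a shared pool of context interrupts, so interrupt
	 * indices are handed out round-robin across that pool; from SMMUv2
	 * onwards each context bank has a dedicated interrupt line.
	 */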
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
					 smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;

	/* Assign an asid */
	ret = arm_smmu_init_asid(domain, smmu);
	if (ret)
		goto out_clear_smmu;

	if (!dynamic) {
		/* Initialise the context bank with our page table cfg */
		arm_smmu_init_context_bank(smmu_domain,
					   &smmu_domain->pgtbl_cfg);

		/*
		 * Request context fault interrupt. Do this last to avoid the
		 * handler seeing a half-initialised domain state.
		 */
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
			arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
			"arm-smmu-context-fault", domain);
		if (ret < 0) {
			dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
				cfg->irptndx, irq);
			cfg->irptndx = INVALID_IRPTNDX;
			goto out_clear_smmu;
		}
	} else {
		cfg->irptndx = INVALID_IRPTNDX;
	}
	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	arm_smmu_destroy_domain_context(domain);
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;
	bool dynamic;
	int ret;

	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
		return;

	ret = arm_smmu_power_on(smmu);
	if (ret) {
		WARN_ONCE(ret, "Whoops, powering on smmu %p failed. Leaking context bank\n",
			  smmu);
		return;
	}

	dynamic = is_dynamic_domain(domain);
	if (dynamic) {
		arm_smmu_free_asid(domain);
		free_io_pgtable_ops(smmu_domain->pgtbl_ops);
		arm_smmu_power_off(smmu);
		return;
	}

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);

	arm_smmu_power_off(smmu);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	/* Do not support DOMAIN_DMA for now */
	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);
	smmu_domain->cfg.cbndx = INVALID_CBNDX;

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

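/*
 * On stream-matching SMMUs, reserve one Stream Match Register per stream ID
 * of the master and program it to match that ID exactly (mask 0, i.e. no SMR
 * sharing). The corresponding S2CR entries are pointed at the domain's
 * context bank later, in arm_smmu_domain_add_master().
 */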
static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (idx < 0) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}

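/*
 * Route a master's stream IDs to the domain's context bank: allocate and
 * program SMRs if the SMMU is stream-matching, then write an S2CR of type
 * TRANS for each stream so that its transactions are translated by cbndx.
 */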
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/*
	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
	 * for all devices behind the SMMU. Note that we need to take
	 * care configuring SMRs for devices that are both a
	 * platform_device and a PCI device (i.e. a PCI host controller).
	 */
	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
		return 0;

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx, s2cr;

		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		s2cr = S2CR_TYPE_TRANS |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}

static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;

		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}

static void arm_smmu_detach_dev(struct iommu_domain *domain,
				struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_master_cfg *cfg;
	int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (dynamic)
		return;

	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return;

	if (!smmu) {
		dev_err(dev, "Domain not attached; cannot detach!\n");
		return;
	}

	dev->archdata.iommu = NULL;
	arm_smmu_domain_remove_master(smmu_domain, cfg);

	/* Remove additional vote for atomic power */
	if (atomic_domain) {
		WARN_ON(arm_smmu_enable_clocks_atomic(smmu));
		arm_smmu_power_off(smmu);
	}
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Enable Clocks and Power */
	ret = arm_smmu_power_on(smmu);
	if (ret)
		return ret;

	/*
	 * Keep an additional vote for non-atomic power until domain is
	 * detached
	 */
	if (atomic_domain) {
		ret = arm_smmu_power_on(smmu);
		if (ret)
			goto out_power_off;

		arm_smmu_disable_clocks_atomic(smmu);
	}

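	/*
	 * The double power_on() above is deliberate (assuming power_on takes
	 * both a regulator and a clock vote, as the helper names suggest):
	 * dropping only the clock half leaves a long-term regulator vote in
	 * place, so atomic contexts can later re-enable clocks without
	 * having to sleep. The matching power_off happens in detach_dev.
	 */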
	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		goto out_power_off;

	/* Do not modify the SIDs, HW is still running */
	if (is_dynamic_domain(domain)) {
		ret = 0;
		goto out_power_off;
	}

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		ret = -EINVAL;
		goto out_power_off;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg) {
		ret = -ENODEV;
		goto out_power_off;
	}

	/* Detach the dev from its current domain */
	if (dev->archdata.iommu)
		arm_smmu_detach_dev(dev->archdata.iommu, dev);

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;

out_power_off:
	arm_smmu_power_off(smmu);

	return ret;
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

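/*
 * Unlike map(), unmap() must vote for power: page-table updates alone do
 * not touch the SMMU register space, but unmapping triggers TLB
 * invalidation through arm_smmu_gather_ops, which does.
 */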
static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
	if (ret)
		return ret;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	arm_smmu_domain_power_off(domain, smmu_domain->smmu);
	return ret;
}

static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
			      struct scatterlist *sg, unsigned int nents, int prot)
{
	int ret;
	size_t size;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
	if (ret)
		return ret;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	if (!ret)
		arm_smmu_unmap(domain, iova, size);

	arm_smmu_domain_power_off(domain, smmu_domain->smmu);
	return ret;
}

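/*
 * Hardware-assisted address translation (ATOS): optionally halt the SMMU,
 * write the page-aligned IOVA to ATS1PR, poll ATSR until the walk
 * completes, then read the result back from PAR. On timeout, the software
 * table walk result is reported in the error message for comparison only.
 */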
static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
						dma_addr_t iova, bool do_halt)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	unsigned long flags;
	u32 tmp;
	u64 phys;
	unsigned long va;

	spin_lock_irqsave(&smmu->atos_lock, flags);
	if (do_halt && arm_smmu_halt(smmu)) {
		phys = 0;
		goto out_unlock;
	}

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		phys = ops->iova_to_phys(ops, iova);
		dev_err(dev,
			"iova to phys timed out on %pad. software table walk result=%pa.\n",
			&iova, &phys);
		phys = 0;
		goto out_resume;
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		phys = 0;
	} else {
		phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
	}
out_resume:
	if (do_halt)
		arm_smmu_resume(smmu);
out_unlock:
	spin_unlock_irqrestore(&smmu->atos_lock, flags);
	return phys;
}

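/*
 * Plain iova_to_phys() walks the software page tables under the pgtbl
 * spinlock and never touches hardware, so it is safe from atomic context
 * and needs no power vote.
 */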
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

/*
 * This function can sleep, and cannot be called from atomic context. Will
 * power on register block if required. This restriction does not apply to the
 * original iova_to_phys() op.
 */
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	phys_addr_t ret = 0;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int err;

	err = arm_smmu_power_on(smmu_domain->smmu);
	if (err)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
		ret = __arm_smmu_iova_to_phys_hard(domain, iova, true);

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	arm_smmu_power_off(smmu_domain->smmu);
	return ret;
}

static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
	struct iommu_domain *domain, dma_addr_t iova)
{
	return __arm_smmu_iova_to_phys_hard(domain, iova, false);
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids)
		cfg->streamids[cfg->num_streamids++] = sid;

	return 0;
}

static int arm_smmu_init_platform_device(struct device *dev,
					 struct iommu_group *group)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master *master;

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	iommu_group_set_iommudata(group, &master->cfg, NULL);

	return 0;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int ret = 0;

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	case DOMAIN_ATTR_PT_BASE_ADDR:
		*((phys_addr_t *)data) =
			smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
		return 0;
	case DOMAIN_ATTR_CONTEXT_BANK:
		/* context bank index isn't valid until we are attached */
		if (smmu_domain->smmu == NULL)
			return -ENODEV;

		*((unsigned int *) data) = smmu_domain->cfg.cbndx;
		ret = 0;
		break;
	case DOMAIN_ATTR_TTBR0: {
		u64 val;
		struct arm_smmu_device *smmu = smmu_domain->smmu;
		/* not valid until we are attached */
		if (smmu == NULL)
			return -ENODEV;

		val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
		if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
			val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
					<< (TTBRn_ASID_SHIFT);
		*((u64 *)data) = val;
		ret = 0;
		break;
	}
	case DOMAIN_ATTR_CONTEXTIDR:
		/* not valid until attached */
		if (smmu_domain->smmu == NULL)
			return -ENODEV;
		*((u32 *)data) = smmu_domain->cfg.procid;
		ret = 0;
		break;
	case DOMAIN_ATTR_PROCID:
		*((u32 *)data) = smmu_domain->cfg.procid;
		ret = 0;
		break;
	case DOMAIN_ATTR_DYNAMIC:
		*((int *)data) = !!(smmu_domain->attributes
					& (1 << DOMAIN_ATTR_DYNAMIC));
		ret = 0;
		break;
	case DOMAIN_ATTR_NON_FATAL_FAULTS:
		*((int *)data) = !!(smmu_domain->attributes
					& (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
		ret = 0;
		break;
	case DOMAIN_ATTR_S1_BYPASS:
		*((int *)data) = !!(smmu_domain->attributes
					& (1 << DOMAIN_ATTR_S1_BYPASS));
		ret = 0;
		break;
	default:
		return -ENODEV;
	}
	return ret;
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	case DOMAIN_ATTR_PROCID:
		if (smmu_domain->smmu != NULL) {
			dev_err(smmu_domain->smmu->dev,
				"cannot change procid attribute while attached\n");
			ret = -EBUSY;
			break;
		}
		smmu_domain->cfg.procid = *((u32 *)data);
		ret = 0;
		break;
	case DOMAIN_ATTR_DYNAMIC: {
		int dynamic = *((int *)data);

		if (smmu_domain->smmu != NULL) {
			dev_err(smmu_domain->smmu->dev,
				"cannot change dynamic attribute while attached\n");
			ret = -EBUSY;
			break;
		}

		if (dynamic)
			smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
		else
			smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
		ret = 0;
		break;
	}
	case DOMAIN_ATTR_CONTEXT_BANK:
		/* context bank can't be set while attached */
		if (smmu_domain->smmu != NULL) {
			ret = -EBUSY;
			break;
		}
		/* ... and it can only be set for dynamic contexts. */
		if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
			ret = -EINVAL;
			break;
		}

		/* this will be validated during attach */
		smmu_domain->cfg.cbndx = *((unsigned int *)data);
		ret = 0;
		break;
	case DOMAIN_ATTR_NON_FATAL_FAULTS: {
		u32 non_fatal_faults = *((int *)data);

		if (non_fatal_faults)
			smmu_domain->attributes |=
				1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
		else
			smmu_domain->attributes &=
				~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
		ret = 0;
		break;
	}
	case DOMAIN_ATTR_S1_BYPASS: {
		int bypass = *((int *)data);

		/* bypass can't be changed while attached */
		if (smmu_domain->smmu != NULL) {
			ret = -EBUSY;
			break;
		}
		if (bypass)
			smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
		else
			smmu_domain->attributes &=
				~(1 << DOMAIN_ATTR_S1_BYPASS);

		ret = 0;
		break;
	}
	case DOMAIN_ATTR_ATOMIC:
	{
		int atomic_ctx = *((int *)data);

		/* can't be changed while attached */
		if (smmu_domain->smmu != NULL) {
			ret = -EBUSY;
			break;
		}
		if (atomic_ctx)
			smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
		else
			smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
		break;
	}
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_trigger_fault(struct iommu_domain *domain,
				   unsigned long flags)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu;
	void __iomem *cb_base;

	if (!smmu_domain->smmu) {
		pr_err("Can't trigger faults on non-attached domains\n");
		return;
	}

	smmu = smmu_domain->smmu;
	arm_smmu_power_on(smmu);

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
		flags, cfg->cbndx);
	writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
	/* give the interrupt time to fire... */
	msleep(1000);

	arm_smmu_power_off(smmu);
}

static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
				       unsigned long offset)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	unsigned long val;

	if (offset >= SZ_4K) {
		pr_err("Invalid offset: 0x%lx\n", offset);
		return 0;
	}

	smmu = smmu_domain->smmu;
	if (!smmu) {
		WARN(1, "Can't read registers of a detached domain\n");
		val = 0;
		return val;
	}

	arm_smmu_power_on(smmu);

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	val = readl_relaxed(cb_base + offset);

	arm_smmu_power_off(smmu);
	return val;
}

static void arm_smmu_reg_write(struct iommu_domain *domain,
			       unsigned long offset, unsigned long val)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;

	if (offset >= SZ_4K) {
		pr_err("Invalid offset: 0x%lx\n", offset);
		return;
	}

	smmu = smmu_domain->smmu;
	if (!smmu) {
		WARN(1, "Can't write registers of a detached domain\n");
		return;
	}

	arm_smmu_power_on(smmu);

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(val, cb_base + offset);

	arm_smmu_power_off(smmu);
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.detach_dev		= arm_smmu_detach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= arm_smmu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.iova_to_phys_hard	= arm_smmu_iova_to_phys_hard,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
	.trigger_fault		= arm_smmu_trigger_fault,
	.reg_read		= arm_smmu_reg_read,
	.reg_write		= arm_smmu_reg_write,
};

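/*
 * Implementation-defined halt/resume handshake via the micro-MMU control
 * register: request a local halt and wait for the IDLE bit, which should
 * indicate that no client transactions remain in flight.
 */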
static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu)
{
	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
	u32 tmp;

	if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
				      tmp, (tmp & MICRO_MMU_CTRL_IDLE),
				      0, 30000)) {
		dev_err(smmu->dev, "Couldn't halt SMMU!\n");
		return -EBUSY;
	}

	return 0;
}

static int __arm_smmu_halt(struct arm_smmu_device *smmu, bool wait)
{
	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
	u32 reg;

	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
	reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
	writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);

	return wait ? arm_smmu_wait_for_halt(smmu) : 0;
}

static int arm_smmu_halt(struct arm_smmu_device *smmu)
{
	return __arm_smmu_halt(smmu, true);
}

static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu)
{
	return __arm_smmu_halt(smmu, false);
}

static void arm_smmu_resume(struct arm_smmu_device *smmu)
{
	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
	u32 reg;

	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
	reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
	writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
}

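/*
 * Write out the implementation-defined register values taken from the
 * platform configuration. The SMMU is halted around the writes, presumably
 * so the new values are not observed by a translation already in flight.
 */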
static void arm_smmu_impl_def_programming(struct arm_smmu_device *smmu)
{
	int i;
	struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;

	arm_smmu_halt(smmu);
	for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
		writel_relaxed(regs[i].value,
			ARM_SMMU_GR0(smmu) + regs[i].offset);
	arm_smmu_resume(smmu);
}

static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
{
	int i;
	u32 reg, major;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;

	/*
	 * Before clearing ARM_MMU500_ACTLR_CPRE, we need to clear the
	 * CACHE_LOCK bit of ACR first; note that the CACHE_LOCK bit is
	 * only present in MMU-500r2 onwards.
	 */
2540 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
2541 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
2542 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
2543 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
2544 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
2545 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
2546 }
2547
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002548 /* Make sure all context banks are disabled and clear CB_FSR */
2549 for (i = 0; i < smmu->num_context_banks; ++i) {
2550 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
2551 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
2552 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002553 /*
2554 * Disable MMU-500's not-particularly-beneficial next-page
2555 * prefetcher for the sake of errata #841119 and #826419.
2556 */
2557 if (smmu->model == ARM_MMU500) {
2558 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
2559 reg &= ~ARM_MMU500_ACTLR_CPRE;
2560 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
2561 }
Patrick Dalyf0d4e212016-06-20 15:50:14 -07002562
2563 if (smmu->model == QCOM_SMMUV2) {
2564 reg = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
2565 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
2566 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
2567 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
2568 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002569 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08002570}

static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	int i = 0;
	u32 reg;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
		/*
		 * Mark all SMRn as invalid and all S2CRn as bypass unless
		 * overridden
		 */
		reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
		for (i = 0; i < smmu->num_mapping_groups; ++i) {
			writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
			writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
		}

		arm_smmu_context_bank_reset(smmu);
	}

	/* Program implementation defined registers */
	arm_smmu_impl_def_programming(smmu);

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

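/*
 * Translate the 3-bit address-size fields found in IDR2 (IAS/OAS/UBS)
 * into a width in bits. For example, a field value of 2 decodes to a
 * 40-bit address space; a value of 5 (or anything unrecognised) decodes
 * to the maximum of 48 bits.
 */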
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

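/*
 * "attach-impl-defs" is parsed below as a flat list of <offset value>
 * u32 pairs, each naming an implementation-defined register (as an
 * offset from the global register space) and the value to program into
 * it whenever a domain attaches. An illustrative device-tree fragment;
 * the offsets and values here are invented for the example:
 *
 *	smmu@1500000 {
 *		compatible = "qcom,smmu-v2";
 *		...
 *		attach-impl-defs = <0x6000 0x270>,
 *				   <0x6060 0x1055>;
 *	};
 */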
static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;
	int i, ntuples, ret;
	u32 *tuples;
	struct arm_smmu_impl_def_reg *regs, *regit;

	if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
		return 0;

	ntuples /= sizeof(u32);
	if (ntuples % 2) {
		dev_err(dev,
			"Invalid number of attach-impl-defs registers: %d\n",
			ntuples);
		return -EINVAL;
	}

	/* One register descriptor per <offset value> pair of u32 cells */
	regs = devm_kmalloc(
		dev, sizeof(*smmu->impl_def_attach_registers) * (ntuples / 2),
		GFP_KERNEL);
	if (!regs)
		return -ENOMEM;

	tuples = devm_kmalloc(dev, sizeof(u32) * ntuples, GFP_KERNEL);
	if (!tuples)
		return -ENOMEM;

	ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
					 tuples, ntuples);
	if (ret)
		return ret;

	for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
		regit->offset = tuples[i];
		regit->value = tuples[i + 1];
	}

	devm_kfree(dev, tuples);

	smmu->impl_def_attach_registers = regs;
	smmu->num_impl_def_attach_registers = ntuples / 2;

	return 0;
}

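/*
 * Clocks are discovered purely by name. A minimal sketch of the
 * expected device-tree wiring; the clock names and provider here are
 * invented for the example:
 *
 *	smmu@1500000 {
 *		...
 *		clocks = <&gcc SMMU_CFG_CLK>, <&gcc SMMU_CORE_CLK>;
 *		clock-names = "cfg_clk", "core_clk";
 *	};
 *
 * A missing "clock-names" property is not an error: the SMMU is then
 * assumed not to need any clock management.
 */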
static int arm_smmu_init_clocks(struct arm_smmu_device *smmu)
{
	const char *cname;
	struct property *prop;
	int i;
	struct device *dev = smmu->dev;

	smmu->num_clocks =
		of_property_count_strings(dev->of_node, "clock-names");

	if (smmu->num_clocks < 1)
		return 0;

	smmu->clocks = devm_kzalloc(
		dev, sizeof(*smmu->clocks) * smmu->num_clocks,
		GFP_KERNEL);

	if (!smmu->clocks) {
		dev_err(dev,
			"Failed to allocate memory for clocks\n");
		return -ENOMEM;
	}

	i = 0;
	of_property_for_each_string(dev->of_node, "clock-names",
				    prop, cname) {
		struct clk *c = devm_clk_get(dev, cname);

		if (IS_ERR(c)) {
			dev_err(dev, "Couldn't get clock: %s\n",
				cname);
			return PTR_ERR(c);
		}

		/* Give clocks with no configured rate a sane default */
		if (clk_get_rate(c) == 0) {
			long rate = clk_round_rate(c, 1000);

			clk_set_rate(c, rate);
		}

		smmu->clocks[i] = c;

		++i;
	}
	return 0;
}

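/*
 * The optional "vdd-supply" property names the power rail (typically a
 * GDSC on QCOM platforms) backing the SMMU, e.g. (illustrative, the
 * phandle is invented for the example):
 *
 *	smmu@1500000 {
 *		...
 *		vdd-supply = <&gdsc_smmu>;
 *	};
 *
 * As with clocks, its absence simply means no power management is
 * attempted for this instance.
 */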
static int arm_smmu_init_regulators(struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;

	if (!of_get_property(dev->of_node, "vdd-supply", NULL))
		return 0;

	smmu->gdsc = devm_regulator_get(dev, "vdd");
	if (IS_ERR(smmu->gdsc))
		return PTR_ERR(smmu->gdsc);

	return 0;
}

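/*
 * Bus scaling registration. "qcom,bus-master-id" identifies this SMMU's
 * master port on the MSM bus fabric; the slave end is hard-wired below
 * to MSM_BUS_SLAVE_EBI_CH0. Illustrative binding, with a made-up port
 * number:
 *
 *	smmu@1500000 {
 *		...
 *		qcom,bus-master-id = <100>;
 *	};
 */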
static int arm_smmu_init_bus_scaling(struct platform_device *pdev,
				     struct arm_smmu_device *smmu)
{
	u32 master_id;

	if (of_property_read_u32(pdev->dev.of_node, "qcom,bus-master-id",
				 &master_id)) {
		dev_dbg(smmu->dev, "No bus scaling info\n");
		return 0;
	}

	smmu->bus_client_name = devm_kasprintf(
		smmu->dev, GFP_KERNEL, "smmu-bus-client-%s",
		dev_name(smmu->dev));

	if (!smmu->bus_client_name)
		return -ENOMEM;

	smmu->bus_client = msm_bus_scale_register(
		master_id, MSM_BUS_SLAVE_EBI_CH0, smmu->bus_client_name, true);
	if (IS_ERR(smmu->bus_client)) {
		int ret = PTR_ERR(smmu->bus_client);

		if (ret != -EPROBE_DEFER)
			dev_err(smmu->dev, "Bus client registration failed\n");
		return ret;
	}

	return 0;
}

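/*
 * Probe the hardware configuration registers (IDR0..IDR2) and fill in
 * the arm_smmu_device feature flags, sizes and counts that the rest of
 * the driver relies on: translation stages, stream matching/indexing
 * resources, context bank counts, address sizes and supported
 * page-table formats.
 */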
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
		   smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
	      (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
	    ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
			smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
			smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
			writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
			smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

			mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
			sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
			if ((mask & sid) != sid) {
				dev_err(smmu->dev,
					"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
					mask, sid);
				return -ENODEV;
			}

			dev_notice(smmu->dev,
				   "\tstream matching with %u register groups, mask 0x%x\n",
				   smmu->num_mapping_groups, mask);
		}
	} else {
		smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
					   ID0_NUMSIDB_MASK;
	}

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			 size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

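/*
 * For instance, the first invocation below expands to:
 *
 *	static struct arm_smmu_match_data smmu_generic_v1 = {
 *		.version = ARM_SMMU_V1, .model = GENERIC_SMMU
 *	};
 */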
ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

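/*
 * Probe sequence: resolve the match data, map the register space, count
 * and collect IRQs, parse driver options, then set up clocks, regulators
 * and bus scaling before powering the SMMU on to read its ID registers.
 * Masters from the legacy "mmu-masters" binding are registered next,
 * global fault handlers are hooked up, and finally the SMMU is reset and
 * powered back off until a domain attaches.
 */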
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_iterator it;
	struct arm_smmu_phandle_args *masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;
	spin_lock_init(&smmu->atos_lock);
	mutex_init(&smmu->power_lock);
	spin_lock_init(&smmu->clock_refs_lock);
	idr_init(&smmu->asid_idr);
	mutex_init(&smmu->idr_mutex);

	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	data = of_id->data;
	smmu->version = data->version;
	smmu->model = data->model;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	parse_driver_options(smmu);

	err = arm_smmu_init_clocks(smmu);
	if (err)
		return err;

	err = arm_smmu_init_regulators(smmu);
	if (err)
		return err;

	err = arm_smmu_init_bus_scaling(pdev, smmu);
	if (err)
		return err;

	err = arm_smmu_power_on(smmu);
	if (err)
		return err;

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		goto out_power_off;

	i = 0;
	smmu->masters = RB_ROOT;

	err = -ENOMEM;
	/* No need to zero the memory for masterspec */
	masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
	if (!masterspec)
		goto out_put_masters;

	of_for_each_phandle(&it, err, dev->of_node,
			    "mmu-masters", "#stream-id-cells", 0) {
		int count = of_phandle_iterator_args(&it, masterspec->args,
						     MAX_MASTER_STREAMIDS);
		masterspec->np = of_node_get(it.node);
		masterspec->args_count = count;

		err = register_smmu_master(smmu, dev, masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec->np->name);
			kfree(masterspec);
			goto out_put_masters;
		}

		i++;
	}

	dev_notice(dev, "registered %d master devices\n", i);

	kfree(masterspec);

	err = arm_smmu_parse_impl_def_registers(smmu);
	if (err)
		goto out_put_masters;

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
						NULL, arm_smmu_global_fault,
						IRQF_ONESHOT | IRQF_SHARED,
						"arm-smmu global fault", smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_put_masters;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	arm_smmu_power_off(smmu);
	return 0;

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

out_power_off:
	arm_smmu_power_off(smmu);

	return err;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		devm_free_irq(smmu->dev, smmu->irqs[i], smmu);

	idr_destroy(&smmu->asid_idr);

	arm_smmu_power_on(smmu);
	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	arm_smmu_power_off(smmu);

	msm_bus_scale_unregister(smmu->bus_client);

	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver = {
		.name = "arm-smmu",
		.of_match_table = of_match_ptr(arm_smmu_of_match),
	},
	.probe = arm_smmu_device_dt_probe,
	.remove = arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

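/*
 * Registered at subsys_initcall() rather than module_init() so the SMMU
 * is ready before the devices behind it probe and start issuing DMA.
 * The matching-node check in arm_smmu_init() keeps this cheap on
 * systems without an ARM SMMU.
 */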
subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");