/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/msm-bus.h>
#include <dt-bindings/msm/msm-bus-ids.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

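/*
 * Example: a global fault status read that honours the secure alias when
 * "calxeda,smmu-secure-config-access" is set, as the fault handlers below do:
 *	readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
 */
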
/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		500000	/* 500ms */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

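/*
 * The translation context banks occupy the upper half of the SMMU register
 * space (ARM_SMMU_CB_BASE = base + size/2), one translation-unit page
 * (1 << pgshift bytes) per context bank.
 */
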
#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FSRRESTORE		0x5c
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define TLBSTATUS_SACTIVE		(1 << 0)
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

/* Definitions for implementation-defined registers */
#define ACTLR_QCOM_OSH_SHIFT		28
#define ACTLR_QCOM_OSH			1

#define ACTLR_QCOM_ISH_SHIFT		29
#define ACTLR_QCOM_ISH			1

#define ACTLR_QCOM_NSH_SHIFT		30
#define ACTLR_QCOM_NSH			1

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define IMPL_DEF1_MICRO_MMU_CTRL	0
#define MICRO_MMU_CTRL_LOCAL_HALT_REQ	(1 << 2)
#define MICRO_MMU_CTRL_IDLE		(1 << 3)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

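/*
 * Both parameters can also be set on the kernel command line, e.g.
 * "arm-smmu.force_stage=2" or "arm-smmu.disable_bypass=1".
 */
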
enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_smr {
	u8 idx;
	u16 mask;
	u16 id;
};

struct arm_smmu_master_cfg {
	int num_streamids;
	u16 streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr *smrs;
};

struct arm_smmu_master {
	struct device_node *of_node;
	struct rb_node node;
	struct arm_smmu_master_cfg cfg;
};

struct arm_smmu_device {
	struct device *dev;

	void __iomem *base;
	unsigned long size;
	unsigned long pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32 features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
#define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
	u32 options;
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;

	u32 num_context_banks;
	u32 num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t irptndx;

	u32 num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long va_size;
	unsigned long ipa_size;
	unsigned long pa_size;
	unsigned long pgsize_bitmap;

	u32 num_global_irqs;
	u32 num_context_irqs;
	unsigned int *irqs;

	struct list_head list;
	struct rb_root masters;

	u32 cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg *impl_def_attach_registers;
	unsigned int num_impl_def_attach_registers;

	int num_clocks;
	struct clk **clocks;

	struct regulator *gdsc;

	struct msm_bus_client_handle *bus_client;
	char *bus_client_name;

	/* Protects power_count */
	struct mutex power_lock;
	int power_count;
	/* Protects clock_refs_count */
	spinlock_t clock_refs_lock;
	int clock_refs_count;

	spinlock_t atos_lock;

	/* protects idr */
	struct mutex idr_mutex;
	struct idr asid_idr;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8 cbndx;
	u8 irptndx;
	u32 cbar;
	u32 procid;
	u16 asid;
	enum arm_smmu_context_fmt fmt;
};
#define INVALID_IRPTNDX			0xff
#define INVALID_CBNDX			0xff
#define INVALID_ASID			0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg)	((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg)	((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

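/*
 * cavium_id_base offsets the VMID numbering so that context banks on
 * separate Cavium SMMU instances, which share a single TLB namespace,
 * never alias; on other implementations it remains 0 and the VMID is
 * simply cbndx + 1.
 */
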
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_pte_info {
	void *virt_addr;
	size_t size;
	struct list_head entry;
};

struct arm_smmu_domain {
	struct arm_smmu_device *smmu;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	spinlock_t pgtbl_lock;
	struct arm_smmu_cfg cfg;
	enum arm_smmu_domain_stage stage;
	struct mutex init_mutex; /* Protects smmu pointer */
	u32 attributes;
	u32 secure_vmid;
	struct list_head pte_info_list;
	struct list_head unassign_list;
	struct mutex assign_lock;
	struct iommu_domain domain;
};

struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ 0, NULL},
};

static int arm_smmu_halt(struct arm_smmu_device *smmu);
static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu);
static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu);
static void arm_smmu_resume(struct arm_smmu_device *smmu);
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
	struct iommu_domain *domain, dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
{
	return (smmu_domain->secure_vmid != VMID_INVAL);
}

static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_lock(&smmu_domain->assign_lock);
}

static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_unlock(&smmu_domain->assign_lock);
}

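/*
 * Secure domains (those with a valid secure_vmid) hand their page tables
 * over to the secure world via arm_smmu_assign_table(); assign_lock
 * serialises that hand-off against concurrent page-table updates, hence
 * the conditional locking above.
 */
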
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct arm_smmu_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node = masterspec->np;
	master->cfg.num_streamids = masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

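/*
 * Lock-free index allocation: find_next_zero_bit() proposes a candidate
 * and the atomic test_and_set_bit() either claims it or, if another CPU
 * raced us to it, forces another pass around the loop.
 */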
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static int arm_smmu_prepare_clocks(struct arm_smmu_device *smmu)
{
	int i, ret = 0;

	for (i = 0; i < smmu->num_clocks; ++i) {
		ret = clk_prepare(smmu->clocks[i]);
		if (ret) {
			dev_err(smmu->dev, "Couldn't prepare clock #%d\n", i);
			while (i--)
				clk_unprepare(smmu->clocks[i]);
			break;
		}
	}
	return ret;
}

static void arm_smmu_unprepare_clocks(struct arm_smmu_device *smmu)
{
	int i;

	for (i = smmu->num_clocks; i; --i)
		clk_unprepare(smmu->clocks[i - 1]);
}

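/*
 * Clock handling follows the usual clk-framework split: clk_prepare()
 * may sleep, so it is done in the "slow" (mutex-protected) power path,
 * while clk_enable()/clk_disable() below are non-sleeping and may be
 * called under the clock_refs spinlock from atomic context.
 */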
/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
static int arm_smmu_enable_clocks_atomic(struct arm_smmu_device *smmu)
{
	int i, ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&smmu->clock_refs_lock, flags);
	if (smmu->clock_refs_count > 0) {
		smmu->clock_refs_count++;
		spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
		return 0;
	}

	for (i = 0; i < smmu->num_clocks; ++i) {
		ret = clk_enable(smmu->clocks[i]);
		if (ret) {
			dev_err(smmu->dev, "Couldn't enable clock #%d\n", i);
			while (i--)
				clk_disable(smmu->clocks[i]);
			break;
		}
	}

	if (!ret)
		smmu->clock_refs_count++;

	spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
	return ret;
}

/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
static void arm_smmu_disable_clocks_atomic(struct arm_smmu_device *smmu)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&smmu->clock_refs_lock, flags);
	WARN_ON(smmu->clock_refs_count == 0);
	if (smmu->clock_refs_count > 1) {
		smmu->clock_refs_count--;
		spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
		return;
	}

	for (i = smmu->num_clocks; i; --i)
		clk_disable(smmu->clocks[i - 1]);

	smmu->clock_refs_count--;
	spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
}

static int arm_smmu_enable_regulators(struct arm_smmu_device *smmu)
{
	if (!smmu->gdsc)
		return 0;

	return regulator_enable(smmu->gdsc);
}

static int arm_smmu_disable_regulators(struct arm_smmu_device *smmu)
{
	if (!smmu->gdsc)
		return 0;

	return regulator_disable(smmu->gdsc);
}

static int arm_smmu_request_bus(struct arm_smmu_device *smmu)
{
	if (!smmu->bus_client)
		return 0;
	return msm_bus_scale_update_bw(smmu->bus_client, 0, 1000);
}

static int arm_smmu_unrequest_bus(struct arm_smmu_device *smmu)
{
	if (!smmu->bus_client)
		return 0;
	return msm_bus_scale_update_bw(smmu->bus_client, 0, 0);
}

static int arm_smmu_power_on_slow(struct arm_smmu_device *smmu)
{
	int ret;

	mutex_lock(&smmu->power_lock);
	if (smmu->power_count > 0) {
		smmu->power_count += 1;
		mutex_unlock(&smmu->power_lock);
		return 0;
	}

	ret = arm_smmu_enable_regulators(smmu);
	if (ret)
		goto out_unlock;

	ret = arm_smmu_request_bus(smmu);
	if (ret)
		goto out_disable_regulators;

	ret = arm_smmu_prepare_clocks(smmu);
	if (ret)
		goto out_disable_bus;

	smmu->power_count += 1;
	mutex_unlock(&smmu->power_lock);
	return 0;

out_disable_bus:
	arm_smmu_unrequest_bus(smmu);
out_disable_regulators:
	arm_smmu_disable_regulators(smmu);
out_unlock:
	mutex_unlock(&smmu->power_lock);
	return ret;
}

static void arm_smmu_power_off_slow(struct arm_smmu_device *smmu)
{
	mutex_lock(&smmu->power_lock);
	smmu->power_count--;
	WARN_ON(smmu->power_count < 0);

	if (smmu->power_count > 0) {
		mutex_unlock(&smmu->power_lock);
		return;
	}

	arm_smmu_unprepare_clocks(smmu);
	arm_smmu_unrequest_bus(smmu);
	arm_smmu_disable_regulators(smmu);

	mutex_unlock(&smmu->power_lock);
}

static int arm_smmu_power_on(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_power_on_slow(smmu);
	if (ret)
		return ret;

	ret = arm_smmu_enable_clocks_atomic(smmu);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	arm_smmu_power_off_slow(smmu);
	return ret;
}

static void arm_smmu_power_off(struct arm_smmu_device *smmu)
{
	arm_smmu_disable_clocks_atomic(smmu);
	arm_smmu_power_off_slow(smmu);
}

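/*
 * A minimal usage sketch: every register access elsewhere in this driver
 * is bracketed by a power vote, along the lines of
 *
 *	ret = arm_smmu_power_on(smmu);
 *	if (ret)
 *		return ret;
 *	...access SMMU registers...
 *	arm_smmu_power_off(smmu);
 */
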
/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static int arm_smmu_domain_power_on(struct iommu_domain *domain,
				    struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain)
		return arm_smmu_enable_clocks_atomic(smmu);

	return arm_smmu_power_on(smmu);
}

/*
 * Must be used instead of arm_smmu_power_off if it may be called from
 * atomic context
 */
static void arm_smmu_domain_power_off(struct iommu_domain *domain,
				      struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain) {
		arm_smmu_disable_clocks_atomic(smmu);
		return;
	}

	arm_smmu_power_off(smmu);
}

/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
				 int cbndx)
{
	void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
	u32 val;

	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
				      !(val & TLBSTATUS_SACTIVE),
				      0, TLB_LOOP_TIMEOUT))
		dev_err(smmu->dev, "TLBSYNC timeout!\n");
}

static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
}

/* Must be called with clocks/regulators enabled */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
		__arm_smmu_tlb_sync(smmu);
	}
}

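/*
 * By-VA invalidation encodes its operand per context format: AArch32 ORs
 * the ASID into the low bits of the VA, AArch64 takes VA >> 12 with the
 * ASID in bits [63:48]; stage 2 on SMMUv2 invalidates by IPA, and older
 * stage-2-capable SMMUs fall back to invalidating the whole VMID.
 */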
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static void *arm_smmu_alloc_pages_exact(void *cookie,
					size_t size, gfp_t gfp_mask)
{
	int ret;
	void *page = alloc_pages_exact(size, gfp_mask);

	if (likely(page)) {
		ret = arm_smmu_prepare_pgtable(page, cookie);
		if (ret) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return page;
}

static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
{
	arm_smmu_unprepare_pgtable(cookie, virt, size);
	/* unprepare also frees (possibly later), no need to free here */
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
	.alloc_pages_exact = arm_smmu_alloc_pages_exact,
	.free_pages_exact = arm_smmu_free_pages_exact,
};

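/*
 * Cross-check a faulting IOVA against the hardware walker: halt the SMMU,
 * terminate the stalled transaction, momentarily disable stall-on-fault,
 * then retry the hardware translation (once more after a TLBIALL) to tell
 * page-table corruption apart from stale TLB entries or misbehaving
 * hardware.
 */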
static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
					 dma_addr_t iova, u32 fsr)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu;
	void __iomem *cb_base;
	u64 sctlr, sctlr_orig;
	phys_addr_t phys;

	smmu = smmu_domain->smmu;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	arm_smmu_halt_nowait(smmu);

	writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);

	arm_smmu_wait_for_halt(smmu);

	/* clear FSR to allow ATOS to log any faults */
	writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* disable stall mode momentarily */
	sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
	sctlr = sctlr_orig & ~SCTLR_CFCFG;
	writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);

	phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);

	if (!phys) {
		dev_err(smmu->dev,
			"ATOS failed. Will issue a TLBIALL and try again...\n");
		arm_smmu_tlb_inv_context(smmu_domain);
		phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);
		if (phys)
			dev_err(smmu->dev,
				"ATOS succeeded this time. Maybe we missed a TLB invalidation while messing with page tables earlier??\n");
		else
			dev_err(smmu->dev,
				"ATOS still failed. If the page tables look good (check the software table walk) then hardware might be misbehaving.\n");
	}

	/* restore SCTLR */
	writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);

	arm_smmu_resume(smmu);

	return phys;
}

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret, tmp;
	u32 fsr, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;
	void __iomem *gr1_base;
	bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
	phys_addr_t phys_soft;
	u32 frsynra;
	bool non_fatal_fault = !!(smmu_domain->attributes &
					DOMAIN_ATTR_NON_FATAL_FAULTS);

	static DEFINE_RATELIMIT_STATE(_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	ret = arm_smmu_power_on(smmu);
	if (ret)
		return IRQ_NONE;

	gr1_base = ARM_SMMU_GR1(smmu);
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT)) {
		ret = IRQ_NONE;
		goto out_power_off;
	}

	if (fatal_asf && (fsr & FSR_ASF)) {
		dev_err(smmu->dev,
			"Took an address size fault. Refusing to recover.\n");
		BUG();
	}

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
	if (fsr & FSR_TF)
		flags |= IOMMU_FAULT_TRANSLATION;
	if (fsr & FSR_PF)
		flags |= IOMMU_FAULT_PERMISSION;
	if (fsr & FSR_EF)
		flags |= IOMMU_FAULT_EXTERNAL;
	if (fsr & FSR_SS)
		flags |= IOMMU_FAULT_TRANSACTION_STALLED;

	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	phys_soft = arm_smmu_iova_to_phys(domain, iova);
	frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
	frsynra &= CBFRSYNRA_SID_MASK;
	tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
	if (!tmp || (tmp == -EBUSY)) {
		dev_dbg(smmu->dev,
			"Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
			iova, fsr, fsynr, cfg->cbndx);
		dev_dbg(smmu->dev,
			"soft iova-to-phys=%pa\n", &phys_soft);
		ret = IRQ_HANDLED;
		resume = RESUME_TERMINATE;
	} else {
		phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
							      fsr);
		if (__ratelimit(&_rs)) {
			dev_err(smmu->dev,
				"Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
				iova, fsr, fsynr, cfg->cbndx);
			dev_err(smmu->dev, "FAR = %016lx\n",
				(unsigned long)iova);
			dev_err(smmu->dev,
				"FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
				fsr,
				(fsr & 0x02) ? "TF " : "",
				(fsr & 0x04) ? "AFF " : "",
				(fsr & 0x08) ? "PF " : "",
				(fsr & 0x10) ? "EF " : "",
				(fsr & 0x20) ? "TLBMCF " : "",
				(fsr & 0x40) ? "TLBLKF " : "",
				(fsr & 0x80) ? "MHF " : "",
				(fsr & 0x40000000) ? "SS " : "",
				(fsr & 0x80000000) ? "MULTI " : "");
			dev_err(smmu->dev,
				"soft iova-to-phys=%pa\n", &phys_soft);
			if (!phys_soft)
				dev_err(smmu->dev,
					"SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
					dev_name(smmu->dev));
			dev_err(smmu->dev,
				"hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
			dev_err(smmu->dev, "SID=0x%x\n", frsynra);
		}
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
		if (!non_fatal_fault) {
			dev_err(smmu->dev,
				"Unhandled arm-smmu context fault!\n");
			BUG();
		}
	}

	/*
	 * If the client returns -EBUSY, do not clear FSR and do not RESUME
	 * if stalled. This is required to keep the IOMMU client stalled on
	 * the outstanding fault. This gives the client a chance to take any
	 * debug action and then terminate the stalled transaction.
	 * So, the sequence in case of stall on fault should be:
	 * 1) Do not clear FSR or write to RESUME here
	 * 2) Client takes any debug action
	 * 3) Client terminates the stalled transaction and resumes the IOMMU
	 * 4) Client clears FSR. The FSR should only be cleared after 3) and
	 *    not before so that the fault remains outstanding. This ensures
	 *    SCTLR.HUPCF has the desired effect if subsequent transactions
	 *    also need to be terminated.
	 */
	if (tmp != -EBUSY) {
		/* Clear the faulting FSR */
		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);

		/*
		 * Barrier required to ensure that the FSR is cleared
		 * before resuming SMMU operation
		 */
		wmb();

		/* Retry or terminate any stalled transactions */
		if (fsr & FSR_SS)
			writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
	}

out_power_off:
	arm_smmu_power_off(smmu);

	return ret;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	if (arm_smmu_power_on(smmu))
		return IRQ_NONE;

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr) {
		arm_smmu_power_off(smmu);
		return IRQ_NONE;
	}

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	arm_smmu_power_off(smmu);
	return IRQ_HANDLED;
}

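/*
 * Context bank initialisation below follows the register layout:
 * CBA2R (format and, with VMID16, the VMID), CBAR (type, interrupt index,
 * memory attributes), then the TTBRs, TTBCR/TTBCR2 and, for stage 1,
 * the MAIRs.
 */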
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001367 }
1368
Will Deacon45ae7cf2013-06-24 18:31:25 +01001369 /* SCTLR */
Patrick Dalye62d3362016-03-15 18:58:28 -07001370 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_EAE_SBOP;
1371
1372 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) ||
1373 !stage1)
1374 reg |= SCTLR_M;
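	/*
	 * Explanatory note: leaving SCTLR_M clear keeps translation
	 * disabled for this context bank, which is how
	 * DOMAIN_ATTR_S1_BYPASS takes effect for stage-1 domains.
	 */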
Will Deacon45ae7cf2013-06-24 18:31:25 +01001375 if (stage1)
1376 reg |= SCTLR_S1_ASIDPNE;
1377#ifdef __BIG_ENDIAN
1378 reg |= SCTLR_E;
1379#endif
Will Deacon25724842013-08-21 13:49:53 +01001380 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001381}
1382
Patrick Dalyc190d932016-08-30 17:23:28 -07001383static int arm_smmu_init_asid(struct iommu_domain *domain,
1384 struct arm_smmu_device *smmu)
1385{
1386 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1387 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1388 bool dynamic = is_dynamic_domain(domain);
1389 int ret;
1390
1391 if (!dynamic) {
1392 cfg->asid = cfg->cbndx + 1;
1393 } else {
1394 mutex_lock(&smmu->idr_mutex);
1395 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1396 smmu->num_context_banks + 2,
1397 MAX_ASID + 1, GFP_KERNEL);
1398
1399 mutex_unlock(&smmu->idr_mutex);
1400 if (ret < 0) {
1401 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1402 ret);
1403 return ret;
1404 }
1405 cfg->asid = ret;
1406 }
1407 return 0;
1408}
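/*
 * Explanatory note (not in the original source): static contexts use a
 * fixed ASID of cbndx + 1, so the cyclic IDR hands dynamic domains ASIDs
 * from num_context_banks + 2 up to MAX_ASID, guaranteeing the two ranges
 * never collide. E.g. with 8 context banks, static domains own ASIDs 1-8
 * while dynamic domains draw from 10..MAX_ASID.
 */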
1409
1410static void arm_smmu_free_asid(struct iommu_domain *domain)
1411{
1412 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1413 struct arm_smmu_device *smmu = smmu_domain->smmu;
1414 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1415 bool dynamic = is_dynamic_domain(domain);
1416
1417 if (cfg->asid == INVALID_ASID || !dynamic)
1418 return;
1419
1420 mutex_lock(&smmu->idr_mutex);
1421 idr_remove(&smmu->asid_idr, cfg->asid);
1422 mutex_unlock(&smmu->idr_mutex);
1423}
1424
Will Deacon45ae7cf2013-06-24 18:31:25 +01001425static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Will Deacon44680ee2014-06-25 11:29:12 +01001426 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001427{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001428 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001429 unsigned long ias, oas;
1430 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001431 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001432 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001433 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Patrick Dalyc190d932016-08-30 17:23:28 -07001434 bool dynamic;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001435
Will Deacon518f7132014-11-14 17:17:54 +00001436 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001437 if (smmu_domain->smmu)
1438 goto out_unlock;
1439
Patrick Dalyc190d932016-08-30 17:23:28 -07001440 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1441 smmu_domain->cfg.asid = INVALID_ASID;
1442
Robin Murphy98006992016-04-20 14:53:33 +01001443 /* We're bypassing these SIDs, so don't allocate an actual context */
1444 if (domain->type == IOMMU_DOMAIN_DMA) {
1445 smmu_domain->smmu = smmu;
1446 goto out_unlock;
1447 }
1448
Patrick Dalyc190d932016-08-30 17:23:28 -07001449 dynamic = is_dynamic_domain(domain);
1450 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1451 dev_err(smmu->dev, "dynamic domains not supported\n");
1452 ret = -EPERM;
1453 goto out_unlock;
1454 }
1455
Will Deaconc752ce42014-06-25 22:46:31 +01001456 /*
1457 * Mapping the requested stage onto what we support is surprisingly
1458 * complicated, mainly because the spec allows S1+S2 SMMUs without
1459 * support for nested translation. That means we end up with the
1460 * following table:
1461 *
1462 * Requested Supported Actual
1463 * S1 N S1
1464 * S1 S1+S2 S1
1465 * S1 S2 S2
1466 * S1 S1 S1
1467 * N N N
1468 * N S1+S2 S2
1469 * N S2 S2
1470 * N S1 S1
1471 *
1472 * Note that you can't actually request stage-2 mappings.
1473 */
1474 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1475 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1476 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1477 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
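	/*
	 * Worked example of the table above (explanatory note): on an
	 * SMMU advertising only ARM_SMMU_FEAT_TRANS_S2, a caller asking
	 * for stage 1 fails the first check and is silently retargeted
	 * to stage 2; the domain still works, just at the other stage.
	 */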
1478
Robin Murphy7602b872016-04-28 17:12:09 +01001479 /*
1480 * Choosing a suitable context format is even more fiddly. Until we
1481 * grow some way for the caller to express a preference, and/or move
1482 * the decision into the io-pgtable code where it arguably belongs,
1483 * just aim for the closest thing to the rest of the system, and hope
1484 * that the hardware isn't esoteric enough that we can't assume AArch64
1485 * support to be a superset of AArch32 support...
1486 */
1487 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1488 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
1489 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1490 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1491 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1492 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1493 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1494
1495 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1496 ret = -EINVAL;
1497 goto out_unlock;
1498 }
1499
Will Deaconc752ce42014-06-25 22:46:31 +01001500 switch (smmu_domain->stage) {
1501 case ARM_SMMU_DOMAIN_S1:
1502 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1503 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001504 ias = smmu->va_size;
1505 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001506 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001507 fmt = ARM_64_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001508 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001509 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001510 ias = min(ias, 32UL);
1511 oas = min(oas, 40UL);
1512 }
Will Deaconc752ce42014-06-25 22:46:31 +01001513 break;
1514 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001515 /*
1516 * We will likely want to change this if/when KVM gets
1517 * involved.
1518 */
Will Deaconc752ce42014-06-25 22:46:31 +01001519 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001520 cfg->cbar = CBAR_TYPE_S2_TRANS;
1521 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001522 ias = smmu->ipa_size;
1523 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001524 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001525 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001526 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001527 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001528 ias = min(ias, 40UL);
1529 oas = min(oas, 40UL);
1530 }
Will Deaconc752ce42014-06-25 22:46:31 +01001531 break;
1532 default:
1533 ret = -EINVAL;
1534 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001535 }
1536
Patrick Dalyc190d932016-08-30 17:23:28 -07001537	/* Dynamic domains must set cbndx via DOMAIN_ATTR_CONTEXT_BANK */
1538 if (!dynamic) {
1539 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
Will Deacon45ae7cf2013-06-24 18:31:25 +01001540 smmu->num_context_banks);
Patrick Dalyc190d932016-08-30 17:23:28 -07001541 if (ret < 0)
1542 goto out_unlock;
1543 cfg->cbndx = ret;
1544 }
Robin Murphyb7862e32016-04-13 18:13:03 +01001545 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001546 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1547 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001548 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001549 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001550 }
1551
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001552 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
Robin Murphyd5466352016-05-09 17:20:09 +01001553 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +00001554 .ias = ias,
1555 .oas = oas,
1556 .tlb = &arm_smmu_gather_ops,
Robin Murphy2df7a252015-07-29 19:46:06 +01001557 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +00001558 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001559
Will Deacon518f7132014-11-14 17:17:54 +00001560 smmu_domain->smmu = smmu;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001561 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1562 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001563 if (!pgtbl_ops) {
1564 ret = -ENOMEM;
1565 goto out_clear_smmu;
1566 }
1567
Patrick Dalyc11d1082016-09-01 15:52:44 -07001568 /*
1569	 * Assign any page-table memory that might have been allocated
1570	 * during alloc_io_pgtable_ops().
1571 */
Patrick Dalye271f212016-10-04 13:24:49 -07001572 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001573 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001574 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001575
Robin Murphyd5466352016-05-09 17:20:09 +01001576 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001577 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Will Deacon518f7132014-11-14 17:17:54 +00001578
Patrick Dalyc190d932016-08-30 17:23:28 -07001579	/* Assign an ASID */
1580 ret = arm_smmu_init_asid(domain, smmu);
1581 if (ret)
1582 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001583
Patrick Dalyc190d932016-08-30 17:23:28 -07001584 if (!dynamic) {
1585 /* Initialise the context bank with our page table cfg */
1586 arm_smmu_init_context_bank(smmu_domain,
1587 &smmu_domain->pgtbl_cfg);
1588
1589 /*
1590 * Request context fault interrupt. Do this last to avoid the
1591 * handler seeing a half-initialised domain state.
1592 */
1593 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1594 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001595 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1596 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001597 if (ret < 0) {
1598 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1599 cfg->irptndx, irq);
1600 cfg->irptndx = INVALID_IRPTNDX;
1601 goto out_clear_smmu;
1602 }
1603 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001604 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001605 }
Will Deacon518f7132014-11-14 17:17:54 +00001606 mutex_unlock(&smmu_domain->init_mutex);
1607
1608 /* Publish page table ops for map/unmap */
1609 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001610 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001611
Will Deacon518f7132014-11-14 17:17:54 +00001612out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001613 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001614 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001615out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001616 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001617 return ret;
1618}
1619
1620static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1621{
Joerg Roedel1d672632015-03-26 13:43:10 +01001622 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001623 struct arm_smmu_device *smmu = smmu_domain->smmu;
1624 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01001625 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001626 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07001627 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001628 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001629
Robin Murphy98006992016-04-20 14:53:33 +01001630 if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001631 return;
1632
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001633 ret = arm_smmu_power_on(smmu);
1634 if (ret) {
1635		WARN_ONCE(ret, "Whoops, powering on SMMU %p failed; leaking context bank\n",
1636 smmu);
1637 return;
1638 }
1639
Patrick Dalyc190d932016-08-30 17:23:28 -07001640 dynamic = is_dynamic_domain(domain);
1641 if (dynamic) {
1642 arm_smmu_free_asid(domain);
1643 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001644 arm_smmu_power_off(smmu);
Patrick Dalye271f212016-10-04 13:24:49 -07001645 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001646 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001647 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001648 return;
1649 }
1650
Will Deacon518f7132014-11-14 17:17:54 +00001651 /*
1652 * Disable the context bank and free the page tables before freeing
1653 * it.
1654 */
Will Deacon44680ee2014-06-25 11:29:12 +01001655 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001656 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01001657
Will Deacon44680ee2014-06-25 11:29:12 +01001658 if (cfg->irptndx != INVALID_IRPTNDX) {
1659 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08001660 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001661 }
1662
Markus Elfring44830b02015-11-06 18:32:41 +01001663 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07001664 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001665 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001666 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001667 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001668
1669 arm_smmu_power_off(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001670}
1671
Joerg Roedel1d672632015-03-26 13:43:10 +01001672static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001673{
1674 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001675
Patrick Daly09801312016-08-29 17:02:52 -07001676	/* Only IOMMU_DOMAIN_UNMANAGED is supported for now (no DOMAIN_DMA) */
1677 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01001678 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001679 /*
1680 * Allocate the domain and initialise some of its data structures.
1681 * We can't really do anything meaningful until we've added a
1682 * master.
1683 */
1684 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1685 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001686 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001687
Robin Murphy9adb9592016-01-26 18:06:36 +00001688 if (type == IOMMU_DOMAIN_DMA &&
1689 iommu_get_dma_cookie(&smmu_domain->domain)) {
1690 kfree(smmu_domain);
1691 return NULL;
1692 }
1693
Will Deacon518f7132014-11-14 17:17:54 +00001694 mutex_init(&smmu_domain->init_mutex);
1695 spin_lock_init(&smmu_domain->pgtbl_lock);
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06001696 smmu_domain->cfg.cbndx = INVALID_CBNDX;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001697 smmu_domain->secure_vmid = VMID_INVAL;
1698 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
1699 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07001700 mutex_init(&smmu_domain->assign_lock);
Joerg Roedel1d672632015-03-26 13:43:10 +01001701
1702 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001703}
1704
Joerg Roedel1d672632015-03-26 13:43:10 +01001705static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001706{
Joerg Roedel1d672632015-03-26 13:43:10 +01001707 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01001708
1709 /*
1710 * Free the domain resources. We assume that all devices have
1711 * already been detached.
1712 */
Robin Murphy9adb9592016-01-26 18:06:36 +00001713 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001714 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001715 kfree(smmu_domain);
1716}
1717
1718static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001719 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001720{
1721 int i;
1722 struct arm_smmu_smr *smrs;
1723 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1724
1725 if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
1726 return 0;
1727
Will Deacona9a1b0b2014-05-01 18:05:08 +01001728 if (cfg->smrs)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001729 return -EEXIST;
1730
Mitchel Humpherys29073202014-07-08 09:52:18 -07001731 smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001732 if (!smrs) {
Will Deacona9a1b0b2014-05-01 18:05:08 +01001733 dev_err(smmu->dev, "failed to allocate %d SMRs\n",
1734 cfg->num_streamids);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001735 return -ENOMEM;
1736 }
1737
Will Deacon44680ee2014-06-25 11:29:12 +01001738 /* Allocate the SMRs on the SMMU */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001739 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001740 int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
1741 smmu->num_mapping_groups);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001742 if (idx < 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001743 dev_err(smmu->dev, "failed to allocate free SMR\n");
1744 goto err_free_smrs;
1745 }
1746
1747 smrs[i] = (struct arm_smmu_smr) {
1748 .idx = idx,
1749 .mask = 0, /* We don't currently share SMRs */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001750 .id = cfg->streamids[i],
Will Deacon45ae7cf2013-06-24 18:31:25 +01001751 };
1752 }
1753
1754 /* It worked! Now, poke the actual hardware */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001755 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001756 u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
1757 smrs[i].mask << SMR_MASK_SHIFT;
1758 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
1759 }
1760
Will Deacona9a1b0b2014-05-01 18:05:08 +01001761 cfg->smrs = smrs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001762 return 0;
1763
1764err_free_smrs:
1765 while (--i >= 0)
1766 __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
1767 kfree(smrs);
1768 return -ENOSPC;
1769}
1770
1771static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001772 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001773{
1774 int i;
1775 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001776 struct arm_smmu_smr *smrs = cfg->smrs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001777
Will Deacon43b412b2014-07-15 11:22:24 +01001778 if (!smrs)
1779 return;
1780
Will Deacon45ae7cf2013-06-24 18:31:25 +01001781 /* Invalidate the SMRs before freeing back to the allocator */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001782 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001783 u8 idx = smrs[i].idx;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001784
Will Deacon45ae7cf2013-06-24 18:31:25 +01001785 writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
1786 __arm_smmu_free_bitmap(smmu->smr_map, idx);
1787 }
1788
Will Deacona9a1b0b2014-05-01 18:05:08 +01001789 cfg->smrs = NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001790 kfree(smrs);
1791}
1792
Will Deacon45ae7cf2013-06-24 18:31:25 +01001793static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001794 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001795{
1796 int i, ret;
Will Deacon44680ee2014-06-25 11:29:12 +01001797 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001798 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1799
Will Deacon5f634952016-04-20 14:53:32 +01001800 /*
1801 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
1802 * for all devices behind the SMMU. Note that we need to take
1803	 * care configuring SMRs for devices that are both a platform_device
1804	 * and a PCI device (i.e. a PCI host controller).
1805 */
1806 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
1807 return 0;
1808
Will Deacon8f68f8e2014-07-15 11:27:08 +01001809 /* Devices in an IOMMU group may already be configured */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001810 ret = arm_smmu_master_configure_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001811 if (ret)
Will Deacon8f68f8e2014-07-15 11:27:08 +01001812 return ret == -EEXIST ? 0 : ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001813
Will Deacona9a1b0b2014-05-01 18:05:08 +01001814 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001815 u32 idx, s2cr;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001816
Will Deacona9a1b0b2014-05-01 18:05:08 +01001817 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Patrick Dalyf4930442016-06-27 20:50:14 -07001818 s2cr = S2CR_TYPE_TRANS |
Will Deacon44680ee2014-06-25 11:29:12 +01001819 (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001820 writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
1821 }
1822
1823 return 0;
1824}
1825
1826static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001827 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001828{
Will Deacon43b412b2014-07-15 11:22:24 +01001829 int i;
Will Deacon44680ee2014-06-25 11:29:12 +01001830 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon43b412b2014-07-15 11:22:24 +01001831 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001832
Will Deacon8f68f8e2014-07-15 11:27:08 +01001833 /* An IOMMU group is torn down by the first device to be removed */
1834 if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
1835 return;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001836
1837 /*
1838 * We *must* clear the S2CR first, because freeing the SMR means
1839 * that it can be re-allocated immediately.
1840 */
Will Deacon43b412b2014-07-15 11:22:24 +01001841 for (i = 0; i < cfg->num_streamids; ++i) {
1842 u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Robin Murphy25a1c962016-02-10 14:25:33 +00001843 u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
Will Deacon43b412b2014-07-15 11:22:24 +01001844
Robin Murphy25a1c962016-02-10 14:25:33 +00001845 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
Will Deacon43b412b2014-07-15 11:22:24 +01001846 }
1847
Will Deacona9a1b0b2014-05-01 18:05:08 +01001848 arm_smmu_master_free_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001849}
1850
Patrick Daly09801312016-08-29 17:02:52 -07001851static void arm_smmu_detach_dev(struct iommu_domain *domain,
1852 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001853{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001854 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07001855 struct arm_smmu_device *smmu = smmu_domain->smmu;
1856 struct arm_smmu_master_cfg *cfg;
1857 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07001858 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07001859
1860 if (dynamic)
1861 return;
1862
1863 cfg = find_smmu_master_cfg(dev);
1864 if (!cfg)
1865 return;
1866
1867 if (!smmu) {
1868 dev_err(dev, "Domain not attached; cannot detach!\n");
1869 return;
1870 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001871
1872 dev->archdata.iommu = NULL;
1873 arm_smmu_domain_remove_master(smmu_domain, cfg);
Patrick Daly8befb662016-08-17 20:03:28 -07001874
1875 /* Remove additional vote for atomic power */
1876 if (atomic_domain) {
1877 WARN_ON(arm_smmu_enable_clocks_atomic(smmu));
1878 arm_smmu_power_off(smmu);
1879 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001880}
1881
Patrick Dalye271f212016-10-04 13:24:49 -07001882static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07001883{
Patrick Dalye271f212016-10-04 13:24:49 -07001884 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001885 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
1886 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
1887 int source_vmid = VMID_HLOS;
1888 struct arm_smmu_pte_info *pte_info, *temp;
1889
Patrick Dalye271f212016-10-04 13:24:49 -07001890 if (!arm_smmu_is_domain_secure(smmu_domain))
1891 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001892
Patrick Dalye271f212016-10-04 13:24:49 -07001893 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07001894 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
1895 PAGE_SIZE, &source_vmid, 1,
1896 dest_vmids, dest_perms, 2);
1897 if (WARN_ON(ret))
1898 break;
1899 }
1900
1901 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
1902 entry) {
1903 list_del(&pte_info->entry);
1904 kfree(pte_info);
1905 }
Patrick Dalye271f212016-10-04 13:24:49 -07001906 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001907}
1908
1909static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
1910{
1911 int ret;
1912 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07001913 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001914 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
1915 struct arm_smmu_pte_info *pte_info, *temp;
1916
Patrick Dalye271f212016-10-04 13:24:49 -07001917 if (!arm_smmu_is_domain_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07001918 return;
1919
1920 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
1921 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
1922 PAGE_SIZE, source_vmlist, 2,
1923 &dest_vmids, &dest_perms, 1);
1924 if (WARN_ON(ret))
1925 break;
1926 free_pages_exact(pte_info->virt_addr, pte_info->size);
1927 }
1928
1929 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
1930 entry) {
1931 list_del(&pte_info->entry);
1932 kfree(pte_info);
1933 }
1934}
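/*
 * Explanatory note (not in the original source): assign shares each
 * page-table page between HLOS (read/write) and the domain's secure
 * VMID (read-only); unassign reverses that, handing the pages back to
 * HLOS with full permissions so they can be freed safely above.
 */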
1935
1936static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
1937{
1938 struct arm_smmu_domain *smmu_domain = cookie;
1939 struct arm_smmu_pte_info *pte_info;
1940
Patrick Dalye271f212016-10-04 13:24:49 -07001941 if (!arm_smmu_is_domain_secure(smmu_domain)) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07001942 free_pages_exact(addr, size);
1943 return;
1944 }
1945
1946 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
1947 if (!pte_info)
1948 return;
1949
1950 pte_info->virt_addr = addr;
1951 pte_info->size = size;
1952 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
1953}
1954
1955static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
1956{
1957 struct arm_smmu_domain *smmu_domain = cookie;
1958 struct arm_smmu_pte_info *pte_info;
1959
Patrick Dalye271f212016-10-04 13:24:49 -07001960 if (!arm_smmu_is_domain_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07001961 return -EINVAL;
1962
1963 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
1964 if (!pte_info)
1965 return -ENOMEM;
1966 pte_info->virt_addr = addr;
1967 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
1968 return 0;
1969}
1970
Will Deacon45ae7cf2013-06-24 18:31:25 +01001971static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1972{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001973 int ret;
Joerg Roedel1d672632015-03-26 13:43:10 +01001974 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001975 struct arm_smmu_device *smmu;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001976 struct arm_smmu_master_cfg *cfg;
Patrick Daly8befb662016-08-17 20:03:28 -07001977 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001978
Will Deacon8f68f8e2014-07-15 11:27:08 +01001979 smmu = find_smmu_for_device(dev);
Will Deacon44680ee2014-06-25 11:29:12 +01001980 if (!smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001981 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1982 return -ENXIO;
1983 }
1984
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001985 /* Enable Clocks and Power */
1986 ret = arm_smmu_power_on(smmu);
1987 if (ret)
1988 return ret;
1989
Patrick Daly8befb662016-08-17 20:03:28 -07001990 /*
1991 * Keep an additional vote for non-atomic power until domain is
1992 * detached
1993 */
1994 if (atomic_domain) {
1995 ret = arm_smmu_power_on(smmu);
1996 if (ret)
1997 goto out_power_off;
1998
1999 arm_smmu_disable_clocks_atomic(smmu);
2000 }
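	/*
	 * Explanatory note: the block above leaves one extra non-atomic
	 * power vote in place for atomic domains; the matching
	 * arm_smmu_power_off() in arm_smmu_detach_dev() drops it, so the
	 * SMMU stays powered for the lifetime of the attachment.
	 */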
2001
Will Deacon518f7132014-11-14 17:17:54 +00002002 /* Ensure that the domain is finalised */
2003 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002004 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002005 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002006
Patrick Dalyc190d932016-08-30 17:23:28 -07002007 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002008 if (is_dynamic_domain(domain)) {
2009 ret = 0;
2010 goto out_power_off;
2011 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002012
Will Deacon45ae7cf2013-06-24 18:31:25 +01002013 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002014 * Sanity check the domain. We don't support domains across
2015 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002016 */
Will Deacon518f7132014-11-14 17:17:54 +00002017 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002018 dev_err(dev,
2019 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002020 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002021 ret = -EINVAL;
2022 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002023 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002024
2025 /* Looks ok, so add the device to the domain */
Will Deacon8f68f8e2014-07-15 11:27:08 +01002026 cfg = find_smmu_master_cfg(dev);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002027 if (!cfg) {
2028 ret = -ENODEV;
2029 goto out_power_off;
2030 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002031
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002032 /* Detach the dev from its current domain */
2033 if (dev->archdata.iommu)
Patrick Daly09801312016-08-29 17:02:52 -07002034 arm_smmu_detach_dev(dev->archdata.iommu, dev);
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002035
Will Deacon844e35b2014-07-17 11:23:51 +01002036 ret = arm_smmu_domain_add_master(smmu_domain, cfg);
2037 if (!ret)
2038 dev->archdata.iommu = domain;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002039
2040out_power_off:
2041 arm_smmu_power_off(smmu);
2042
Will Deacon45ae7cf2013-06-24 18:31:25 +01002043 return ret;
2044}
2045
Will Deacon45ae7cf2013-06-24 18:31:25 +01002046static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002047 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002048{
Will Deacon518f7132014-11-14 17:17:54 +00002049 int ret;
2050 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002051 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002052	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002053
Will Deacon518f7132014-11-14 17:17:54 +00002054 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002055 return -ENODEV;
2056
Patrick Dalye271f212016-10-04 13:24:49 -07002057 arm_smmu_secure_domain_lock(smmu_domain);
2058
Will Deacon518f7132014-11-14 17:17:54 +00002059 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2060 ret = ops->map(ops, iova, paddr, size, prot);
2061 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002062
2063 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002064 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002065
Will Deacon518f7132014-11-14 17:17:54 +00002066 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002067}
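/*
 * Illustrative caller (hypothetical, not from the original source),
 * assuming a domain already attached to a master behind this SMMU:
 *
 *	ret = iommu_map(domain, iova, paddr, SZ_4K,
 *			IOMMU_READ | IOMMU_WRITE);
 *
 * The iommu core routes that call here, where the map is performed
 * under pgtbl_lock and any newly allocated tables are assigned to the
 * secure VMID before returning.
 */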
2068
2069static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2070 size_t size)
2071{
Will Deacon518f7132014-11-14 17:17:54 +00002072 size_t ret;
2073 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002074 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002075	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002076
Will Deacon518f7132014-11-14 17:17:54 +00002077 if (!ops)
2078 return 0;
2079
Patrick Daly8befb662016-08-17 20:03:28 -07002080 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002081 if (ret)
2082 return ret;
2083
Patrick Dalye271f212016-10-04 13:24:49 -07002084 arm_smmu_secure_domain_lock(smmu_domain);
2085
Will Deacon518f7132014-11-14 17:17:54 +00002086 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2087 ret = ops->unmap(ops, iova, size);
2088 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002089
Patrick Daly8befb662016-08-17 20:03:28 -07002090 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002091 /*
2092 * While splitting up block mappings, we might allocate page table
2093	 * memory during unmap, so the VMIDs need to be assigned to the
2094 * memory here as well.
2095 */
2096 arm_smmu_assign_table(smmu_domain);
2097	/* Also unassign any pages that were freed during unmap */
2098 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002099 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002100 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002101}
2102
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002103static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2104 struct scatterlist *sg, unsigned int nents, int prot)
2105{
2106 int ret;
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002107 size_t size;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002108 unsigned long flags;
2109 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2110 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2111
2112 if (!ops)
2113 return -ENODEV;
2114
Patrick Daly8befb662016-08-17 20:03:28 -07002115 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002116 if (ret)
2117 return ret;
2118
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002119 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002120 ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002121 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002122
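	/*
	 * ops->map_sg() returns 0 on failure, with 'size' holding the
	 * bytes mapped before the error; tearing that span down below
	 * keeps the caller from ever seeing a half-populated range.
	 */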
2123 if (!ret)
2124 arm_smmu_unmap(domain, iova, size);
2125
Patrick Daly8befb662016-08-17 20:03:28 -07002126 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002127 arm_smmu_assign_table(smmu_domain);
2128
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002129 return ret;
2130}
2131
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002132static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002133 dma_addr_t iova, bool do_halt)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002134{
Joerg Roedel1d672632015-03-26 13:43:10 +01002135 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002136 struct arm_smmu_device *smmu = smmu_domain->smmu;
2137 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2138	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2139 struct device *dev = smmu->dev;
2140 void __iomem *cb_base;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08002141 unsigned long flags;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002142 u32 tmp;
2143 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002144 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002145
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08002146 spin_lock_irqsave(&smmu->atos_lock, flags);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002147 if (do_halt && arm_smmu_halt(smmu)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002148 phys = 0;
2149 goto out_unlock;
2150 }
2151
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002152 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2153
Robin Murphy661d9622015-05-27 17:09:34 +01002154 /* ATS1 registers can only be written atomically */
2155 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002156 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002157 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2158 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002159 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002160
2161 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2162 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002163 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002164 dev_err(dev,
2165 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2166 &iova, &phys);
2167 phys = 0;
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002168 goto out_resume;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002169 }
2170
Robin Murphyf9a05f02016-04-13 18:13:01 +01002171 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002172 if (phys & CB_PAR_F) {
2173 dev_err(dev, "translation fault!\n");
2174 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002175 phys = 0;
2176 } else {
2177 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002178 }
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002179out_resume:
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002180 if (do_halt)
2181 arm_smmu_resume(smmu);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002182out_unlock:
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08002183 spin_unlock_irqrestore(&smmu->atos_lock, flags);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002184 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002185}
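/*
 * Summary of the hardware walk above (explanatory note): halt the SMMU
 * if requested, write the page-aligned VA to ATS1PR, poll ATSR.ACTIVE
 * until the translation completes, then read the result from PAR; a set
 * PAR.F bit indicates a translation fault. A timed-out walk falls back
 * to the software table walker purely for the error message.
 */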
2186
Will Deacon45ae7cf2013-06-24 18:31:25 +01002187static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002188 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002189{
Will Deacon518f7132014-11-14 17:17:54 +00002190 phys_addr_t ret;
2191 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002192 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002193	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002194
Will Deacon518f7132014-11-14 17:17:54 +00002195 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002196 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002197
Will Deacon518f7132014-11-14 17:17:54 +00002198 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002199 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002200 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002201
Will Deacon518f7132014-11-14 17:17:54 +00002202 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002203}
2204
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002205/*
2206 * This function can sleep, and cannot be called from atomic context. Will
2207 * power on register block if required. This restriction does not apply to the
2208 * original iova_to_phys() op.
2209 */
2210static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2211 dma_addr_t iova)
2212{
2213 phys_addr_t ret = 0;
2214 unsigned long flags;
2215 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002216 int err;
2217
2218 err = arm_smmu_power_on(smmu_domain->smmu);
2219 if (err)
2220 return 0;
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002221
2222 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2223 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2224 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002225 ret = __arm_smmu_iova_to_phys_hard(domain, iova, true);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002226
2227 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2228
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002229 arm_smmu_power_off(smmu_domain->smmu);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002230 return ret;
2231}
2232
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002233static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
2234 struct iommu_domain *domain, dma_addr_t iova)
2235{
2236 return __arm_smmu_iova_to_phys_hard(domain, iova, false);
2237}
2238
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002239static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002240{
Will Deacond0948942014-06-24 17:30:10 +01002241 switch (cap) {
2242 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002243 /*
2244 * Return true here as the SMMU can always send out coherent
2245 * requests.
2246 */
2247 return true;
Will Deacond0948942014-06-24 17:30:10 +01002248 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002249 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002250 case IOMMU_CAP_NOEXEC:
2251 return true;
Will Deacond0948942014-06-24 17:30:10 +01002252 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002253 return false;
Will Deacond0948942014-06-24 17:30:10 +01002254 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002255}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002256
Will Deacona9a1b0b2014-05-01 18:05:08 +01002257static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
2258{
2259 *((u16 *)data) = alias;
2260 return 0; /* Continue walking */
Will Deacon45ae7cf2013-06-24 18:31:25 +01002261}
2262
Will Deacon8f68f8e2014-07-15 11:27:08 +01002263static void __arm_smmu_release_pci_iommudata(void *data)
2264{
2265 kfree(data);
2266}
2267
Joerg Roedelaf659932015-10-21 23:51:41 +02002268static int arm_smmu_init_pci_device(struct pci_dev *pdev,
2269 struct iommu_group *group)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002270{
Will Deacon03edb222015-01-19 14:27:33 +00002271 struct arm_smmu_master_cfg *cfg;
Joerg Roedelaf659932015-10-21 23:51:41 +02002272 u16 sid;
2273 int i;
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002274
Will Deacon03edb222015-01-19 14:27:33 +00002275 cfg = iommu_group_get_iommudata(group);
2276 if (!cfg) {
Will Deacona9a1b0b2014-05-01 18:05:08 +01002277 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
Joerg Roedelaf659932015-10-21 23:51:41 +02002278 if (!cfg)
2279 return -ENOMEM;
Will Deacona9a1b0b2014-05-01 18:05:08 +01002280
Will Deacon03edb222015-01-19 14:27:33 +00002281 iommu_group_set_iommudata(group, cfg,
2282 __arm_smmu_release_pci_iommudata);
Will Deacona9a1b0b2014-05-01 18:05:08 +01002283 }
2284
Joerg Roedelaf659932015-10-21 23:51:41 +02002285 if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
2286 return -ENOSPC;
Will Deacona9a1b0b2014-05-01 18:05:08 +01002287
Will Deacon03edb222015-01-19 14:27:33 +00002288 /*
2289 * Assume Stream ID == Requester ID for now.
2290 * We need a way to describe the ID mappings in FDT.
2291 */
2292 pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
2293 for (i = 0; i < cfg->num_streamids; ++i)
2294 if (cfg->streamids[i] == sid)
2295 break;
2296
2297 /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
2298 if (i == cfg->num_streamids)
2299 cfg->streamids[cfg->num_streamids++] = sid;
2300
2301 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002302}
2303
Joerg Roedelaf659932015-10-21 23:51:41 +02002304static int arm_smmu_init_platform_device(struct device *dev,
2305 struct iommu_group *group)
Will Deacon03edb222015-01-19 14:27:33 +00002306{
Will Deacon03edb222015-01-19 14:27:33 +00002307 struct arm_smmu_device *smmu = find_smmu_for_device(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02002308 struct arm_smmu_master *master;
Will Deacon03edb222015-01-19 14:27:33 +00002309
2310 if (!smmu)
2311 return -ENODEV;
2312
2313 master = find_smmu_master(smmu, dev->of_node);
2314 if (!master)
2315 return -ENODEV;
2316
Will Deacon03edb222015-01-19 14:27:33 +00002317 iommu_group_set_iommudata(group, &master->cfg, NULL);
Joerg Roedelaf659932015-10-21 23:51:41 +02002318
2319 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00002320}
2321
2322static int arm_smmu_add_device(struct device *dev)
2323{
Joerg Roedelaf659932015-10-21 23:51:41 +02002324 struct iommu_group *group;
Will Deacon03edb222015-01-19 14:27:33 +00002325
Joerg Roedelaf659932015-10-21 23:51:41 +02002326 group = iommu_group_get_for_dev(dev);
2327 if (IS_ERR(group))
2328 return PTR_ERR(group);
2329
Peng Fan9a4a9d82015-11-20 16:56:18 +08002330 iommu_group_put(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02002331 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00002332}
2333
Will Deacon45ae7cf2013-06-24 18:31:25 +01002334static void arm_smmu_remove_device(struct device *dev)
2335{
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002336 iommu_group_remove_device(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002337}
2338
Joerg Roedelaf659932015-10-21 23:51:41 +02002339static struct iommu_group *arm_smmu_device_group(struct device *dev)
2340{
2341 struct iommu_group *group;
2342 int ret;
2343
2344 if (dev_is_pci(dev))
2345 group = pci_device_group(dev);
2346 else
2347 group = generic_device_group(dev);
2348
2349 if (IS_ERR(group))
2350 return group;
2351
2352 if (dev_is_pci(dev))
2353 ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
2354 else
2355 ret = arm_smmu_init_platform_device(dev, group);
2356
2357 if (ret) {
2358 iommu_group_put(group);
2359 group = ERR_PTR(ret);
2360 }
2361
2362 return group;
2363}
2364
Will Deaconc752ce42014-06-25 22:46:31 +01002365static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
2366 enum iommu_attr attr, void *data)
2367{
Joerg Roedel1d672632015-03-26 13:43:10 +01002368 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002369 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01002370
2371 switch (attr) {
2372 case DOMAIN_ATTR_NESTING:
2373 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
2374 return 0;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08002375 case DOMAIN_ATTR_PT_BASE_ADDR:
2376 *((phys_addr_t *)data) =
2377 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2378 return 0;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002379 case DOMAIN_ATTR_CONTEXT_BANK:
2380 /* context bank index isn't valid until we are attached */
2381 if (smmu_domain->smmu == NULL)
2382 return -ENODEV;
2383
2384 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
2385 ret = 0;
2386 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002387 case DOMAIN_ATTR_TTBR0: {
2388 u64 val;
2389 struct arm_smmu_device *smmu = smmu_domain->smmu;
2390 /* not valid until we are attached */
2391 if (smmu == NULL)
2392 return -ENODEV;
2393
2394 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2395 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
2396 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
2397 << (TTBRn_ASID_SHIFT);
2398 *((u64 *)data) = val;
2399 ret = 0;
2400 break;
2401 }
2402 case DOMAIN_ATTR_CONTEXTIDR:
2403 /* not valid until attached */
2404 if (smmu_domain->smmu == NULL)
2405 return -ENODEV;
2406 *((u32 *)data) = smmu_domain->cfg.procid;
2407 ret = 0;
2408 break;
2409 case DOMAIN_ATTR_PROCID:
2410 *((u32 *)data) = smmu_domain->cfg.procid;
2411 ret = 0;
2412 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002413 case DOMAIN_ATTR_DYNAMIC:
2414 *((int *)data) = !!(smmu_domain->attributes
2415 & (1 << DOMAIN_ATTR_DYNAMIC));
2416 ret = 0;
2417 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002418 case DOMAIN_ATTR_NON_FATAL_FAULTS:
2419 *((int *)data) = !!(smmu_domain->attributes
2420 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
2421 ret = 0;
2422 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07002423 case DOMAIN_ATTR_S1_BYPASS:
2424 *((int *)data) = !!(smmu_domain->attributes
2425 & (1 << DOMAIN_ATTR_S1_BYPASS));
2426 ret = 0;
2427 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002428 case DOMAIN_ATTR_SECURE_VMID:
2429 *((int *)data) = smmu_domain->secure_vmid;
2430 ret = 0;
2431 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002432 default:
2433 return -ENODEV;
2434 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002435 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002436}
2437
2438static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
2439 enum iommu_attr attr, void *data)
2440{
Will Deacon518f7132014-11-14 17:17:54 +00002441 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01002442 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01002443
Will Deacon518f7132014-11-14 17:17:54 +00002444 mutex_lock(&smmu_domain->init_mutex);
2445
Will Deaconc752ce42014-06-25 22:46:31 +01002446 switch (attr) {
2447 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00002448 if (smmu_domain->smmu) {
2449 ret = -EPERM;
2450 goto out_unlock;
2451 }
2452
Will Deaconc752ce42014-06-25 22:46:31 +01002453 if (*(int *)data)
2454 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2455 else
2456 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2457
Will Deacon518f7132014-11-14 17:17:54 +00002458 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002459 case DOMAIN_ATTR_PROCID:
2460 if (smmu_domain->smmu != NULL) {
2461 dev_err(smmu_domain->smmu->dev,
2462 "cannot change procid attribute while attached\n");
2463 ret = -EBUSY;
2464 break;
2465 }
2466 smmu_domain->cfg.procid = *((u32 *)data);
2467 ret = 0;
2468 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002469 case DOMAIN_ATTR_DYNAMIC: {
2470 int dynamic = *((int *)data);
2471
2472 if (smmu_domain->smmu != NULL) {
2473 dev_err(smmu_domain->smmu->dev,
2474 "cannot change dynamic attribute while attached\n");
2475 ret = -EBUSY;
2476 break;
2477 }
2478
2479 if (dynamic)
2480 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
2481 else
2482 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
2483 ret = 0;
2484 break;
2485 }
2486 case DOMAIN_ATTR_CONTEXT_BANK:
2487 /* context bank can't be set while attached */
2488 if (smmu_domain->smmu != NULL) {
2489 ret = -EBUSY;
2490 break;
2491 }
2492 /* ... and it can only be set for dynamic contexts. */
2493 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
2494 ret = -EINVAL;
2495 break;
2496 }
2497
2498 /* this will be validated during attach */
2499 smmu_domain->cfg.cbndx = *((unsigned int *)data);
2500 ret = 0;
2501 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002502 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
2503 u32 non_fatal_faults = *((int *)data);
2504
2505 if (non_fatal_faults)
2506 smmu_domain->attributes |=
2507 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
2508 else
2509 smmu_domain->attributes &=
2510 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
2511 ret = 0;
2512 break;
2513 }
Patrick Dalye62d3362016-03-15 18:58:28 -07002514 case DOMAIN_ATTR_S1_BYPASS: {
2515 int bypass = *((int *)data);
2516
2517 /* bypass can't be changed while attached */
2518 if (smmu_domain->smmu != NULL) {
2519 ret = -EBUSY;
2520 break;
2521 }
2522 if (bypass)
2523 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
2524 else
2525 smmu_domain->attributes &=
2526 ~(1 << DOMAIN_ATTR_S1_BYPASS);
2527
2528 ret = 0;
2529 break;
2530 }
Patrick Daly8befb662016-08-17 20:03:28 -07002531 case DOMAIN_ATTR_ATOMIC:
2532 {
2533 int atomic_ctx = *((int *)data);
2534
2535 /* can't be changed while attached */
2536 if (smmu_domain->smmu != NULL) {
2537 ret = -EBUSY;
2538 break;
2539 }
2540 if (atomic_ctx)
2541 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
2542 else
2543 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
2544 break;
2545 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002546 case DOMAIN_ATTR_SECURE_VMID:
2547 if (smmu_domain->secure_vmid != VMID_INVAL) {
 2548		ret = -EBUSY;
 2549		WARN(1, "secure vmid already set!\n");
2550 break;
2551 }
2552 smmu_domain->secure_vmid = *((int *)data);
2553 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002554 default:
Will Deacon518f7132014-11-14 17:17:54 +00002555 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01002556 }
Will Deacon518f7132014-11-14 17:17:54 +00002557
2558out_unlock:
2559 mutex_unlock(&smmu_domain->init_mutex);
2560 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002561}
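
/*
 * Illustrative sketch (not part of this driver): a hypothetical client
 * configuring a dynamic domain purely through the attributes handled
 * above. Everything that shapes the context bank must be set *before*
 * iommu_attach_device(), since the handlers typically return -EBUSY
 * once smmu_domain->smmu is non-NULL. "my_dev" and the values used
 * here are assumptions.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *	int dynamic = 1;
 *	unsigned int cb = 4;
 *	u32 procid = 0x10;
 *
 *	if (!domain)
 *		return -ENOMEM;
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_DYNAMIC, &dynamic);
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_CONTEXT_BANK, &cb);
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_PROCID, &procid);
 *	return iommu_attach_device(domain, my_dev);
 */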
2562
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002563static void arm_smmu_trigger_fault(struct iommu_domain *domain,
2564 unsigned long flags)
2565{
2566 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2567 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2568 struct arm_smmu_device *smmu;
2569 void __iomem *cb_base;
2570
2571 if (!smmu_domain->smmu) {
2572 pr_err("Can't trigger faults on non-attached domains\n");
2573 return;
2574 }
2575
2576 smmu = smmu_domain->smmu;
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002577 if (arm_smmu_power_on(smmu))
2578 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002579
2580 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2581 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
2582 flags, cfg->cbndx);
2583 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07002584 /* give the interrupt time to fire... */
2585 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002586
2587 arm_smmu_power_off(smmu);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002588}
2589
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002590static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
2591 unsigned long offset)
2592{
2593 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2594 struct arm_smmu_device *smmu;
2595 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2596 void __iomem *cb_base;
2597 unsigned long val;
2598
2599 if (offset >= SZ_4K) {
2600 pr_err("Invalid offset: 0x%lx\n", offset);
2601 return 0;
2602 }
2603
2604 smmu = smmu_domain->smmu;
2605 if (!smmu) {
2606 WARN(1, "Can't read registers of a detached domain\n");
 2607		return 0;
2609 }
2610
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002611 if (arm_smmu_power_on(smmu))
2612 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002613
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002614 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2615 val = readl_relaxed(cb_base + offset);
2616
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002617 arm_smmu_power_off(smmu);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002618 return val;
2619}
2620
2621static void arm_smmu_reg_write(struct iommu_domain *domain,
2622 unsigned long offset, unsigned long val)
2623{
2624 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2625 struct arm_smmu_device *smmu;
2626 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2627 void __iomem *cb_base;
2628
2629 if (offset >= SZ_4K) {
2630 pr_err("Invalid offset: 0x%lx\n", offset);
2631 return;
2632 }
2633
2634 smmu = smmu_domain->smmu;
2635 if (!smmu) {
 2636		WARN(1, "Can't write registers of a detached domain\n");
2637 return;
2638 }
2639
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002640 if (arm_smmu_power_on(smmu))
2641 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002642
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002643 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2644 writel_relaxed(val, cb_base + offset);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002645
2646 arm_smmu_power_off(smmu);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002647}
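
/*
 * Illustrative sketch (assumption, not an upstream API): with the
 * trigger_fault/reg_read/reg_write extensions above, a debug consumer
 * holding an attached domain could provoke and then inspect a context
 * bank fault. Offsets are bounded to the SZ_4K context bank window.
 *
 *	unsigned long fsr;
 *
 *	domain->ops->trigger_fault(domain, FSR_TF);
 *	fsr = domain->ops->reg_read(domain, ARM_SMMU_CB_FSR);
 *	if (fsr & FSR_FAULT)
 *		domain->ops->reg_write(domain, ARM_SMMU_CB_FSR, fsr);
 */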
2648
Will Deacon518f7132014-11-14 17:17:54 +00002649static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01002650 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01002651 .domain_alloc = arm_smmu_domain_alloc,
2652 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01002653 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07002654 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01002655 .map = arm_smmu_map,
2656 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002657 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01002658 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002659 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01002660 .add_device = arm_smmu_add_device,
2661 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02002662 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01002663 .domain_get_attr = arm_smmu_domain_get_attr,
2664 .domain_set_attr = arm_smmu_domain_set_attr,
Will Deacon518f7132014-11-14 17:17:54 +00002665 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002666 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002667 .reg_read = arm_smmu_reg_read,
2668 .reg_write = arm_smmu_reg_write,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002669};
2670
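/*
 * Implementation-defined (QCOM) micro-MMU halt/resume handshake:
 * setting LOCAL_HALT_REQ asks the SMMU to stop accepting new
 * transactions, and MICRO_MMU_CTRL_IDLE acknowledges that it has
 * drained. arm_smmu_impl_def_programming() relies on this handshake
 * to update the attach-time implementation-defined registers safely.
 */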
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002671static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002672{
2673 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002674 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002675
2676 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
2677 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
2678 0, 30000)) {
2679 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
2680 return -EBUSY;
2681 }
2682
2683 return 0;
2684}
2685
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002686static int __arm_smmu_halt(struct arm_smmu_device *smmu, bool wait)
2687{
2688 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
2689 u32 reg;
2690
2691 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2692 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
2693 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2694
2695 return wait ? arm_smmu_wait_for_halt(smmu) : 0;
2696}
2697
2698static int arm_smmu_halt(struct arm_smmu_device *smmu)
2699{
2700 return __arm_smmu_halt(smmu, true);
2701}
2702
2703static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu)
2704{
2705 return __arm_smmu_halt(smmu, false);
2706}
2707
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002708static void arm_smmu_resume(struct arm_smmu_device *smmu)
2709{
2710 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
2711 u32 reg;
2712
2713 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2714 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
2715 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2716}
2717
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002718static void arm_smmu_impl_def_programming(struct arm_smmu_device *smmu)
2719{
2720 int i;
2721 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
2722
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002723 arm_smmu_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002724 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
2725 writel_relaxed(regs[i].value,
2726 ARM_SMMU_GR0(smmu) + regs[i].offset);
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002727 arm_smmu_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002728}
2729
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08002730static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002731{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08002732 int i;
2733 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002734 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002735 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002736
Peng Fan3ca37122016-05-03 21:50:30 +08002737 /*
 2738	 * Before clearing ARM_MMU500_ACTLR_CPRE, the CACHE_LOCK bit of
 2739	 * ACR must be cleared first; note that CACHE_LOCK is only
 2740	 * present in MMU-500 r2p0 onwards.
2741 */
2742 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
2743 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
2744 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
2745 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
2746 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
2747 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
2748 }
2749
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002750 /* Make sure all context banks are disabled and clear CB_FSR */
2751 for (i = 0; i < smmu->num_context_banks; ++i) {
2752 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
2753 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
2754 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002755 /*
2756 * Disable MMU-500's not-particularly-beneficial next-page
2757 * prefetcher for the sake of errata #841119 and #826419.
2758 */
2759 if (smmu->model == ARM_MMU500) {
2760 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
2761 reg &= ~ARM_MMU500_ACTLR_CPRE;
2762 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
2763 }
Patrick Dalyf0d4e212016-06-20 15:50:14 -07002764
2765 if (smmu->model == QCOM_SMMUV2) {
2766 reg = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
2767 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
2768 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
2769 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
2770 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002771 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08002772}
2773
2774static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
2775{
2776 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
2777 int i = 0;
2778 u32 reg;
2779
2780 /* clear global FSR */
2781 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
2782 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
2783
2784 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
2785 /*
2786 * Mark all SMRn as invalid and all S2CRn as bypass unless
2787 * overridden
2788 */
2789 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
2790 for (i = 0; i < smmu->num_mapping_groups; ++i) {
2791 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
2792 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
2793 }
2794
2795 arm_smmu_context_bank_reset(smmu);
2796 }
Will Deacon1463fe42013-07-31 19:21:27 +01002797
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002798 /* Program implementation defined registers */
2799 arm_smmu_impl_def_programming(smmu);
2800
Will Deacon45ae7cf2013-06-24 18:31:25 +01002801 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01002802 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
2803 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
2804
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00002805 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002806
Will Deacon45ae7cf2013-06-24 18:31:25 +01002807 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002808 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002809
2810 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002811 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002812
Robin Murphy25a1c962016-02-10 14:25:33 +00002813 /* Enable client access, handling unmatched streams as appropriate */
2814 reg &= ~sCR0_CLIENTPD;
2815 if (disable_bypass)
2816 reg |= sCR0_USFCFG;
2817 else
2818 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002819
2820 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002821 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002822
2823 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01002824 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002825
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08002826 if (smmu->features & ARM_SMMU_FEAT_VMID16)
2827 reg |= sCR0_VMID16EN;
2828
Will Deacon45ae7cf2013-06-24 18:31:25 +01002829 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00002830 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00002831 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002832}
2833
2834static int arm_smmu_id_size_to_bits(int size)
2835{
2836 switch (size) {
2837 case 0:
2838 return 32;
2839 case 1:
2840 return 36;
2841 case 2:
2842 return 40;
2843 case 3:
2844 return 42;
2845 case 4:
2846 return 44;
2847 case 5:
2848 default:
2849 return 48;
2850 }
2851}
2852
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002853static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
2854{
2855 struct device *dev = smmu->dev;
2856 int i, ntuples, ret;
2857 u32 *tuples;
2858 struct arm_smmu_impl_def_reg *regs, *regit;
2859
2860 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
2861 return 0;
2862
2863 ntuples /= sizeof(u32);
2864 if (ntuples % 2) {
2865 dev_err(dev,
2866 "Invalid number of attach-impl-defs registers: %d\n",
2867 ntuples);
2868 return -EINVAL;
2869 }
2870
 2871	regs = devm_kmalloc(
 2872		dev, sizeof(*smmu->impl_def_attach_registers) * (ntuples / 2),
 2873		GFP_KERNEL);
2874 if (!regs)
2875 return -ENOMEM;
2876
 2877	tuples = devm_kmalloc(dev, sizeof(u32) * ntuples, GFP_KERNEL);
2878 if (!tuples)
2879 return -ENOMEM;
2880
2881 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
2882 tuples, ntuples);
2883 if (ret)
2884 return ret;
2885
2886 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
2887 regit->offset = tuples[i];
2888 regit->value = tuples[i + 1];
2889 }
2890
2891 devm_kfree(dev, tuples);
2892
2893 smmu->impl_def_attach_registers = regs;
2894 smmu->num_impl_def_attach_registers = ntuples / 2;
2895
2896 return 0;
2897}
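
/*
 * Hypothetical DT fragment matching the parser above: a flat list of
 * <offset value> pairs, each becoming one register write applied while
 * the SMMU is halted at attach time. The values here are made up.
 *
 *	attach-impl-defs = <0x6000 0x270>,
 *			   <0x6060 0x1055>;
 */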
2898
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002899static int arm_smmu_init_clocks(struct arm_smmu_device *smmu)
2900{
2901 const char *cname;
2902 struct property *prop;
2903 int i;
2904 struct device *dev = smmu->dev;
2905
2906 smmu->num_clocks =
2907 of_property_count_strings(dev->of_node, "clock-names");
2908
 2909	if (smmu->num_clocks < 1) {
 2910		smmu->num_clocks = 0;
 2911		return 0;
 2912	}
2911
2912 smmu->clocks = devm_kzalloc(
2913 dev, sizeof(*smmu->clocks) * smmu->num_clocks,
2914 GFP_KERNEL);
2915
2916 if (!smmu->clocks) {
2917 dev_err(dev,
2918 "Failed to allocate memory for clocks\n");
 2919		return -ENOMEM;
2920 }
2921
2922 i = 0;
2923 of_property_for_each_string(dev->of_node, "clock-names",
2924 prop, cname) {
2925 struct clk *c = devm_clk_get(dev, cname);
2926
2927 if (IS_ERR(c)) {
 2928			dev_err(dev, "Couldn't get clock: %s\n", cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07002930 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002931 }
2932
2933 if (clk_get_rate(c) == 0) {
2934 long rate = clk_round_rate(c, 1000);
2935
2936 clk_set_rate(c, rate);
2937 }
2938
2939 smmu->clocks[i] = c;
2940
2941 ++i;
2942 }
2943 return 0;
2944}
2945
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07002946static int arm_smmu_init_regulators(struct arm_smmu_device *smmu)
2947{
2948 struct device *dev = smmu->dev;
2949
2950 if (!of_get_property(dev->of_node, "vdd-supply", NULL))
2951 return 0;
2952
2953 smmu->gdsc = devm_regulator_get(dev, "vdd");
2954 if (IS_ERR(smmu->gdsc))
2955 return PTR_ERR(smmu->gdsc);
2956
2957 return 0;
2958}
2959
Patrick Daly2764f952016-09-06 19:22:44 -07002960static int arm_smmu_init_bus_scaling(struct platform_device *pdev,
2961 struct arm_smmu_device *smmu)
2962{
2963 u32 master_id;
2964
2965 if (of_property_read_u32(pdev->dev.of_node, "qcom,bus-master-id",
2966 &master_id)) {
2967 dev_dbg(smmu->dev, "No bus scaling info\n");
2968 return 0;
2969 }
2970
2971 smmu->bus_client_name = devm_kasprintf(
2972 smmu->dev, GFP_KERNEL, "smmu-bus-client-%s",
2973 dev_name(smmu->dev));
2974
2975 if (!smmu->bus_client_name)
2976 return -ENOMEM;
2977
2978 smmu->bus_client = msm_bus_scale_register(
2979 master_id, MSM_BUS_SLAVE_EBI_CH0, smmu->bus_client_name, true);
 2980	if (IS_ERR(smmu->bus_client)) {
2981 int ret = PTR_ERR(smmu->bus_client);
2982
2983 if (ret != -EPROBE_DEFER)
2984 dev_err(smmu->dev, "Bus client registration failed\n");
2985 return ret;
2986 }
2987
2988 return 0;
2989}
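
/*
 * Hypothetical DT usage for the bus scaling hook above. The master id
 * below is a placeholder from msm-bus-ids.h; the slave port is fixed
 * to MSM_BUS_SLAVE_EBI_CH0 by this driver.
 *
 *	qcom,bus-master-id = <MSM_BUS_MASTER_AMPSS_M0>;
 */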
2990
Will Deacon45ae7cf2013-06-24 18:31:25 +01002991static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
2992{
2993 unsigned long size;
2994 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
2995 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01002996 bool cttw_dt, cttw_reg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002997
2998 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01002999 dev_notice(smmu->dev, "SMMUv%d with:\n",
3000 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003001
3002 /* ID0 */
3003 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01003004
3005 /* Restrict available stages based on module parameter */
3006 if (force_stage == 1)
3007 id &= ~(ID0_S2TS | ID0_NTS);
3008 else if (force_stage == 2)
3009 id &= ~(ID0_S1TS | ID0_NTS);
3010
Will Deacon45ae7cf2013-06-24 18:31:25 +01003011 if (id & ID0_S1TS) {
3012 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
3013 dev_notice(smmu->dev, "\tstage 1 translation\n");
3014 }
3015
3016 if (id & ID0_S2TS) {
3017 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
3018 dev_notice(smmu->dev, "\tstage 2 translation\n");
3019 }
3020
3021 if (id & ID0_NTS) {
3022 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
3023 dev_notice(smmu->dev, "\tnested translation\n");
3024 }
3025
3026 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01003027 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003028 dev_err(smmu->dev, "\tno translation support!\n");
3029 return -ENODEV;
3030 }
3031
Robin Murphyb7862e32016-04-13 18:13:03 +01003032 if ((id & ID0_S1TS) &&
3033 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003034 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
3035 dev_notice(smmu->dev, "\taddress translation ops\n");
3036 }
3037
Robin Murphybae2c2d2015-07-29 19:46:05 +01003038 /*
3039 * In order for DMA API calls to work properly, we must defer to what
3040 * the DT says about coherency, regardless of what the hardware claims.
3041 * Fortunately, this also opens up a workaround for systems where the
3042 * ID register value has ended up configured incorrectly.
3043 */
3044 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
3045 cttw_reg = !!(id & ID0_CTTW);
3046 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003047 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003048 if (cttw_dt || cttw_reg)
3049 dev_notice(smmu->dev, "\t%scoherent table walk\n",
3050 cttw_dt ? "" : "non-");
3051 if (cttw_dt != cttw_reg)
3052 dev_notice(smmu->dev,
3053 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003054
3055 if (id & ID0_SMS) {
3056 u32 smr, sid, mask;
3057
3058 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
3059 smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
3060 ID0_NUMSMRG_MASK;
3061 if (smmu->num_mapping_groups == 0) {
3062 dev_err(smmu->dev,
3063 "stream-matching supported, but no SMRs present!\n");
3064 return -ENODEV;
3065 }
3066
Dhaval Patel031d7462015-05-09 14:47:29 -07003067 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
3068 smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
3069 smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
3070 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
3071 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
Will Deacon45ae7cf2013-06-24 18:31:25 +01003072
Dhaval Patel031d7462015-05-09 14:47:29 -07003073 mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
3074 sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
3075 if ((mask & sid) != sid) {
3076 dev_err(smmu->dev,
3077 "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
3078 mask, sid);
3079 return -ENODEV;
3080 }
3081
3082 dev_notice(smmu->dev,
 3083			   "\tstream matching with %u register groups, mask 0x%x\n",
3084 smmu->num_mapping_groups, mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003085 }
Olav Haugan3c8766d2014-08-22 17:12:32 -07003086 } else {
3087 smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
3088 ID0_NUMSIDB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003089 }
3090
Robin Murphy7602b872016-04-28 17:12:09 +01003091 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
3092 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
3093 if (!(id & ID0_PTFS_NO_AARCH32S))
3094 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
3095 }
3096
Will Deacon45ae7cf2013-06-24 18:31:25 +01003097 /* ID1 */
3098 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01003099 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003100
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003101 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00003102 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01003103 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003104 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07003105 dev_warn(smmu->dev,
3106 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
3107 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003108
Will Deacon518f7132014-11-14 17:17:54 +00003109 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003110 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
3111 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
3112 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
3113 return -ENODEV;
3114 }
3115 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
3116 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01003117 /*
3118 * Cavium CN88xx erratum #27704.
3119 * Ensure ASID and VMID allocation is unique across all SMMUs in
3120 * the system.
3121 */
3122 if (smmu->model == CAVIUM_SMMUV2) {
3123 smmu->cavium_id_base =
3124 atomic_add_return(smmu->num_context_banks,
3125 &cavium_smmu_context_count);
3126 smmu->cavium_id_base -= smmu->num_context_banks;
3127 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003128
3129 /* ID2 */
3130 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
3131 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003132 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003133
Will Deacon518f7132014-11-14 17:17:54 +00003134 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003135 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003136 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003137
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003138 if (id & ID2_VMID16)
3139 smmu->features |= ARM_SMMU_FEAT_VMID16;
3140
Robin Murphyf1d84542015-03-04 16:41:05 +00003141 /*
3142 * What the page table walker can address actually depends on which
3143 * descriptor format is in use, but since a) we don't know that yet,
3144 * and b) it can vary per context bank, this will have to do...
3145 */
3146 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
3147 dev_warn(smmu->dev,
3148 "failed to set DMA mask for table walker\n");
3149
Robin Murphyb7862e32016-04-13 18:13:03 +01003150 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00003151 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01003152 if (smmu->version == ARM_SMMU_V1_64K)
3153 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003154 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003155 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00003156 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00003157 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01003158 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00003159 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01003160 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00003161 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01003162 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003163 }
3164
Robin Murphy7602b872016-04-28 17:12:09 +01003165 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01003166 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01003167 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01003168 if (smmu->features &
3169 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01003170 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01003171 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01003172 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01003173 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01003174 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01003175
Robin Murphyd5466352016-05-09 17:20:09 +01003176 if (arm_smmu_ops.pgsize_bitmap == -1UL)
3177 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
3178 else
3179 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
3180 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
3181 smmu->pgsize_bitmap);
3182
Will Deacon518f7132014-11-14 17:17:54 +00003183
Will Deacon28d60072014-09-01 16:24:48 +01003184 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
3185 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00003186 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003187
3188 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
3189 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00003190 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003191
Will Deacon45ae7cf2013-06-24 18:31:25 +01003192 return 0;
3193}
3194
Robin Murphy67b65a32016-04-13 18:12:57 +01003195struct arm_smmu_match_data {
3196 enum arm_smmu_arch_version version;
3197 enum arm_smmu_implementation model;
3198};
3199
3200#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
3201static struct arm_smmu_match_data name = { .version = ver, .model = imp }
3202
3203ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
3204ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01003205ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003206ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01003207ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Patrick Dalyf0d4e212016-06-20 15:50:14 -07003208ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01003209
Joerg Roedel09b52692014-10-02 12:24:45 +02003210static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01003211 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
3212 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
3213 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01003214 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003215 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01003216 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07003217 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01003218 { },
3219};
3220MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
3221
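/*
 * Hypothetical device tree node tying together the bindings consumed
 * by the probe path below. Every name and value here is illustrative
 * only; the referenced master node would carry #stream-id-cells = <1>.
 *
 *	smmu: iommu@d00000 {
 *		compatible = "qcom,smmu-v2";
 *		reg = <0xd00000 0x10000>;
 *		#global-interrupts = <1>;
 *		interrupts = <0 73 0>, <0 320 0>, <0 321 0>;
 *		vdd-supply = <&gdsc_smmu>;
 *		clocks = <&clock_iface>, <&clock_bus>;
 *		clock-names = "iface_clk", "bus_clk";
 *		qcom,bus-master-id = <MSM_BUS_MASTER_AMPSS_M0>;
 *		attach-impl-defs = <0x6000 0x270>;
 *		mmu-masters = <&my_device 0x40>;
 *	};
 */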
Will Deacon45ae7cf2013-06-24 18:31:25 +01003222static int arm_smmu_device_dt_probe(struct platform_device *pdev)
3223{
Robin Murphy09360402014-08-28 17:51:59 +01003224 const struct of_device_id *of_id;
Robin Murphy67b65a32016-04-13 18:12:57 +01003225 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003226 struct resource *res;
3227 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003228 struct device *dev = &pdev->dev;
3229 struct rb_node *node;
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02003230 struct of_phandle_iterator it;
3231 struct arm_smmu_phandle_args *masterspec;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003232 int num_irqs, i, err;
3233
3234 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
3235 if (!smmu) {
3236 dev_err(dev, "failed to allocate arm_smmu_device\n");
3237 return -ENOMEM;
3238 }
3239 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08003240 spin_lock_init(&smmu->atos_lock);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003241 mutex_init(&smmu->power_lock);
Patrick Daly8befb662016-08-17 20:03:28 -07003242 spin_lock_init(&smmu->clock_refs_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07003243 idr_init(&smmu->asid_idr);
3244 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003245
Robin Murphy09360402014-08-28 17:51:59 +01003246 of_id = of_match_node(arm_smmu_of_match, dev->of_node);
Robin Murphy67b65a32016-04-13 18:12:57 +01003247 data = of_id->data;
3248 smmu->version = data->version;
3249 smmu->model = data->model;
Robin Murphy09360402014-08-28 17:51:59 +01003250
Will Deacon45ae7cf2013-06-24 18:31:25 +01003251 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01003252 smmu->base = devm_ioremap_resource(dev, res);
3253 if (IS_ERR(smmu->base))
3254 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003255 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003256
3257 if (of_property_read_u32(dev->of_node, "#global-interrupts",
3258 &smmu->num_global_irqs)) {
3259 dev_err(dev, "missing #global-interrupts property\n");
3260 return -ENODEV;
3261 }
3262
3263 num_irqs = 0;
3264 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
3265 num_irqs++;
3266 if (num_irqs > smmu->num_global_irqs)
3267 smmu->num_context_irqs++;
3268 }
3269
Andreas Herrmann44a08de2013-10-01 13:39:07 +01003270 if (!smmu->num_context_irqs) {
3271 dev_err(dev, "found %d interrupts but expected at least %d\n",
3272 num_irqs, smmu->num_global_irqs + 1);
3273 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003274 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003275
3276 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
3277 GFP_KERNEL);
3278 if (!smmu->irqs) {
3279 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
3280 return -ENOMEM;
3281 }
3282
3283 for (i = 0; i < num_irqs; ++i) {
3284 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07003285
Will Deacon45ae7cf2013-06-24 18:31:25 +01003286 if (irq < 0) {
3287 dev_err(dev, "failed to get irq index %d\n", i);
3288 return -ENODEV;
3289 }
3290 smmu->irqs[i] = irq;
3291 }
3292
Dhaval Patel031d7462015-05-09 14:47:29 -07003293 parse_driver_options(smmu);
3294
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003295 err = arm_smmu_init_clocks(smmu);
Olav Haugan3c8766d2014-08-22 17:12:32 -07003296 if (err)
3297 return err;
3298
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003299 err = arm_smmu_init_regulators(smmu);
3300 if (err)
3301 return err;
3302
Patrick Daly2764f952016-09-06 19:22:44 -07003303 err = arm_smmu_init_bus_scaling(pdev, smmu);
3304 if (err)
3305 return err;
3306
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003307 err = arm_smmu_power_on(smmu);
3308 if (err)
3309 return err;
3310
3311 err = arm_smmu_device_cfg_probe(smmu);
3312 if (err)
3313 goto out_power_off;
3314
Will Deacon45ae7cf2013-06-24 18:31:25 +01003315 i = 0;
3316 smmu->masters = RB_ROOT;
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02003317
3318 err = -ENOMEM;
3319 /* No need to zero the memory for masterspec */
3320 masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
3321 if (!masterspec)
3322 goto out_put_masters;
3323
3324 of_for_each_phandle(&it, err, dev->of_node,
3325 "mmu-masters", "#stream-id-cells", 0) {
3326 int count = of_phandle_iterator_args(&it, masterspec->args,
3327 MAX_MASTER_STREAMIDS);
3328 masterspec->np = of_node_get(it.node);
3329 masterspec->args_count = count;
3330
3331 err = register_smmu_master(smmu, dev, masterspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003332 if (err) {
3333 dev_err(dev, "failed to add master %s\n",
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02003334 masterspec->np->name);
3335 kfree(masterspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003336 goto out_put_masters;
3337 }
3338
3339 i++;
3340 }
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02003341
Will Deacon45ae7cf2013-06-24 18:31:25 +01003342 dev_notice(dev, "registered %d master devices\n", i);
3343
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02003344 kfree(masterspec);
3345
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003346 err = arm_smmu_parse_impl_def_registers(smmu);
3347 if (err)
3348 goto out_put_masters;
3349
Robin Murphyb7862e32016-04-13 18:13:03 +01003350 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01003351 smmu->num_context_banks != smmu->num_context_irqs) {
3352 dev_err(dev,
3353 "found only %d context interrupt(s) but %d required\n",
3354 smmu->num_context_irqs, smmu->num_context_banks);
Wei Yongjun89a23cd2013-11-15 09:42:30 +00003355 err = -ENODEV;
Will Deacon44680ee2014-06-25 11:29:12 +01003356 goto out_put_masters;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003357 }
3358
Will Deacon45ae7cf2013-06-24 18:31:25 +01003359 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08003360 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
3361 NULL, arm_smmu_global_fault,
3362 IRQF_ONESHOT | IRQF_SHARED,
3363 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003364 if (err) {
3365 dev_err(dev, "failed to request global IRQ %d (%u)\n",
3366 i, smmu->irqs[i]);
Peng Fanbee14002016-07-04 17:38:22 +08003367 goto out_put_masters;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003368 }
3369 }
3370
3371 INIT_LIST_HEAD(&smmu->list);
3372 spin_lock(&arm_smmu_devices_lock);
3373 list_add(&smmu->list, &arm_smmu_devices);
3374 spin_unlock(&arm_smmu_devices_lock);
Will Deaconfd90cec2013-08-21 13:56:34 +01003375
3376 arm_smmu_device_reset(smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003377 arm_smmu_power_off(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003378 return 0;
3379
Will Deacon45ae7cf2013-06-24 18:31:25 +01003380out_put_masters:
3381 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07003382 struct arm_smmu_master *master
3383 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003384 of_node_put(master->of_node);
3385 }
3386
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003387out_power_off:
3388 arm_smmu_power_off(smmu);
3389
Will Deacon45ae7cf2013-06-24 18:31:25 +01003390 return err;
3391}
3392
3393static int arm_smmu_device_remove(struct platform_device *pdev)
3394{
3395 int i;
3396 struct device *dev = &pdev->dev;
3397 struct arm_smmu_device *curr, *smmu = NULL;
3398 struct rb_node *node;
3399
3400 spin_lock(&arm_smmu_devices_lock);
3401 list_for_each_entry(curr, &arm_smmu_devices, list) {
3402 if (curr->dev == dev) {
3403 smmu = curr;
3404 list_del(&smmu->list);
3405 break;
3406 }
3407 }
3408 spin_unlock(&arm_smmu_devices_lock);
3409
3410 if (!smmu)
3411 return -ENODEV;
3412
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003413 if (arm_smmu_power_on(smmu))
3414 return -EINVAL;
3415
Will Deacon45ae7cf2013-06-24 18:31:25 +01003416 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07003417 struct arm_smmu_master *master
3418 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003419 of_node_put(master->of_node);
3420 }
3421
Will Deaconecfadb62013-07-31 19:21:28 +01003422 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Will Deacon45ae7cf2013-06-24 18:31:25 +01003423 dev_err(dev, "removing device with active domains!\n");
3424
3425 for (i = 0; i < smmu->num_global_irqs; ++i)
Peng Fanbee14002016-07-04 17:38:22 +08003426 devm_free_irq(smmu->dev, smmu->irqs[i], smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003427
Patrick Dalyc190d932016-08-30 17:23:28 -07003428 idr_destroy(&smmu->asid_idr);
3429
Will Deacon45ae7cf2013-06-24 18:31:25 +01003430 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07003431 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003432 arm_smmu_power_off(smmu);
3433
Patrick Daly2764f952016-09-06 19:22:44 -07003434 msm_bus_scale_unregister(smmu->bus_client);
3435
Will Deacon45ae7cf2013-06-24 18:31:25 +01003436 return 0;
3437}
3438
Will Deacon45ae7cf2013-06-24 18:31:25 +01003439static struct platform_driver arm_smmu_driver = {
3440 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003441 .name = "arm-smmu",
3442 .of_match_table = of_match_ptr(arm_smmu_of_match),
3443 },
3444 .probe = arm_smmu_device_dt_probe,
3445 .remove = arm_smmu_device_remove,
3446};
3447
3448static int __init arm_smmu_init(void)
3449{
Thierry Reding0e7d37a2014-11-07 15:26:18 +00003450 struct device_node *np;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003451 int ret;
3452
Thierry Reding0e7d37a2014-11-07 15:26:18 +00003453 /*
3454 * Play nice with systems that don't have an ARM SMMU by checking that
3455 * an ARM SMMU exists in the system before proceeding with the driver
3456 * and IOMMU bus operation registration.
3457 */
3458 np = of_find_matching_node(NULL, arm_smmu_of_match);
3459 if (!np)
3460 return 0;
3461
3462 of_node_put(np);
3463
Will Deacon45ae7cf2013-06-24 18:31:25 +01003464 ret = platform_driver_register(&arm_smmu_driver);
3465 if (ret)
3466 return ret;
3467
3468 /* Oh, for a proper bus abstraction */
Dan Carpenter6614ee72013-08-21 09:34:20 +01003469 if (!iommu_present(&platform_bus_type))
Will Deacon45ae7cf2013-06-24 18:31:25 +01003470 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
3471
Will Deacond123cf82014-02-04 22:17:53 +00003472#ifdef CONFIG_ARM_AMBA
Dan Carpenter6614ee72013-08-21 09:34:20 +01003473 if (!iommu_present(&amba_bustype))
Will Deacon45ae7cf2013-06-24 18:31:25 +01003474 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
Will Deacond123cf82014-02-04 22:17:53 +00003475#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01003476
Will Deacona9a1b0b2014-05-01 18:05:08 +01003477#ifdef CONFIG_PCI
Wei Chen112c8982016-06-13 17:20:17 +08003478 if (!iommu_present(&pci_bus_type)) {
3479 pci_request_acs();
Will Deacona9a1b0b2014-05-01 18:05:08 +01003480 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
Wei Chen112c8982016-06-13 17:20:17 +08003481 }
Will Deacona9a1b0b2014-05-01 18:05:08 +01003482#endif
3483
Will Deacon45ae7cf2013-06-24 18:31:25 +01003484 return 0;
3485}
3486
3487static void __exit arm_smmu_exit(void)
3488{
3489 return platform_driver_unregister(&arm_smmu_driver);
3490}
3491
Andreas Herrmannb1950b22013-10-01 13:39:05 +01003492subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003493module_exit(arm_smmu_exit);
3494
3495MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
3496MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
3497MODULE_LICENSE("GPL v2");