/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/msm-bus.h>
#include <dt-bindings/msm/msm-bus-ids.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +010089/* Configuration registers */
90#define ARM_SMMU_GR0_sCR0 0x0
91#define sCR0_CLIENTPD (1 << 0)
92#define sCR0_GFRE (1 << 1)
93#define sCR0_GFIE (1 << 2)
94#define sCR0_GCFGFRE (1 << 4)
95#define sCR0_GCFGFIE (1 << 5)
96#define sCR0_USFCFG (1 << 10)
97#define sCR0_VMIDPNE (1 << 11)
98#define sCR0_PTM (1 << 12)
99#define sCR0_FB (1 << 13)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800100#define sCR0_VMID16EN (1 << 31)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100101#define sCR0_BSU_SHIFT 14
102#define sCR0_BSU_MASK 0x3
103
Peng Fan3ca37122016-05-03 21:50:30 +0800104/* Auxiliary Configuration register */
105#define ARM_SMMU_GR0_sACR 0x10
106
Will Deacon45ae7cf2013-06-24 18:31:25 +0100107/* Identification registers */
108#define ARM_SMMU_GR0_ID0 0x20
109#define ARM_SMMU_GR0_ID1 0x24
110#define ARM_SMMU_GR0_ID2 0x28
111#define ARM_SMMU_GR0_ID3 0x2c
112#define ARM_SMMU_GR0_ID4 0x30
113#define ARM_SMMU_GR0_ID5 0x34
114#define ARM_SMMU_GR0_ID6 0x38
115#define ARM_SMMU_GR0_ID7 0x3c
116#define ARM_SMMU_GR0_sGFSR 0x48
117#define ARM_SMMU_GR0_sGFSYNR0 0x50
118#define ARM_SMMU_GR0_sGFSYNR1 0x54
119#define ARM_SMMU_GR0_sGFSYNR2 0x58
Will Deacon45ae7cf2013-06-24 18:31:25 +0100120
121#define ID0_S1TS (1 << 30)
122#define ID0_S2TS (1 << 29)
123#define ID0_NTS (1 << 28)
124#define ID0_SMS (1 << 27)
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000125#define ID0_ATOSNS (1 << 26)
Robin Murphy7602b872016-04-28 17:12:09 +0100126#define ID0_PTFS_NO_AARCH32 (1 << 25)
127#define ID0_PTFS_NO_AARCH32S (1 << 24)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100128#define ID0_CTTW (1 << 14)
129#define ID0_NUMIRPT_SHIFT 16
130#define ID0_NUMIRPT_MASK 0xff
Olav Haugan3c8766d2014-08-22 17:12:32 -0700131#define ID0_NUMSIDB_SHIFT 9
132#define ID0_NUMSIDB_MASK 0xf
Will Deacon45ae7cf2013-06-24 18:31:25 +0100133#define ID0_NUMSMRG_SHIFT 0
134#define ID0_NUMSMRG_MASK 0xff
135
136#define ID1_PAGESIZE (1 << 31)
137#define ID1_NUMPAGENDXB_SHIFT 28
138#define ID1_NUMPAGENDXB_MASK 7
139#define ID1_NUMS2CB_SHIFT 16
140#define ID1_NUMS2CB_MASK 0xff
141#define ID1_NUMCB_SHIFT 0
142#define ID1_NUMCB_MASK 0xff
143
144#define ID2_OAS_SHIFT 4
145#define ID2_OAS_MASK 0xf
146#define ID2_IAS_SHIFT 0
147#define ID2_IAS_MASK 0xf
148#define ID2_UBS_SHIFT 8
149#define ID2_UBS_MASK 0xf
150#define ID2_PTFS_4K (1 << 12)
151#define ID2_PTFS_16K (1 << 13)
152#define ID2_PTFS_64K (1 << 14)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800153#define ID2_VMID16 (1 << 15)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100154
Peng Fan3ca37122016-05-03 21:50:30 +0800155#define ID7_MAJOR_SHIFT 4
156#define ID7_MAJOR_MASK 0xf
Will Deacon45ae7cf2013-06-24 18:31:25 +0100157
Will Deacon45ae7cf2013-06-24 18:31:25 +0100158/* Global TLB invalidation */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100159#define ARM_SMMU_GR0_TLBIVMID 0x64
160#define ARM_SMMU_GR0_TLBIALLNSNH 0x68
161#define ARM_SMMU_GR0_TLBIALLH 0x6c
162#define ARM_SMMU_GR0_sTLBGSYNC 0x70
163#define ARM_SMMU_GR0_sTLBGSTATUS 0x74
164#define sTLBGSTATUS_GSACTIVE (1 << 0)
Mitchel Humpherys849aa502015-11-09 11:50:58 -0800165#define TLB_LOOP_TIMEOUT 500000 /* 500ms */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100166
167/* Stream mapping registers */
168#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
169#define SMR_VALID (1 << 31)
170#define SMR_MASK_SHIFT 16
171#define SMR_MASK_MASK 0x7fff
172#define SMR_ID_SHIFT 0
173#define SMR_ID_MASK 0x7fff
174
175#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
176#define S2CR_CBNDX_SHIFT 0
177#define S2CR_CBNDX_MASK 0xff
178#define S2CR_TYPE_SHIFT 16
179#define S2CR_TYPE_MASK 0x3
180#define S2CR_TYPE_TRANS (0 << S2CR_TYPE_SHIFT)
181#define S2CR_TYPE_BYPASS (1 << S2CR_TYPE_SHIFT)
182#define S2CR_TYPE_FAULT (2 << S2CR_TYPE_SHIFT)
183
184/* Context bank attribute registers */
185#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
186#define CBAR_VMID_SHIFT 0
187#define CBAR_VMID_MASK 0xff
Will Deacon57ca90f2014-02-06 14:59:05 +0000188#define CBAR_S1_BPSHCFG_SHIFT 8
189#define CBAR_S1_BPSHCFG_MASK 3
190#define CBAR_S1_BPSHCFG_NSH 3
Will Deacon45ae7cf2013-06-24 18:31:25 +0100191#define CBAR_S1_MEMATTR_SHIFT 12
192#define CBAR_S1_MEMATTR_MASK 0xf
193#define CBAR_S1_MEMATTR_WB 0xf
194#define CBAR_TYPE_SHIFT 16
195#define CBAR_TYPE_MASK 0x3
196#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
197#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
198#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
199#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
200#define CBAR_IRPTNDX_SHIFT 24
201#define CBAR_IRPTNDX_MASK 0xff
202
Shalaj Jain04059c52015-03-03 13:34:59 -0800203#define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2))
204#define CBFRSYNRA_SID_MASK (0xffff)
205
Will Deacon45ae7cf2013-06-24 18:31:25 +0100206#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
207#define CBA2R_RW64_32BIT (0 << 0)
208#define CBA2R_RW64_64BIT (1 << 0)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800209#define CBA2R_VMID_SHIFT 16
210#define CBA2R_VMID_MASK 0xffff
Will Deacon45ae7cf2013-06-24 18:31:25 +0100211
212/* Translation context bank */
213#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
Will Deaconc757e852014-07-30 11:33:25 +0100214#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))
Will Deacon45ae7cf2013-06-24 18:31:25 +0100215
216#define ARM_SMMU_CB_SCTLR 0x0
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100217#define ARM_SMMU_CB_ACTLR 0x4
Will Deacon45ae7cf2013-06-24 18:31:25 +0100218#define ARM_SMMU_CB_RESUME 0x8
219#define ARM_SMMU_CB_TTBCR2 0x10
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +0100220#define ARM_SMMU_CB_TTBR0 0x20
221#define ARM_SMMU_CB_TTBR1 0x28
Will Deacon45ae7cf2013-06-24 18:31:25 +0100222#define ARM_SMMU_CB_TTBCR 0x30
Jeremy Gebben8ac927c2015-07-10 16:43:22 -0600223#define ARM_SMMU_CB_CONTEXTIDR 0x34
Will Deacon45ae7cf2013-06-24 18:31:25 +0100224#define ARM_SMMU_CB_S1_MAIR0 0x38
Will Deacon518f7132014-11-14 17:17:54 +0000225#define ARM_SMMU_CB_S1_MAIR1 0x3c
Robin Murphyf9a05f02016-04-13 18:13:01 +0100226#define ARM_SMMU_CB_PAR 0x50
Will Deacon45ae7cf2013-06-24 18:31:25 +0100227#define ARM_SMMU_CB_FSR 0x58
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -0700228#define ARM_SMMU_CB_FSRRESTORE 0x5c
Robin Murphyf9a05f02016-04-13 18:13:01 +0100229#define ARM_SMMU_CB_FAR 0x60
Will Deacon45ae7cf2013-06-24 18:31:25 +0100230#define ARM_SMMU_CB_FSYNR0 0x68
Will Deacon518f7132014-11-14 17:17:54 +0000231#define ARM_SMMU_CB_S1_TLBIVA 0x600
Will Deacon1463fe42013-07-31 19:21:27 +0100232#define ARM_SMMU_CB_S1_TLBIASID 0x610
Will Deacon518f7132014-11-14 17:17:54 +0000233#define ARM_SMMU_CB_S1_TLBIVAL 0x620
234#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
235#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
Mitchel Humpherysf3007992015-06-19 15:00:14 -0700236#define ARM_SMMU_CB_TLBSYNC 0x7f0
237#define ARM_SMMU_CB_TLBSTATUS 0x7f4
238#define TLBSTATUS_SACTIVE (1 << 0)
Robin Murphy661d9622015-05-27 17:09:34 +0100239#define ARM_SMMU_CB_ATS1PR 0x800
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000240#define ARM_SMMU_CB_ATSR 0x8f0
Will Deacon45ae7cf2013-06-24 18:31:25 +0100241
242#define SCTLR_S1_ASIDPNE (1 << 12)
243#define SCTLR_CFCFG (1 << 7)
244#define SCTLR_CFIE (1 << 6)
245#define SCTLR_CFRE (1 << 5)
246#define SCTLR_E (1 << 4)
247#define SCTLR_AFE (1 << 2)
248#define SCTLR_TRE (1 << 1)
249#define SCTLR_M (1 << 0)
250#define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE)
251
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100252#define ARM_MMU500_ACTLR_CPRE (1 << 1)
253
Peng Fan3ca37122016-05-03 21:50:30 +0800254#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
255
Patrick Dalyf0d4e212016-06-20 15:50:14 -0700256/* Definitions for implementation-defined registers */
257#define ACTLR_QCOM_OSH_SHIFT 28
258#define ACTLR_QCOM_OSH 1
259
260#define ACTLR_QCOM_ISH_SHIFT 29
261#define ACTLR_QCOM_ISH 1
262
263#define ACTLR_QCOM_NSH_SHIFT 30
264#define ACTLR_QCOM_NSH 1
265
Mitchel Humpherys952f40a2015-08-19 12:13:28 -0700266#define ARM_SMMU_IMPL_DEF0(smmu) \
267 ((smmu)->base + (2 * (1 << (smmu)->pgshift)))
268#define ARM_SMMU_IMPL_DEF1(smmu) \
269 ((smmu)->base + (6 * (1 << (smmu)->pgshift)))
270#define IMPL_DEF1_MICRO_MMU_CTRL 0
271#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
272#define MICRO_MMU_CTRL_IDLE (1 << 3)
273
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000274#define CB_PAR_F (1 << 0)
275
276#define ATSR_ACTIVE (1 << 0)
277
Will Deacon45ae7cf2013-06-24 18:31:25 +0100278#define RESUME_RETRY (0 << 0)
279#define RESUME_TERMINATE (1 << 0)
280
Will Deacon45ae7cf2013-06-24 18:31:25 +0100281#define TTBCR2_SEP_SHIFT 15
Will Deacon5dc56162015-05-08 17:44:22 +0100282#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100283
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +0100284#define TTBRn_ASID_SHIFT 48
Will Deacon45ae7cf2013-06-24 18:31:25 +0100285
286#define FSR_MULTI (1 << 31)
287#define FSR_SS (1 << 30)
288#define FSR_UUT (1 << 8)
289#define FSR_ASF (1 << 7)
290#define FSR_TLBLKF (1 << 6)
291#define FSR_TLBMCF (1 << 5)
292#define FSR_EF (1 << 4)
293#define FSR_PF (1 << 3)
294#define FSR_AFF (1 << 2)
295#define FSR_TF (1 << 1)
296
Mitchel Humpherys29073202014-07-08 09:52:18 -0700297#define FSR_IGN (FSR_AFF | FSR_ASF | \
298 FSR_TLBMCF | FSR_TLBLKF)
299#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
Will Deaconadaba322013-07-31 19:21:26 +0100300 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100301
302#define FSYNR0_WNR (1 << 4)
303
static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_smr {
	u8 idx;
	u16 mask;
	u16 id;
};

struct arm_smmu_master_cfg {
	int num_streamids;
	u16 streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr *smrs;
};

struct arm_smmu_master {
	struct device_node *of_node;
	struct rb_node node;
	struct arm_smmu_master_cfg cfg;
};

struct arm_smmu_device {
	struct device *dev;

	void __iomem *base;
	unsigned long size;
	unsigned long pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32 features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
#define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
	u32 options;
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;

	u32 num_context_banks;
	u32 num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t irptndx;

	u32 num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long va_size;
	unsigned long ipa_size;
	unsigned long pa_size;
	unsigned long pgsize_bitmap;

	u32 num_global_irqs;
	u32 num_context_irqs;
	unsigned int *irqs;

	struct list_head list;
	struct rb_root masters;

	u32 cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg *impl_def_attach_registers;
	unsigned int num_impl_def_attach_registers;

	int num_clocks;
	struct clk **clocks;

	struct regulator *gdsc;

	struct msm_bus_client_handle *bus_client;
	char *bus_client_name;

	/* Protects power_count */
	struct mutex power_lock;
	int power_count;
	/* Protects clock_refs_count */
	spinlock_t clock_refs_lock;
	int clock_refs_count;

	spinlock_t atos_lock;

	/* protects idr */
	struct mutex idr_mutex;
	struct idr asid_idr;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8 cbndx;
	u8 irptndx;
	u32 cbar;
	u32 procid;
	u16 asid;
	enum arm_smmu_context_fmt fmt;
};
#define INVALID_IRPTNDX			0xff
#define INVALID_CBNDX			0xff
#define INVALID_ASID			0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg)	((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg)	((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_pte_info {
	void *virt_addr;
	size_t size;
	struct list_head entry;
};

struct arm_smmu_domain {
	struct arm_smmu_device *smmu;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	spinlock_t pgtbl_lock;
	struct arm_smmu_cfg cfg;
	enum arm_smmu_domain_stage stage;
	struct mutex init_mutex; /* Protects smmu pointer */
	u32 attributes;
	u32 secure_vmid;
	struct list_head pte_info_list;
	struct list_head unassign_list;
	struct iommu_domain domain;
};

struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ 0, NULL},
};
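/*
 * The table above is matched against boolean properties on the SMMU's
 * device-tree node by parse_driver_options() below. As a hedged,
 * hypothetical illustration (node name and address are made up; only the
 * property names come from the table), a QCOM instance might opt in with:
 *
 *	smmu@d00000 {
 *		compatible = "qcom,smmu-v2";
 *		...
 *		qcom,skip-init;
 *		qcom,dynamic;
 *	};
 */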

static int arm_smmu_halt(struct arm_smmu_device *smmu);
static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu);
static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu);
static void arm_smmu_resume(struct arm_smmu_device *smmu);
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
	struct iommu_domain *domain, dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
static void arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}
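/*
 * A domain flagged with DOMAIN_ATTR_DYNAMIC appears to reuse an
 * already-initialised context bank and manage only its own ASID (allocated
 * from smmu->asid_idr under idr_mutex and bounded by MAX_ASID above). This
 * reading is inferred from the attribute/IDR plumbing in this file; the
 * attach-time details live outside this excerpt.
 */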

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}
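/*
 * For PCI masters the walk above climbs to the root bus and returns the OF
 * node of the host controller: in the legacy "mmu-masters" binding used by
 * this driver generation, stream IDs are described at the root complex
 * rather than per endpoint.
 */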

static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct arm_smmu_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
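/*
 * __arm_smmu_alloc_bitmap() is lock-free: find_next_zero_bit() picks a
 * candidate and the atomic test_and_set_bit() claims it, retrying if a
 * concurrent caller won the race. Both context banks (context_map) and
 * stream-mapping groups (smr_map) are allocated through this helper.
 */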
697
Patrick Daly8befb662016-08-17 20:03:28 -0700698static int arm_smmu_prepare_clocks(struct arm_smmu_device *smmu)
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700699{
700 int i, ret = 0;
701
702 for (i = 0; i < smmu->num_clocks; ++i) {
Patrick Daly8befb662016-08-17 20:03:28 -0700703 ret = clk_prepare(smmu->clocks[i]);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700704 if (ret) {
Patrick Daly8befb662016-08-17 20:03:28 -0700705 dev_err(smmu->dev, "Couldn't prepare clock #%d\n", i);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700706 while (i--)
Patrick Daly8befb662016-08-17 20:03:28 -0700707 clk_unprepare(smmu->clocks[i]);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700708 break;
709 }
710 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700711 return ret;
712}
713
Patrick Daly8befb662016-08-17 20:03:28 -0700714static void arm_smmu_unprepare_clocks(struct arm_smmu_device *smmu)
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700715{
716 int i;
717
Liam Mark3ddf8d12016-04-13 12:42:01 -0700718 for (i = smmu->num_clocks; i; --i)
719 clk_unprepare(smmu->clocks[i - 1]);
Patrick Daly3a8a88a2016-07-22 12:24:05 -0700720}
721
Patrick Daly8befb662016-08-17 20:03:28 -0700722/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
723static int arm_smmu_enable_clocks_atomic(struct arm_smmu_device *smmu)
724{
725 int i, ret = 0;
726 unsigned long flags;
727
728 spin_lock_irqsave(&smmu->clock_refs_lock, flags);
729 if (smmu->clock_refs_count > 0) {
730 smmu->clock_refs_count++;
731 spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
732 return 0;
733 }
734
735 for (i = 0; i < smmu->num_clocks; ++i) {
736 ret = clk_enable(smmu->clocks[i]);
737 if (ret) {
738 dev_err(smmu->dev, "Couldn't enable clock #%d\n", i);
739 while (i--)
740 clk_disable(smmu->clocks[i]);
741 break;
742 }
743 }
744
745 if (!ret)
746 smmu->clock_refs_count++;
747
748 spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
749 return ret;
750}
751
752/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
753static void arm_smmu_disable_clocks_atomic(struct arm_smmu_device *smmu)
754{
755 int i;
756 unsigned long flags;
757
758 spin_lock_irqsave(&smmu->clock_refs_lock, flags);
759 WARN_ON(smmu->clock_refs_count == 0);
760 if (smmu->clock_refs_count > 1) {
761 smmu->clock_refs_count--;
762 spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
763 return;
764 }
765
Liam Mark3ddf8d12016-04-13 12:42:01 -0700766 for (i = smmu->num_clocks; i; --i)
767 clk_disable(smmu->clocks[i - 1]);
Patrick Daly8befb662016-08-17 20:03:28 -0700768
769 smmu->clock_refs_count--;
770 spin_unlock_irqrestore(&smmu->clock_refs_lock, flags);
771}
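/*
 * The prepare/enable split above mirrors the common clock framework:
 * clk_prepare() may sleep and so must run in process context, while
 * clk_enable()/clk_disable() are callable under spinlocks. Keeping them
 * separate lets atomic paths (e.g. the fault handlers below) toggle clocks
 * with only clock_refs_lock held, provided a sleepable caller has already
 * prepared the clocks.
 */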

static int arm_smmu_enable_regulators(struct arm_smmu_device *smmu)
{
	if (!smmu->gdsc)
		return 0;

	return regulator_enable(smmu->gdsc);
}

static int arm_smmu_disable_regulators(struct arm_smmu_device *smmu)
{
	if (!smmu->gdsc)
		return 0;

	return regulator_disable(smmu->gdsc);
}

static int arm_smmu_request_bus(struct arm_smmu_device *smmu)
{
	if (!smmu->bus_client)
		return 0;
	return msm_bus_scale_update_bw(smmu->bus_client, 0, 1000);
}

static int arm_smmu_unrequest_bus(struct arm_smmu_device *smmu)
{
	if (!smmu->bus_client)
		return 0;
	return msm_bus_scale_update_bw(smmu->bus_client, 0, 0);
}

static int arm_smmu_power_on_slow(struct arm_smmu_device *smmu)
{
	int ret;

	mutex_lock(&smmu->power_lock);
	if (smmu->power_count > 0) {
		smmu->power_count += 1;
		mutex_unlock(&smmu->power_lock);
		return 0;
	}

	ret = arm_smmu_enable_regulators(smmu);
	if (ret)
		goto out_unlock;

	ret = arm_smmu_request_bus(smmu);
	if (ret)
		goto out_disable_regulators;

	ret = arm_smmu_prepare_clocks(smmu);
	if (ret)
		goto out_disable_bus;

	smmu->power_count += 1;
	mutex_unlock(&smmu->power_lock);
	return 0;

out_disable_bus:
	arm_smmu_unrequest_bus(smmu);
out_disable_regulators:
	arm_smmu_disable_regulators(smmu);
out_unlock:
	mutex_unlock(&smmu->power_lock);
	return ret;
}

static void arm_smmu_power_off_slow(struct arm_smmu_device *smmu)
{
	mutex_lock(&smmu->power_lock);
	smmu->power_count--;
	WARN_ON(smmu->power_count < 0);

	if (smmu->power_count > 0) {
		mutex_unlock(&smmu->power_lock);
		return;
	}

	arm_smmu_unprepare_clocks(smmu);
	arm_smmu_unrequest_bus(smmu);
	arm_smmu_disable_regulators(smmu);

	mutex_unlock(&smmu->power_lock);
}

static int arm_smmu_power_on(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_power_on_slow(smmu);
	if (ret)
		return ret;

	ret = arm_smmu_enable_clocks_atomic(smmu);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	arm_smmu_power_off_slow(smmu);
	return ret;
}

static void arm_smmu_power_off(struct arm_smmu_device *smmu)
{
	arm_smmu_disable_clocks_atomic(smmu);
	arm_smmu_power_off_slow(smmu);
}
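/*
 * Power-on ordering matters here: the GDSC regulator comes up first, then
 * the bus bandwidth vote, then clock prepare (all sleepable, under
 * power_lock), and finally the atomic clock enable in arm_smmu_power_on().
 * Power-off unwinds in exactly the reverse order.
 */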

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static int arm_smmu_domain_power_on(struct iommu_domain *domain,
				struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain)
		return arm_smmu_enable_clocks_atomic(smmu);

	return arm_smmu_power_on(smmu);
}

/*
 * Must be used instead of arm_smmu_power_off if it may be called from
 * atomic context
 */
static void arm_smmu_domain_power_off(struct iommu_domain *domain,
				struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain) {
		arm_smmu_disable_clocks_atomic(smmu);
		return;
	}

	arm_smmu_power_off(smmu);
}

/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
				int cbndx)
{
	void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
	u32 val;

	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
				      !(val & TLBSTATUS_SACTIVE),
				      0, TLB_LOOP_TIMEOUT))
		dev_err(smmu->dev, "TLBSYNC timeout!\n");
}
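/*
 * Two sync flavours exist: arm_smmu_tlb_sync_cb() above polls the
 * per-context TLBSTATUS register after a context-bank TLBSYNC, while
 * __arm_smmu_tlb_sync() below issues a global sTLBGSYNC and polls
 * sTLBGSTATUS. Stage-1 invalidation by ASID can use the cheaper
 * per-context sync; invalidation by VMID falls back to the global one.
 */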

static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
}

/* Must be called with clocks/regulators enabled */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
		__arm_smmu_tlb_sync(smmu);
	}
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static void *arm_smmu_alloc_pages_exact(void *cookie,
					size_t size, gfp_t gfp_mask)
{
	int ret;
	void *page = alloc_pages_exact(size, gfp_mask);

	if (likely(page)) {
		ret = arm_smmu_prepare_pgtable(page, cookie);
		if (ret) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return page;
}

static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
{
	arm_smmu_unprepare_pgtable(cookie, virt, size);
	/* unprepare also frees (possibly later), no need to free here */
}
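/*
 * The two helpers above route page-table memory for the io-pgtable code
 * through this driver via arm_smmu_gather_ops below. The prepare/unprepare
 * hooks, together with the pte_info_list/unassign_list and secure_vmid
 * fields, suggest that for secure domains page-table pages are assigned to
 * (and later reclaimed from) a secure VM before the walker may use them;
 * the actual assignment helpers are defined later in this file.
 */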

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
	.alloc_pages_exact = arm_smmu_alloc_pages_exact,
	.free_pages_exact = arm_smmu_free_pages_exact,
};

static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
					 dma_addr_t iova, u32 fsr)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu;
	void __iomem *cb_base;
	u64 sctlr, sctlr_orig;
	phys_addr_t phys;

	smmu = smmu_domain->smmu;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	arm_smmu_halt_nowait(smmu);

	writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);

	arm_smmu_wait_for_halt(smmu);

	/* clear FSR to allow ATOS to log any faults */
	writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* disable stall mode momentarily */
	sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
	sctlr = sctlr_orig & ~SCTLR_CFCFG;
	writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);

	phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);

	if (!phys) {
		dev_err(smmu->dev,
			"ATOS failed. Will issue a TLBIALL and try again...\n");
		arm_smmu_tlb_inv_context(smmu_domain);
		phys = arm_smmu_iova_to_phys_hard_no_halt(domain, iova);
		if (phys)
			dev_err(smmu->dev,
				"ATOS succeeded this time. Maybe we missed a TLB invalidation while messing with page tables earlier??\n");
		else
			dev_err(smmu->dev,
				"ATOS still failed. If the page tables look good (check the software table walk) then hardware might be misbehaving.\n");
	}

	/* restore SCTLR */
	writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);

	arm_smmu_resume(smmu);

	return phys;
}
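/*
 * arm_smmu_verify_fault() cross-checks a reported fault by replaying the
 * translation in hardware: halt the SMMU, terminate the stalled
 * transaction, briefly drop SCTLR.CFCFG (stall-on-fault) so the ATOS walk
 * can itself fault and be logged, then let the caller compare the hardware
 * result against its software table walk.
 */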

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret, tmp;
	u32 fsr, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;
	void __iomem *gr1_base;
	bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
	phys_addr_t phys_soft;
	u32 frsynra;
	bool non_fatal_fault = !!(smmu_domain->attributes &
				  DOMAIN_ATTR_NON_FATAL_FAULTS);

	static DEFINE_RATELIMIT_STATE(_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	ret = arm_smmu_power_on(smmu);
	if (ret)
		return IRQ_NONE;

	gr1_base = ARM_SMMU_GR1(smmu);
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT)) {
		ret = IRQ_NONE;
		goto out_power_off;
	}

	if (fatal_asf && (fsr & FSR_ASF)) {
		dev_err(smmu->dev,
			"Took an address size fault. Refusing to recover.\n");
		BUG();
	}

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
	if (fsr & FSR_TF)
		flags |= IOMMU_FAULT_TRANSLATION;
	if (fsr & FSR_PF)
		flags |= IOMMU_FAULT_PERMISSION;
	if (fsr & FSR_EF)
		flags |= IOMMU_FAULT_EXTERNAL;
	if (fsr & FSR_SS)
		flags |= IOMMU_FAULT_TRANSACTION_STALLED;

	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	phys_soft = arm_smmu_iova_to_phys(domain, iova);
	frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
	frsynra &= CBFRSYNRA_SID_MASK;
	tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
	if (!tmp || (tmp == -EBUSY)) {
		dev_dbg(smmu->dev,
			"Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
			iova, fsr, fsynr, cfg->cbndx);
		dev_dbg(smmu->dev,
			"soft iova-to-phys=%pa\n", &phys_soft);
		ret = IRQ_HANDLED;
		resume = RESUME_TERMINATE;
	} else {
		phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
							      fsr);
		if (__ratelimit(&_rs)) {
			dev_err(smmu->dev,
				"Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
				iova, fsr, fsynr, cfg->cbndx);
			dev_err(smmu->dev, "FAR = %016lx\n",
				(unsigned long)iova);
			dev_err(smmu->dev,
				"FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
				fsr,
				(fsr & 0x02) ? "TF " : "",
				(fsr & 0x04) ? "AFF " : "",
				(fsr & 0x08) ? "PF " : "",
				(fsr & 0x10) ? "EF " : "",
				(fsr & 0x20) ? "TLBMCF " : "",
				(fsr & 0x40) ? "TLBLKF " : "",
				(fsr & 0x80) ? "MHF " : "",
				(fsr & 0x40000000) ? "SS " : "",
				(fsr & 0x80000000) ? "MULTI " : "");
			dev_err(smmu->dev,
				"soft iova-to-phys=%pa\n", &phys_soft);
			if (!phys_soft)
				dev_err(smmu->dev,
					"SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
					dev_name(smmu->dev));
			dev_err(smmu->dev,
				"hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
			dev_err(smmu->dev, "SID=0x%x\n", frsynra);
		}
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
		if (!non_fatal_fault) {
			dev_err(smmu->dev,
				"Unhandled arm-smmu context fault!\n");
			BUG();
		}
	}

	/*
	 * If the client returns -EBUSY, do not clear FSR and do not RESUME
	 * if stalled. This is required to keep the IOMMU client stalled on
	 * the outstanding fault. This gives the client a chance to take any
	 * debug action and then terminate the stalled transaction.
	 * So, the sequence in case of stall on fault should be:
	 * 1) Do not clear FSR or write to RESUME here
	 * 2) Client takes any debug action
	 * 3) Client terminates the stalled transaction and resumes the IOMMU
	 * 4) Client clears FSR. The FSR should only be cleared after 3) and
	 * not before so that the fault remains outstanding. This ensures
	 * SCTLR.HUPCF has the desired effect if subsequent transactions also
	 * need to be terminated.
	 */
	if (tmp != -EBUSY) {
		/* Clear the faulting FSR */
		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);

		/*
		 * Barrier required to ensure that the FSR is cleared
		 * before resuming SMMU operation
		 */
		wmb();

		/* Retry or terminate any stalled transactions */
		if (fsr & FSR_SS)
			writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
	}

out_power_off:
	arm_smmu_power_off(smmu);

	return ret;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	if (arm_smmu_power_on(smmu))
		return IRQ_NONE;

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr) {
		arm_smmu_power_off(smmu);
		return IRQ_NONE;
	}

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	arm_smmu_power_off(smmu);
	return IRQ_HANDLED;
}

Will Deacon518f7132014-11-14 17:17:54 +00001270static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1271 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001272{
1273 u32 reg;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001274 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001275 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +01001276 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1277 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001278 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001279
Will Deacon45ae7cf2013-06-24 18:31:25 +01001280 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001281 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1282 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001283
Will Deacon4a1c93c2015-03-04 12:21:03 +00001284 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001285 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1286 reg = CBA2R_RW64_64BIT;
1287 else
1288 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001289 /* 16-bit VMIDs live in CBA2R */
1290 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001291 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001292
Will Deacon4a1c93c2015-03-04 12:21:03 +00001293 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
1294 }
1295
Will Deacon45ae7cf2013-06-24 18:31:25 +01001296 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001297 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001298 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001299 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001300
Will Deacon57ca90f2014-02-06 14:59:05 +00001301 /*
1302 * Use the weakest shareability/memory types, so they are
1303 * overridden by the ttbcr/pte.
1304 */
1305 if (stage1) {
1306 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1307 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001308 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1309 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001310 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001311 }
Will Deacon44680ee2014-06-25 11:29:12 +01001312 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001313
Will Deacon518f7132014-11-14 17:17:54 +00001314 /* TTBRs */
1315 if (stage1) {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001316 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
Will Deacon45ae7cf2013-06-24 18:31:25 +01001317
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001318 reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001319 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001320
1321 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001322 reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001323 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
Will Deacon518f7132014-11-14 17:17:54 +00001324 } else {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001325 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001326 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +00001327 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001328
Will Deacon518f7132014-11-14 17:17:54 +00001329 /* TTBCR */
1330 if (stage1) {
1331 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1332 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
1333 if (smmu->version > ARM_SMMU_V1) {
1334 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
Will Deacon5dc56162015-05-08 17:44:22 +01001335 reg |= TTBCR2_SEP_UPSTREAM;
Will Deacon518f7132014-11-14 17:17:54 +00001336 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001337 }
1338 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001339 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
1340 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001341 }
1342
Will Deacon518f7132014-11-14 17:17:54 +00001343 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001344 if (stage1) {
Will Deacon518f7132014-11-14 17:17:54 +00001345 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
Will Deacon45ae7cf2013-06-24 18:31:25 +01001346 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Will Deacon518f7132014-11-14 17:17:54 +00001347 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1348 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001349 }
1350
Will Deacon45ae7cf2013-06-24 18:31:25 +01001351 /* SCTLR */
Patrick Dalye62d3362016-03-15 18:58:28 -07001352 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_EAE_SBOP;
1353
1354 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) ||
1355 !stage1)
1356 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001357 if (stage1)
1358 reg |= SCTLR_S1_ASIDPNE;
1359#ifdef __BIG_ENDIAN
1360 reg |= SCTLR_E;
1361#endif
Will Deacon25724842013-08-21 13:49:53 +01001362 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001363}
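
/*
 * Illustrative sketch (not part of the driver): for a stage-1 context the
 * ASID is packed into the upper bits of the TTBR alongside the page table
 * base address, so a programmed TTBR0 value can be decoded as below. A
 * TTBRn_ASID_SHIFT of 48 is an assumption for this example.
 *
 *	u64 asid = ttbr0 >> TTBRn_ASID_SHIFT;		       upper bits
 *	u64 base = ttbr0 & ((1ULL << TTBRn_ASID_SHIFT) - 1);   table base
 *
 * This is why the ASID can be handed to callers together with the page
 * table address via a single u64 (see DOMAIN_ATTR_TTBR0 below).
 */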

static int arm_smmu_init_asid(struct iommu_domain *domain,
			      struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool dynamic = is_dynamic_domain(domain);
	int ret;

	if (!dynamic) {
		cfg->asid = cfg->cbndx + 1;
	} else {
		mutex_lock(&smmu->idr_mutex);
		ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
				       smmu->num_context_banks + 2,
				       MAX_ASID + 1, GFP_KERNEL);

		mutex_unlock(&smmu->idr_mutex);
		if (ret < 0) {
			dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
				ret);
			return ret;
		}
		cfg->asid = ret;
	}
	return 0;
}

static void arm_smmu_free_asid(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool dynamic = is_dynamic_domain(domain);

	if (cfg->asid == INVALID_ASID || !dynamic)
		return;

	mutex_lock(&smmu->idr_mutex);
	idr_remove(&smmu->asid_idr, cfg->asid);
	mutex_unlock(&smmu->idr_mutex);
}
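
/*
 * Usage sketch (illustrative only): a dynamic domain shares a context bank
 * with another domain but takes its own ASID from the IDR above, so the
 * expected lifecycle is allocate-on-init, free-on-destroy:
 *
 *	ret = arm_smmu_init_asid(domain, smmu);	  allocates cfg->asid
 *	...
 *	arm_smmu_free_asid(domain);		  returns it to asid_idr
 *
 * Non-dynamic domains derive the ASID directly from the context bank index
 * (cbndx + 1) and never touch the IDR, which is why free_asid is a no-op
 * for them.
 */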

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool dynamic;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
	smmu_domain->cfg.asid = INVALID_ASID;

	/* We're bypassing these SIDs, so don't allocate an actual context */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	dynamic = is_dynamic_domain(domain);
	if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
		dev_err(smmu->dev, "dynamic domains not supported\n");
		ret = -EPERM;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Dynamic domains must set cbndx through domain attribute */
	if (!dynamic) {
		ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
					      smmu->num_context_banks);
		if (ret < 0)
			goto out_unlock;
		cfg->cbndx = ret;
	}
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
					 smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/*
	 * Assign any page table memory that might have been allocated
	 * during alloc_io_pgtable_ops.
	 */
	arm_smmu_assign_table(smmu_domain);

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;

	/* Assign an ASID */
	ret = arm_smmu_init_asid(domain, smmu);
	if (ret)
		goto out_clear_smmu;

	if (!dynamic) {
		/* Initialise the context bank with our page table cfg */
		arm_smmu_init_context_bank(smmu_domain,
					   &smmu_domain->pgtbl_cfg);

		/*
		 * Request context fault interrupt. Do this last to avoid the
		 * handler seeing a half-initialised domain state.
		 */
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
			arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
			"arm-smmu-context-fault", domain);
		if (ret < 0) {
			dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
				cfg->irptndx, irq);
			cfg->irptndx = INVALID_IRPTNDX;
			goto out_clear_smmu;
		}
	} else {
		cfg->irptndx = INVALID_IRPTNDX;
	}
	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	arm_smmu_destroy_domain_context(domain);
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;
	bool dynamic;
	int ret;

	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
		return;

	ret = arm_smmu_power_on(smmu);
	if (ret) {
		WARN_ONCE(ret, "Whoops, powering on smmu %p failed. Leaking context bank\n",
			  smmu);
		return;
	}

	dynamic = is_dynamic_domain(domain);
	if (dynamic) {
		arm_smmu_free_asid(domain);
		free_io_pgtable_ops(smmu_domain->pgtbl_ops);
		arm_smmu_power_off(smmu);
		arm_smmu_unassign_table(smmu_domain);
		return;
	}

	/*
	 * Disable the context bank and free the page tables before
	 * releasing the context bank itself.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	arm_smmu_unassign_table(smmu_domain);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);

	arm_smmu_power_off(smmu);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	/* Do not support DOMAIN_DMA for now */
	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);
	smmu_domain->cfg.cbndx = INVALID_CBNDX;
	smmu_domain->secure_vmid = VMID_INVAL;
	INIT_LIST_HEAD(&smmu_domain->pte_info_list);
	INIT_LIST_HEAD(&smmu_domain->unassign_list);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (idx < 0) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}
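
/*
 * Illustrative sketch of the SMR encoding used above: each stream match
 * register holds a valid bit, an ID and a mask, and mask bits are treated
 * as don't-care in the comparison against an incoming stream ID. With
 * mask == 0 (no sharing, as in this driver) the match is exact:
 *
 *	u32 smr = SMR_VALID | (sid << SMR_ID_SHIFT) |
 *		  (0 << SMR_MASK_SHIFT);	exact match on sid
 *	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(idx));
 *
 * A non-zero mask would let one SMR entry match a whole range of stream
 * IDs, at the cost of having to avoid overlaps between entries.
 */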

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/*
	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
	 * for all devices behind the SMMU. Note that we need to take
	 * care configuring SMRs for devices that are both a
	 * platform_device and a PCI device (i.e. a PCI host controller).
	 */
	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
		return 0;

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx, s2cr;

		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		s2cr = S2CR_TYPE_TRANS |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}

static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;

		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}

static void arm_smmu_detach_dev(struct iommu_domain *domain,
				struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_master_cfg *cfg;
	int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (dynamic)
		return;

	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return;

	if (!smmu) {
		dev_err(dev, "Domain not attached; cannot detach!\n");
		return;
	}

	dev->archdata.iommu = NULL;
	arm_smmu_domain_remove_master(smmu_domain, cfg);

	/* Remove additional vote for atomic power */
	if (atomic_domain) {
		WARN_ON(arm_smmu_enable_clocks_atomic(smmu));
		arm_smmu_power_off(smmu);
	}
}

static void arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
{
	int ret;
	int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
	int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
	int source_vmid = VMID_HLOS;
	struct arm_smmu_pte_info *pte_info, *temp;

	if (smmu_domain->secure_vmid == VMID_INVAL)
		return;

	list_for_each_entry(pte_info, &smmu_domain->pte_info_list,
			    entry) {
		ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
				      PAGE_SIZE, &source_vmid, 1,
				      dest_vmids, dest_perms, 2);
		if (WARN_ON(ret))
			break;
	}

	list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
				 entry) {
		list_del(&pte_info->entry);
		kfree(pte_info);
	}
}

static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
{
	int ret;
	int dest_vmids = VMID_HLOS;
	int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
	int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
	struct arm_smmu_pte_info *pte_info, *temp;

	if (smmu_domain->secure_vmid == VMID_INVAL)
		return;

	list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
		ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
				      PAGE_SIZE, source_vmlist, 2,
				      &dest_vmids, &dest_perms, 1);
		if (WARN_ON(ret))
			break;
		free_pages_exact(pte_info->virt_addr, pte_info->size);
	}

	list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
				 entry) {
		list_del(&pte_info->entry);
		kfree(pte_info);
	}
}

static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_pte_info *pte_info;

	if (smmu_domain->secure_vmid == VMID_INVAL) {
		free_pages_exact(addr, size);
		return;
	}

	pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
	if (!pte_info)
		return;

	pte_info->virt_addr = addr;
	pte_info->size = size;
	list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
}

static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_pte_info *pte_info;

	if (smmu_domain->secure_vmid == VMID_INVAL)
		return -EINVAL;

	pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
	if (!pte_info)
		return -ENOMEM;
	pte_info->virt_addr = addr;
	list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
	return 0;
}
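
/*
 * Flow sketch (illustrative): for a domain with a secure VMID, page table
 * pages move through the two lists above in lock-step with the
 * hyp_assign_phys() calls:
 *
 *	io-pgtable allocates a table page
 *	  -> arm_smmu_prepare_pgtable() queues it on pte_info_list
 *	  -> arm_smmu_assign_table() hyp-assigns it to HLOS + secure VMID
 *
 *	io-pgtable frees a table page
 *	  -> arm_smmu_unprepare_pgtable() queues it on unassign_list
 *	  -> arm_smmu_unassign_table() hyp-assigns it back to HLOS and
 *	     only then calls free_pages_exact()
 *
 * Deferring the free until after re-assignment is what keeps HLOS from
 * recycling a page the secure world can still read.
 */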

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Enable clocks and power */
	ret = arm_smmu_power_on(smmu);
	if (ret)
		return ret;

	/*
	 * Keep an additional vote for non-atomic power until domain is
	 * detached
	 */
	if (atomic_domain) {
		ret = arm_smmu_power_on(smmu);
		if (ret)
			goto out_power_off;

		arm_smmu_disable_clocks_atomic(smmu);
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		goto out_power_off;

	/* Do not modify the SIDs, HW is still running */
	if (is_dynamic_domain(domain)) {
		ret = 0;
		goto out_power_off;
	}

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		ret = -EINVAL;
		goto out_power_off;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg) {
		ret = -ENODEV;
		goto out_power_off;
	}

	/* Detach the dev from its current domain */
	if (dev->archdata.iommu)
		arm_smmu_detach_dev(dev->archdata.iommu, dev);

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;

out_power_off:
	arm_smmu_power_off(smmu);

	return ret;
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	arm_smmu_assign_table(smmu_domain);

	return ret;
}
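
/*
 * Caller-side sketch (illustrative, using the generic IOMMU API rather than
 * anything defined in this file): arm_smmu_map() is reached via iommu_map()
 * on an attached domain. The iova and page below are arbitrary example
 * values.
 *
 *	struct iommu_domain *d = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (d && !iommu_attach_device(d, dev))
 *		iommu_map(d, 0x10000000, page_to_phys(page),
 *			  SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *
 * The prot bits are translated into page table attributes by the io-pgtable
 * code behind ops->map().
 */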

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
	if (ret)
		return ret;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	arm_smmu_domain_power_off(domain, smmu_domain->smmu);
	/*
	 * While splitting up block mappings, we might allocate page table
	 * memory during unmap, so the VMIDs need to be assigned to the
	 * memory here as well.
	 */
	arm_smmu_assign_table(smmu_domain);
	/* Also unassign any pages that were freed during unmap */
	arm_smmu_unassign_table(smmu_domain);
	return ret;
}

static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
			      struct scatterlist *sg, unsigned int nents, int prot)
{
	int ret;
	size_t size;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
	if (ret)
		return ret;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	if (!ret)
		arm_smmu_unmap(domain, iova, size);

	arm_smmu_domain_power_off(domain, smmu_domain->smmu);
	arm_smmu_assign_table(smmu_domain);

	return ret;
}

static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
						dma_addr_t iova, bool do_halt)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	unsigned long flags;
	u32 tmp;
	u64 phys;
	unsigned long va;

	spin_lock_irqsave(&smmu->atos_lock, flags);
	if (do_halt && arm_smmu_halt(smmu)) {
		phys = 0;
		goto out_unlock;
	}

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		phys = ops->iova_to_phys(ops, iova);
		dev_err(dev,
			"iova to phys timed out on %pad. software table walk result=%pa.\n",
			&iova, &phys);
		phys = 0;
		goto out_resume;
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		phys = 0;
	} else {
		phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
	}
out_resume:
	if (do_halt)
		arm_smmu_resume(smmu);
out_unlock:
	spin_unlock_irqrestore(&smmu->atos_lock, flags);
	return phys;
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

/*
 * This function can sleep, and cannot be called from atomic context. Will
 * power on register block if required. This restriction does not apply to the
 * original iova_to_phys() op.
 */
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	phys_addr_t ret = 0;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int err;

	err = arm_smmu_power_on(smmu_domain->smmu);
	if (err)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
		ret = __arm_smmu_iova_to_phys_hard(domain, iova, true);

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	arm_smmu_power_off(smmu_domain->smmu);
	return ret;
}

static phys_addr_t arm_smmu_iova_to_phys_hard_no_halt(
	struct iommu_domain *domain, dma_addr_t iova)
{
	return __arm_smmu_iova_to_phys_hard(domain, iova, false);
}
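
/*
 * Note on the two lookup paths (illustrative): arm_smmu_iova_to_phys()
 * walks the software page tables under pgtbl_lock and is usable from
 * atomic context, while arm_smmu_iova_to_phys_hard() asks the hardware to
 * translate via the ATS1PR register, which requires powering the SMMU on
 * and (optionally) halting it first:
 *
 *	phys = arm_smmu_iova_to_phys(domain, iova);	   SW table walk
 *	phys = arm_smmu_iova_to_phys_hard(domain, iova);   HW ATOS, may sleep
 *
 * Disagreement between the two results usually points at a stale TLB entry
 * or corrupted page tables, which makes the hard variant useful when
 * debugging context faults.
 */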

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids)
		cfg->streamids[cfg->num_streamids++] = sid;

	return 0;
}

static int arm_smmu_init_platform_device(struct device *dev,
					 struct iommu_group *group)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master *master;

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	iommu_group_set_iommudata(group, &master->cfg, NULL);

	return 0;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int ret = 0;

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	case DOMAIN_ATTR_PT_BASE_ADDR:
		*((phys_addr_t *)data) =
			smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
		return 0;
	case DOMAIN_ATTR_CONTEXT_BANK:
		/* context bank index isn't valid until we are attached */
		if (smmu_domain->smmu == NULL)
			return -ENODEV;

		*((unsigned int *)data) = smmu_domain->cfg.cbndx;
		ret = 0;
		break;
	case DOMAIN_ATTR_TTBR0: {
		u64 val;
		struct arm_smmu_device *smmu = smmu_domain->smmu;
		/* not valid until we are attached */
		if (smmu == NULL)
			return -ENODEV;

		val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
		if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
			val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
				<< (TTBRn_ASID_SHIFT);
		*((u64 *)data) = val;
		ret = 0;
		break;
	}
	case DOMAIN_ATTR_CONTEXTIDR:
		/* not valid until attached */
		if (smmu_domain->smmu == NULL)
			return -ENODEV;
		*((u32 *)data) = smmu_domain->cfg.procid;
		ret = 0;
		break;
	case DOMAIN_ATTR_PROCID:
		*((u32 *)data) = smmu_domain->cfg.procid;
		ret = 0;
		break;
	case DOMAIN_ATTR_DYNAMIC:
		*((int *)data) = !!(smmu_domain->attributes
				    & (1 << DOMAIN_ATTR_DYNAMIC));
		ret = 0;
		break;
	case DOMAIN_ATTR_NON_FATAL_FAULTS:
		*((int *)data) = !!(smmu_domain->attributes
				    & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
		ret = 0;
		break;
	case DOMAIN_ATTR_S1_BYPASS:
		*((int *)data) = !!(smmu_domain->attributes
				    & (1 << DOMAIN_ATTR_S1_BYPASS));
		ret = 0;
		break;
	case DOMAIN_ATTR_SECURE_VMID:
		*((int *)data) = smmu_domain->secure_vmid;
		ret = 0;
		break;
	default:
		return -ENODEV;
	}
	return ret;
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	case DOMAIN_ATTR_PROCID:
		if (smmu_domain->smmu != NULL) {
			dev_err(smmu_domain->smmu->dev,
				"cannot change procid attribute while attached\n");
			ret = -EBUSY;
			break;
		}
		smmu_domain->cfg.procid = *((u32 *)data);
		ret = 0;
		break;
	case DOMAIN_ATTR_DYNAMIC: {
		int dynamic = *((int *)data);

		if (smmu_domain->smmu != NULL) {
			dev_err(smmu_domain->smmu->dev,
				"cannot change dynamic attribute while attached\n");
			ret = -EBUSY;
			break;
		}

		if (dynamic)
			smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
		else
			smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
		ret = 0;
		break;
	}
	case DOMAIN_ATTR_CONTEXT_BANK:
		/* context bank can't be set while attached */
		if (smmu_domain->smmu != NULL) {
			ret = -EBUSY;
			break;
		}
		/* ... and it can only be set for dynamic contexts. */
		if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
			ret = -EINVAL;
			break;
		}

		/* this will be validated during attach */
		smmu_domain->cfg.cbndx = *((unsigned int *)data);
		ret = 0;
		break;
	case DOMAIN_ATTR_NON_FATAL_FAULTS: {
		u32 non_fatal_faults = *((int *)data);

		if (non_fatal_faults)
			smmu_domain->attributes |=
				1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
		else
			smmu_domain->attributes &=
				~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
		ret = 0;
		break;
	}
	case DOMAIN_ATTR_S1_BYPASS: {
		int bypass = *((int *)data);

		/* bypass can't be changed while attached */
		if (smmu_domain->smmu != NULL) {
			ret = -EBUSY;
			break;
		}
		if (bypass)
			smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
		else
			smmu_domain->attributes &=
				~(1 << DOMAIN_ATTR_S1_BYPASS);

		ret = 0;
		break;
	}
	case DOMAIN_ATTR_ATOMIC:
	{
		int atomic_ctx = *((int *)data);

		/* can't be changed while attached */
		if (smmu_domain->smmu != NULL) {
			ret = -EBUSY;
			break;
		}
		if (atomic_ctx)
			smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
		else
			smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
		break;
	}
	case DOMAIN_ATTR_SECURE_VMID:
		if (smmu_domain->secure_vmid != VMID_INVAL) {
			ret = -ENODEV;
			WARN(1, "secure vmid already set!");
			break;
		}
		smmu_domain->secure_vmid = *((int *)data);
		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
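
/*
 * Usage sketch (illustrative): a client creates a dynamic domain by setting
 * the attributes before the first attach, since both DOMAIN_ATTR_DYNAMIC
 * and DOMAIN_ATTR_CONTEXT_BANK reject changes on an attached domain. The
 * parent_cbndx value is an assumption here, typically read back from an
 * already-attached parent domain via DOMAIN_ATTR_CONTEXT_BANK:
 *
 *	int dynamic = 1;
 *	unsigned int cbndx = parent_cbndx;
 *
 *	d = iommu_domain_alloc(&platform_bus_type);
 *	iommu_domain_set_attr(d, DOMAIN_ATTR_DYNAMIC, &dynamic);
 *	iommu_domain_set_attr(d, DOMAIN_ATTR_CONTEXT_BANK, &cbndx);
 *	iommu_attach_device(d, dev);	validates cbndx, allocates an ASID
 */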
2531
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002532static void arm_smmu_trigger_fault(struct iommu_domain *domain,
2533 unsigned long flags)
2534{
2535 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2536 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2537 struct arm_smmu_device *smmu;
2538 void __iomem *cb_base;
2539
2540 if (!smmu_domain->smmu) {
2541 pr_err("Can't trigger faults on non-attached domains\n");
2542 return;
2543 }
2544
2545 smmu = smmu_domain->smmu;
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002546 if (arm_smmu_power_on(smmu))
2547 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002548
2549 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2550 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
2551 flags, cfg->cbndx);
2552 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07002553 /* give the interrupt time to fire... */
2554 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002555
2556 arm_smmu_power_off(smmu);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002557}
2558
static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
				       unsigned long offset)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	unsigned long val;

	if (offset >= SZ_4K) {
		pr_err("Invalid offset: 0x%lx\n", offset);
		return 0;
	}

	smmu = smmu_domain->smmu;
	if (!smmu) {
		WARN(1, "Can't read registers of a detached domain\n");
		return 0;
	}

	if (arm_smmu_power_on(smmu))
		return 0;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	val = readl_relaxed(cb_base + offset);

	arm_smmu_power_off(smmu);
	return val;
}

static void arm_smmu_reg_write(struct iommu_domain *domain,
			       unsigned long offset, unsigned long val)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;

	if (offset >= SZ_4K) {
		pr_err("Invalid offset: 0x%lx\n", offset);
		return;
	}

	smmu = smmu_domain->smmu;
	if (!smmu) {
		WARN(1, "Can't write registers of a detached domain\n");
		return;
	}

	if (arm_smmu_power_on(smmu))
		return;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(val, cb_base + offset);

	arm_smmu_power_off(smmu);
}

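/*
 * The iommu_ops published to the IOMMU core. Alongside the standard
 * callbacks, this driver exposes iova_to_phys_hard (translation via the
 * hardware), trigger_fault and the reg_read/reg_write debug accessors.
 */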
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.detach_dev		= arm_smmu_detach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= arm_smmu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.iova_to_phys_hard	= arm_smmu_iova_to_phys_hard,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
	.trigger_fault		= arm_smmu_trigger_fault,
	.reg_read		= arm_smmu_reg_read,
	.reg_write		= arm_smmu_reg_write,
};

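/*
 * Implementation-defined halt/resume handshake: request a local halt
 * through the IMPL_DEF1 MICRO_MMU_CTRL register and, optionally, poll
 * until the hardware reports idle, so registers can be reprogrammed
 * without new translations in flight.
 */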
static int arm_smmu_wait_for_halt(struct arm_smmu_device *smmu)
{
	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
	u32 tmp;

	if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
				      tmp, (tmp & MICRO_MMU_CTRL_IDLE),
				      0, 30000)) {
		dev_err(smmu->dev, "Couldn't halt SMMU!\n");
		return -EBUSY;
	}

	return 0;
}

static int __arm_smmu_halt(struct arm_smmu_device *smmu, bool wait)
{
	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
	u32 reg;

	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
	reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
	writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);

	return wait ? arm_smmu_wait_for_halt(smmu) : 0;
}

static int arm_smmu_halt(struct arm_smmu_device *smmu)
{
	return __arm_smmu_halt(smmu, true);
}

static int arm_smmu_halt_nowait(struct arm_smmu_device *smmu)
{
	return __arm_smmu_halt(smmu, false);
}

static void arm_smmu_resume(struct arm_smmu_device *smmu)
{
	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
	u32 reg;

	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
	reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
	writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
}

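/*
 * Replay the board-specific register values parsed from the
 * "attach-impl-defs" DT property. The SMMU is halted around the writes
 * so implementation-defined registers aren't changed under live traffic.
 */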
static void arm_smmu_impl_def_programming(struct arm_smmu_device *smmu)
{
	int i;
	struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;

	arm_smmu_halt(smmu);
	for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
		writel_relaxed(regs[i].value,
			       ARM_SMMU_GR0(smmu) + regs[i].offset);
	arm_smmu_resume(smmu);
}

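/*
 * Put every context bank into a clean state: translation disabled, stale
 * fault status cleared, and the MMU-500 errata / QCOM shareability
 * ACTLR settings applied.
 */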
static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
{
	int i;
	u32 reg, major;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;

	/*
	 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
	 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
	 * bit is only present in MMU-500r2 onwards.
	 */
	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
	major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
	if ((smmu->model == ARM_MMU500) && (major >= 2)) {
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}

		if (smmu->model == QCOM_SMMUV2) {
			reg = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
			      ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
			      ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}
}

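/*
 * Global reset sequence: clear the global fault status, point all stream
 * mappings at bypass (or fault, when bypass is disabled), reset the
 * context banks, replay the implementation-defined programming,
 * invalidate the TLBs and write the final global configuration to sCR0.
 */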
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	int i = 0;
	u32 reg;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
		/*
		 * Mark all SMRn as invalid and all S2CRn as bypass unless
		 * overridden
		 */
		reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
		for (i = 0; i < smmu->num_mapping_groups; ++i) {
			writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
			writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
		}

		arm_smmu_context_bank_reset(smmu);
	}

	/* Program implementation defined registers */
	arm_smmu_impl_def_programming(smmu);

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

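/*
 * Decode the address-size fields of the ID registers (IAS/OAS/UBS):
 * 0 -> 32 bits, 1 -> 36, 2 -> 40, 3 -> 42, 4 -> 44, 5 and above -> 48.
 */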
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

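/*
 * Parse the optional "attach-impl-defs" DT property: a flat list of
 * <offset value> u32 pairs written to implementation-defined registers
 * at attach time, e.g. (offsets and values purely illustrative):
 *
 *	attach-impl-defs = <0x6000 0x270>,
 *			   <0x6060 0x1055>;
 */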
static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;
	int i, ntuples, ret;
	u32 *tuples;
	struct arm_smmu_impl_def_reg *regs, *regit;

	if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
		return 0;

	ntuples /= sizeof(u32);
	if (ntuples % 2) {
		dev_err(dev,
			"Invalid number of attach-impl-defs registers: %d\n",
			ntuples);
		return -EINVAL;
	}

	regs = devm_kmalloc(
		dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
		GFP_KERNEL);
	if (!regs)
		return -ENOMEM;

	tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
	if (!tuples)
		return -ENOMEM;

	ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
					 tuples, ntuples);
	if (ret)
		return ret;

	for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
		regit->offset = tuples[i];
		regit->value = tuples[i + 1];
	}

	devm_kfree(dev, tuples);

	smmu->impl_def_attach_registers = regs;
	smmu->num_impl_def_attach_registers = ntuples / 2;

	return 0;
}

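/*
 * Acquire the clocks named by the "clock-names" DT property. Any clock
 * still reporting a rate of zero is programmed to the nearest supported
 * rate to 1 kHz so it can be enabled later.
 */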
static int arm_smmu_init_clocks(struct arm_smmu_device *smmu)
{
	const char *cname;
	struct property *prop;
	int i;
	struct device *dev = smmu->dev;

	smmu->num_clocks =
		of_property_count_strings(dev->of_node, "clock-names");

	if (smmu->num_clocks < 1)
		return 0;

	smmu->clocks = devm_kzalloc(
		dev, sizeof(*smmu->clocks) * smmu->num_clocks,
		GFP_KERNEL);

	if (!smmu->clocks) {
		dev_err(dev, "Failed to allocate memory for clocks\n");
		return -ENOMEM;
	}

	i = 0;
	of_property_for_each_string(dev->of_node, "clock-names",
				    prop, cname) {
		struct clk *c = devm_clk_get(dev, cname);

		if (IS_ERR(c)) {
			dev_err(dev, "Couldn't get clock: %s\n", cname);
			return PTR_ERR(c);
		}

		if (clk_get_rate(c) == 0) {
			long rate = clk_round_rate(c, 1000);

			clk_set_rate(c, rate);
		}

		smmu->clocks[i] = c;

		++i;
	}
	return 0;
}

static int arm_smmu_init_regulators(struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;

	if (!of_get_property(dev->of_node, "vdd-supply", NULL))
		return 0;

	smmu->gdsc = devm_regulator_get(dev, "vdd");
	if (IS_ERR(smmu->gdsc))
		return PTR_ERR(smmu->gdsc);

	return 0;
}

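/*
 * Register an msm_bus client for bandwidth voting. The master port comes
 * from the optional "qcom,bus-master-id" DT property; if it is absent,
 * bus scaling is skipped altogether.
 */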
static int arm_smmu_init_bus_scaling(struct platform_device *pdev,
				     struct arm_smmu_device *smmu)
{
	u32 master_id;

	if (of_property_read_u32(pdev->dev.of_node, "qcom,bus-master-id",
				 &master_id)) {
		dev_dbg(smmu->dev, "No bus scaling info\n");
		return 0;
	}

	smmu->bus_client_name = devm_kasprintf(
		smmu->dev, GFP_KERNEL, "smmu-bus-client-%s",
		dev_name(smmu->dev));

	if (!smmu->bus_client_name)
		return -ENOMEM;

	smmu->bus_client = msm_bus_scale_register(
		master_id, MSM_BUS_SLAVE_EBI_CH0, smmu->bus_client_name, true);
	if (IS_ERR(smmu->bus_client)) {
		int ret = PTR_ERR(smmu->bus_client);

		if (ret != -EPROBE_DEFER)
			dev_err(smmu->dev, "Bus client registration failed\n");
		return ret;
	}

	return 0;
}

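/*
 * Probe the hardware configuration from the ID registers: supported
 * translation stages, stream matching resources, context bank counts,
 * address sizes and page table formats, applying the force_stage module
 * parameter and model-specific quirks along the way.
 */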
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
		   smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
	      (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
	    ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
			smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
			smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
			writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
			smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

			mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
			sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
			if ((mask & sid) != sid) {
				dev_err(smmu->dev,
					"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
					mask, sid);
				return -ENODEV;
			}

			dev_notice(smmu->dev,
				   "\tstream matching with %u register groups, mask 0x%x",
				   smmu->num_mapping_groups, mask);
		}
	} else {
		smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
					   ID0_NUMSIDB_MASK;
	}

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			 size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

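/* Per-compatible probe data: architecture version plus implementation */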
struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

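/*
 * Platform probe: map the register space, gather interrupts, set up
 * clocks, regulators and bus scaling, power the SMMU on long enough to
 * probe its configuration and register the DT "mmu-masters", then reset
 * the hardware and power back down until a domain is attached.
 */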
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_iterator it;
	struct arm_smmu_phandle_args *masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;
	spin_lock_init(&smmu->atos_lock);
	mutex_init(&smmu->power_lock);
	spin_lock_init(&smmu->clock_refs_lock);
	idr_init(&smmu->asid_idr);
	mutex_init(&smmu->idr_mutex);

	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	data = of_id->data;
	smmu->version = data->version;
	smmu->model = data->model;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	parse_driver_options(smmu);

	err = arm_smmu_init_clocks(smmu);
	if (err)
		return err;

	err = arm_smmu_init_regulators(smmu);
	if (err)
		return err;

	err = arm_smmu_init_bus_scaling(pdev, smmu);
	if (err)
		return err;

	err = arm_smmu_power_on(smmu);
	if (err)
		return err;

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		goto out_power_off;

	i = 0;
	smmu->masters = RB_ROOT;

	err = -ENOMEM;
	/* No need to zero the memory for masterspec */
	masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
	if (!masterspec)
		goto out_put_masters;

	of_for_each_phandle(&it, err, dev->of_node,
			    "mmu-masters", "#stream-id-cells", 0) {
		int count = of_phandle_iterator_args(&it, masterspec->args,
						     MAX_MASTER_STREAMIDS);
		masterspec->np = of_node_get(it.node);
		masterspec->args_count = count;

		err = register_smmu_master(smmu, dev, masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec->np->name);
			kfree(masterspec);
			goto out_put_masters;
		}

		i++;
	}

	dev_notice(dev, "registered %d master devices\n", i);

	kfree(masterspec);

	err = arm_smmu_parse_impl_def_registers(smmu);
	if (err)
		goto out_put_masters;

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
						NULL, arm_smmu_global_fault,
						IRQF_ONESHOT | IRQF_SHARED,
						"arm-smmu global fault", smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_put_masters;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	arm_smmu_power_off(smmu);
	return 0;

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

out_power_off:
	arm_smmu_power_off(smmu);

	return err;
}

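/*
 * Teardown: unlink the instance from the global list, release the master
 * DT nodes, free the global IRQs, disable client access via sCR0_CLIENTPD
 * and drop the power and bus votes. Removal with active domains is
 * reported but not prevented.
 */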
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	if (arm_smmu_power_on(smmu))
		return -EINVAL;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		devm_free_irq(smmu->dev, smmu->irqs[i], smmu);

	idr_destroy(&smmu->asid_idr);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	arm_smmu_power_off(smmu);

	msm_bus_scale_unregister(smmu->bus_client);

	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

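/*
 * Module init: bail out quietly on systems without an ARM SMMU node,
 * then register the platform driver and install arm_smmu_ops on the
 * platform, AMBA and PCI bus types wherever no IOMMU is present yet.
 */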
static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");