/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))
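/*
 * As the two macros above encode, the translation context banks occupy
 * the upper half of the SMMU's register space, one translation unit
 * (1 << pgshift bytes) per bank.
 */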

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}
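/*
 * s2cr_init_val is the state an S2CR returns to when its last user goes
 * away (see arm_smmu_free_sme()): unmatched streams then either fault or
 * bypass, depending on the disable_bypass parameter above.
 */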

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
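/*
 * Typical usage, as seen throughout the stream-mapping code below:
 *
 *	for_each_cfg_sme(fwspec, i, idx)
 *		arm_smmu_write_sme(smmu, idx);
 *
 * 'i' walks the fwspec's stream IDs while 'idx' yields the corresponding
 * stream map entry, or INVALID_SMENDX if none has been allocated yet.
 */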

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	u32				cavium_id_base; /* Specific to Cavium */
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
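/*
 * cavium_id_base is non-zero only on Cavium implementations, where it
 * offsets each SMMU's IDs so that ASID/VMID namespaces don't collide
 * across the multiple SMMUs on those SoCs; elsewhere the ASID is simply
 * the context bank index, and the VMID is that plus one.
 */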

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

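/*
 * For PCI masters the legacy "mmu-masters" binding lives on the host
 * bridge's DT node, hence the walk up to the root bus below.
 */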
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

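/*
 * Note the in/out trick with the 'data' cookie below: on entry it points
 * at the of_phandle_iterator to use; on a successful match it is
 * overwritten with the matching SMMU's struct device before returning 1.
 */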
static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

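/*
 * Lock-free allocator for context banks: find_next_zero_bit() can race
 * with a concurrent claim, so loop until test_and_set_bit() confirms the
 * slot is really ours.
 */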
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			/* Clear the page offset to make room for the ASID */
			iova &= ~0xfffUL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

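/*
 * These callbacks are invoked by the io-pgtable code as mappings are
 * torn down: tlb_add_flush() issues the (relaxed) per-granule
 * invalidations, tlb_sync() waits for them to complete, and
 * tlb_flush_all() nukes the whole context by ASID/VMID.
 */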
static const struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg, reg2;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);

		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
			writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		} else {
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
		}
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.tcr;
			reg2 = 0;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg2 |= TTBCR2_SEP_UPSTREAM;
		}
		if (smmu->version > ARM_SMMU_V1)
			writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.prrr;
			reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;

	if (smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;

	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

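/*
 * Worked example for the SMR matching below (hypothetical values): an
 * existing entry of id=0x400/mask=0x0ff matches stream IDs 0x400-0x4ff.
 * A new id=0x420/mask=0x00f is entirely contained within it, so its
 * index is reused; a new id=0x4f0/mask=0xf00 merely overlaps it, so it
 * is rejected with -EINVAL.
 */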
static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

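/*
 * fwspec IDs for this driver pack an optional SMR mask into their upper
 * 16 bits (as set up by this driver's of_xlate() from a two-cell
 * "iommus" specifier), hence the unpacking below.
 */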
1127static int arm_smmu_master_alloc_smes(struct device *dev)
1128{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001129 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1130 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy588888a2016-09-12 17:13:54 +01001131 struct arm_smmu_device *smmu = cfg->smmu;
1132 struct arm_smmu_smr *smrs = smmu->smrs;
1133 struct iommu_group *group;
1134 int i, idx, ret;
1135
1136 mutex_lock(&smmu->stream_map_mutex);
1137 /* Figure out a viable stream map entry allocation */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001138 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy021bb842016-09-14 15:26:46 +01001139 u16 sid = fwspec->ids[i];
1140 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1141
Robin Murphy588888a2016-09-12 17:13:54 +01001142 if (idx != INVALID_SMENDX) {
1143 ret = -EEXIST;
1144 goto out_err;
1145 }
1146
Robin Murphy021bb842016-09-14 15:26:46 +01001147 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy588888a2016-09-12 17:13:54 +01001148 if (ret < 0)
1149 goto out_err;
1150
1151 idx = ret;
1152 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy021bb842016-09-14 15:26:46 +01001153 smrs[idx].id = sid;
1154 smrs[idx].mask = mask;
Robin Murphy588888a2016-09-12 17:13:54 +01001155 smrs[idx].valid = true;
1156 }
1157 smmu->s2crs[idx].count++;
1158 cfg->smendx[i] = (s16)idx;
1159 }
1160
1161 group = iommu_group_get_for_dev(dev);
1162 if (!group)
1163 group = ERR_PTR(-ENOMEM);
1164 if (IS_ERR(group)) {
1165 ret = PTR_ERR(group);
1166 goto out_err;
1167 }
1168 iommu_group_put(group);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001169
Will Deacon45ae7cf2013-06-24 18:31:25 +01001170 /* It worked! Now, poke the actual hardware */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001171 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001172 arm_smmu_write_sme(smmu, idx);
1173 smmu->s2crs[idx].group = group;
1174 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001175
Robin Murphy588888a2016-09-12 17:13:54 +01001176 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001177 return 0;
1178
Robin Murphy588888a2016-09-12 17:13:54 +01001179out_err:
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001180 while (i--) {
Robin Murphy588888a2016-09-12 17:13:54 +01001181 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001182 cfg->smendx[i] = INVALID_SMENDX;
1183 }
Robin Murphy588888a2016-09-12 17:13:54 +01001184 mutex_unlock(&smmu->stream_map_mutex);
1185 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001186}

static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
	u8 cbndx = smmu_domain->cfg.cbndx;
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx) {
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			continue;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}
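
/*
 * Note (added): once this returns, every stream map entry of the master
 * holds S2CR[idx] = { S2CR_TYPE_TRANS, S2CR_PRIVCFG_UNPRIV, cbndx },
 * i.e. matched transactions are translated by the domain's context bank
 * rather than bypassing or faulting.
 */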

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/*
	 * FIXME: The arch/arm DMA API code tries to attach devices to its own
	 * domains between of_xlate() and add_device() - we have no way to cope
	 * with that, so until ARM gets converted to rely on groups and default
	 * domains, just say no (but more politely than by dereferencing NULL).
	 * This should be at least a WARN_ON once that's sorted.
	 */
	if (!fwspec->iommu_priv)
		return -ENODEV;

	smmu = fwspec_smmu(fwspec);
	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	return arm_smmu_domain_add_master(smmu_domain, fwspec);
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
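
/*
 * Usage sketch (added, hypothetical values): callers reach this through
 * the core API rather than directly, e.g.
 *
 *	iommu_map(domain, 0x10000000, 0x80000000, SZ_4K,
 *		  IOMMU_READ | IOMMU_WRITE);
 *
 * which lands here with the same arguments once the core has split the
 * request into pgsize_bitmap-sized chunks.
 */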

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}
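
/*
 * Worked example (added, illustrative values): a successful ATS1PR walk
 * for iova 0x10000234 might leave PAR = 0x80042000 with CB_PAR_F clear;
 * the return above then yields (0x80042000 & GENMASK_ULL(39, 12)) |
 * 0x234 = 0x80042234, i.e. the page-aligned PA from PAR plus the page
 * offset carried over from the iova.
 */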

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int arm_smmu_match_node(struct device *dev, void *data)
{
	return dev->fwnode == data;
}

static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
						fwnode, arm_smmu_match_node);
	put_device(dev);
	return dev ? dev_get_drvdata(dev) : NULL;
}
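
/*
 * Note (added): put_device() immediately drops the reference taken by
 * driver_find_device(); returning the drvdata afterwards assumes the
 * SMMU device is not unbound concurrently, which holds for the way
 * these instances are created and used.
 */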

static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	int i, ret;

	if (using_legacy_binding) {
		ret = arm_smmu_register_legacy_master(dev, &smmu);
		fwspec = dev->iommu_fwspec;
		if (ret)
			goto out_free;
	} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
	} else {
		return -ENODEV;
	}

	ret = -EINVAL;
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = fwspec->ids[i];
		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

		if (sid & ~smmu->streamid_mask) {
			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
				sid, smmu->streamid_mask);
			goto out_free;
		}
		if (mask & ~smmu->smr_mask_mask) {
			dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
				mask, smmu->smr_mask_mask);
			goto out_free;
		}
	}

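	/*
	 * Note (added): the validation loop above leaves i == fwspec->num_ids,
	 * so the offsetof() below sizes the allocation to hold exactly one
	 * trailing smendx[] slot per stream ID.
	 */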
	ret = -ENOMEM;
	cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
		      GFP_KERNEL);
	if (!cfg)
		goto out_free;

	cfg->smmu = smmu;
	fwspec->iommu_priv = cfg;
	while (i--)
		cfg->smendx[i] = INVALID_SMENDX;

	ret = arm_smmu_master_alloc_smes(dev);
	if (ret)
		goto out_free;

	return 0;

out_free:
	if (fwspec)
		kfree(fwspec->iommu_priv);
	iommu_fwspec_free(dev);
	return ret;
}

static void arm_smmu_remove_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return;

	arm_smmu_master_free_smes(fwspec);
	iommu_group_remove_device(dev);
	kfree(fwspec->iommu_priv);
	iommu_fwspec_free(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct iommu_group *group = NULL;
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx) {
		if (group && smmu->s2crs[idx].group &&
		    group != smmu->s2crs[idx].group)
			return ERR_PTR(-EINVAL);

		group = smmu->s2crs[idx].group;
	}

	if (group)
		return iommu_group_ref_get(group);

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	u32 fwid = 0;

	if (args->args_count > 0)
		fwid |= (u16)args->args[0];

	if (args->args_count > 1)
		fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;

	return iommu_fwspec_add_ids(dev, &fwid, 1);
}
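
/*
 * Illustrative example (added, hypothetical specifier): a DT entry of
 *
 *	iommus = <&smmu 0x42 0x7f00>;
 *
 * packs StreamID 0x42 into fwid[15:0] and SMR mask 0x7f00 into
 * fwid[31:16], which is exactly how arm_smmu_add_device() unpacks the
 * ID again via SMR_MASK_SHIFT.
 */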

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.of_xlate		= arm_smmu_of_xlate,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i;
	u32 reg, major;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/*
	 * Reset stream mapping groups: Initial values mark all SMRn as
	 * invalid and all S2CRn as bypass unless overridden.
	 */
	for (i = 0; i < smmu->num_mapping_groups; ++i)
		arm_smmu_write_sme(smmu, i);

	if (smmu->model == ARM_MMU500) {
		/*
		 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
		 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
		 * bit is only present in MMU-500r2 onwards.
		 */
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
		major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		if (major >= 2)
			reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		/*
		 * Allow unmatched Stream IDs to allocate bypass
		 * TLB entries for reduced latency.
		 */
		reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
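
/*
 * Note (added): the tlb_sync above drains the relaxed TLB invalidation
 * writes before the non-relaxed sCR0 write re-enables client access,
 * so the SMMU cannot start translating through stale TLB entries.
 */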

static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}
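
/* E.g. (added) an ID-register size field of 2 decodes to 40 bits. */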

static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
	int i;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
		   smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
	      (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
	    ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the FW says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_fw || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_fw ? "" : "non-");
	if (cttw_fw != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by FW configuration)\n");

	/* Max. number of entries we have for stream matching/indexing */
	size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
	smmu->streamid_mask = size - 1;
	if (id & ID0_SMS) {
		u32 smr;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
		if (size == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		/*
		 * SMR.ID bits may not be preserved if the corresponding MASK
		 * bits are set, so check each one separately. We can reject
		 * masters later if they try to claim IDs outside these masks.
		 */
		smr = smmu->streamid_mask << SMR_ID_SHIFT;
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
		smmu->streamid_mask = smr >> SMR_ID_SHIFT;

		smr = smmu->streamid_mask << SMR_MASK_SHIFT;
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
		smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
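
		/*
		 * Illustrative note (added): writing all-ones to a field and
		 * reading it back reveals which bits the hardware actually
		 * implements, e.g. an SMMU with 15-bit StreamIDs reports
		 * smmu->streamid_mask = 0x7fff here whatever NUMSIDB claimed.
		 */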

		/* Zero-initialised to mark as invalid */
		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
					  GFP_KERNEL);
		if (!smmu->smrs)
			return -ENOMEM;

		dev_notice(smmu->dev,
			   "\tstream matching with %lu register groups, mask 0x%x\n",
			   size, smmu->smr_mask_mask);
	}
	/* s2cr->type == 0 means translation, so initialise explicitly */
	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
					 GFP_KERNEL);
	if (!smmu->s2crs)
		return -ENOMEM;
	for (i = 0; i < size; i++)
		smmu->s2crs[i] = s2cr_init_val;

	smmu->num_mapping_groups = size;
	mutex_init(&smmu->stream_map_mutex);

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			 size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

#ifdef CONFIG_ACPI
static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
{
	int ret = 0;

	switch (model) {
	case ACPI_IORT_SMMU_V1:
	case ACPI_IORT_SMMU_CORELINK_MMU400:
		smmu->version = ARM_SMMU_V1;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_V2:
		smmu->version = ARM_SMMU_V2;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_CORELINK_MMU500:
		smmu->version = ARM_SMMU_V2;
		smmu->model = ARM_MMU500;
		break;
	default:
		ret = -ENODEV;
	}

	return ret;
}

static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
				      struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node =
		*(struct acpi_iort_node **)dev_get_platdata(dev);
	struct acpi_iort_smmu *iort_smmu;
	int ret;

	/* Retrieve SMMU1/2 specific data */
	iort_smmu = (struct acpi_iort_smmu *)node->node_data;

	ret = acpi_smmu_get_data(iort_smmu->model, smmu);
	if (ret < 0)
		return ret;

	/* Ignore the configuration access interrupt */
	smmu->num_global_irqs = 1;

	if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
#endif

static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	const struct arm_smmu_match_data *data;
	struct device *dev = &pdev->dev;
	bool legacy_binding;

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	data = of_device_get_match_data(dev);
	smmu->version = data->version;
	smmu->model = data->model;

	parse_driver_options(smmu);

	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
	if (legacy_binding && !using_generic_binding) {
		if (!using_legacy_binding)
			pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
		using_legacy_binding = true;
	} else if (!legacy_binding && !using_legacy_binding) {
		using_generic_binding = true;
	} else {
		dev_err(dev, "not probing due to mismatched DT properties\n");
		return -ENODEV;
	}

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}

static int arm_smmu_device_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	if (dev->of_node)
		err = arm_smmu_device_dt_probe(pdev, smmu);
	else
		err = arm_smmu_device_acpi_probe(pdev, smmu);

	if (err)
		return err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		return -ENODEV;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			return err;
		}
	}

	iommu_register_instance(dev->fwnode, &arm_smmu_ops);
	platform_set_drvdata(pdev, smmu);
	arm_smmu_device_reset(smmu);

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif
#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif
	return 0;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return -ENODEV;

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(&pdev->dev, "removing device with active domains!\n");

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	static bool registered;
	int ret = 0;

	if (!registered) {
		ret = platform_driver_register(&arm_smmu_driver);
		registered = !ret;
	}
	return ret;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

static int __init arm_smmu_of_init(struct device_node *np)
{
	int ret = arm_smmu_init();

	if (ret)
		return ret;

	if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
		return -ENODEV;

	return 0;
}
IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);

#ifdef CONFIG_ACPI
static int __init arm_smmu_acpi_init(struct acpi_table_header *table)
{
	if (iort_node_match(ACPI_IORT_NODE_SMMU))
		return arm_smmu_init();

	return 0;
}
IORT_ACPI_DECLARE(arm_smmu, ACPI_SIG_IORT, arm_smmu_acpi_init);
#endif

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");