/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
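/*
 * Iterate over a master's stream map entry indices. The comma expression
 * loads @idx from the cfg before the bounds check runs, so the loop body
 * always sees the index corresponding to fwspec ID @i.
 */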
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = __fwspec_cfg(fw)->smendx[i], i < fw->num_ids; ++i)

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	u32				cavium_id_base; /* Specific to Cavium */
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

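/*
 * Derive the TLB tags for a context bank. On Cavium implementations,
 * ASIDs/VMIDs are drawn from a single namespace shared by all SMMUs in
 * the system, hence the per-SMMU cavium_id_base offset (zero elsewhere)
 * to keep the tags unique.
 */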
#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

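/*
 * For PCI masters the legacy DT binding describes the SMMU against the
 * host controller, so walk up to the root bus and use the OF node of
 * its bridge's parent.
 */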
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

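/*
 * Handle the legacy "mmu-masters" DT binding: walk every registered SMMU
 * looking for one whose "mmu-masters" list references this device, then
 * initialise an iommu_fwspec with the stream IDs found there. For PCI
 * devices the binding assumes Stream ID == Requester ID.
 */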
static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

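/*
 * Claim a free index in @map without external locking: the zero-bit
 * search and test_and_set_bit() can race with other allocators, so loop
 * until we either win the bit or run out of space.
 */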
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

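/*
 * Invalidate by address range without waiting for completion. Stage 1
 * invalidates by VA tagged with the ASID; stage 2 invalidates by IPA on
 * SMMUv2, and on SMMUv1 can only fall back to nuking the whole VMID.
 */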
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

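/*
 * Program a context bank from the io-pgtable configuration: CBA2R/CBAR
 * routing and VMID, the TTBRs and TTBCR(2) describing the page tables,
 * the MAIRs for stage 1, and finally SCTLR to enable translation.
 */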
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg, reg2;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);

		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
			writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		} else {
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
		}
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.tcr;
			reg2 = 0;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg2 |= TTBCR2_SEP_UPSTREAM;
		}
		if (smmu->version > ARM_SMMU_V1)
			writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.prrr;
			reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/* We're bypassing these SIDs, so don't allocate an actual context */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;

	if (smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;

	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}

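/*
 * Sync both halves of stream map entry @idx to the hardware: its S2CR,
 * plus its SMR when stream matching is in use (smmu->smrs is NULL on
 * stream-indexing SMMUs, which have no SMRs to program).
 */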
static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(fwspec, i, idx) {
		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, fwspec->ids[i], 0);
		if (ret < 0)
			goto out_err;

		idx = ret;
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = fwspec->ids[i];
			smrs[idx].mask = 0; /* We don't currently share SMRs */
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	group = iommu_group_get_for_dev(dev);
	if (!group)
		group = ERR_PTR(-ENOMEM);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_err;
	}
	iommu_group_put(group);

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(fwspec, i, idx) {
		arm_smmu_write_sme(smmu, idx);
		smmu->s2crs[idx].group = group;
	}

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}

static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}

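/*
 * Point every stream map entry for this master at the domain's context
 * bank, skipping entries that are already routed appropriately.
 */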
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
	u8 cbndx = smmu_domain->cfg.cbndx;
	int i, idx;

	/*
	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
	 * for all devices behind the SMMU. Note that we need to take
	 * care when configuring SMRs for devices that are both a
	 * platform_device and a PCI device (i.e. a PCI host controller).
	 */
	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
		type = S2CR_TYPE_BYPASS;

	for_each_cfg_sme(fwspec, i, idx) {
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			continue;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}

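/*
 * Attach a device to a domain: finalise the domain on this SMMU if
 * needed, refuse cross-SMMU attachment, then update the stream mapping.
 * An illustrative caller sequence via the core IOMMU API (hypothetical
 * usage, not taken from this file):
 *
 *	dom = iommu_domain_alloc(&platform_bus_type);
 *	err = iommu_attach_device(dom, dev);
 */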
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	smmu = fwspec_smmu(fwspec);
	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	return arm_smmu_domain_add_master(smmu_domain, fwspec);
}

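/*
 * map/unmap just proxy to the io-pgtable code under the domain's
 * page-table lock; the hardware only observes the resulting tables.
 * Illustrative use via the IOMMU API (addresses are made up):
 *
 *	iommu_map(dom, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	iommu_unmap(dom, iova, SZ_4K);
 */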
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

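/*
 * Translate an IOVA in hardware by poking it into the context bank's
 * ATS1PR register, polling ATSR until the operation completes, and
 * reading the result back from PAR; on timeout, fall back to a
 * software table walk.
 */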
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

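/*
 * Use the hardware translation above only where it exists and is
 * trustworthy (ARM_SMMU_FEAT_TRANS_OPS with a stage 1 domain);
 * otherwise walk the page tables in software.
 */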
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

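/*
 * Per-device setup: register any legacy (mmu-masters) stream IDs,
 * validate each ID against what the hardware can match, then allocate
 * the per-master config and claim stream map entries.
 */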
static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;
	struct iommu_fwspec *fwspec;
	int i, ret;

	ret = arm_smmu_register_legacy_master(dev, &smmu);
	fwspec = dev->iommu_fwspec;
	if (ret)
		goto out_free;

	ret = -EINVAL;
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = fwspec->ids[i];

		if (sid & ~smmu->streamid_mask) {
			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
				sid, smmu->streamid_mask);
			goto out_free;
		}
	}

	ret = -ENOMEM;
	cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
		      GFP_KERNEL);
	if (!cfg)
		goto out_free;

	cfg->smmu = smmu;
	fwspec->iommu_priv = cfg;
	while (i--)
		cfg->smendx[i] = INVALID_SMENDX;

	ret = arm_smmu_master_alloc_smes(dev);
	if (ret)
		goto out_free;

	return 0;

out_free:
	if (fwspec)
		kfree(fwspec->iommu_priv);
	iommu_fwspec_free(dev);
	return ret;
}

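/* Per-device teardown: release the stream map entries, group and fwspec. */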
static void arm_smmu_remove_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return;

	arm_smmu_master_free_smes(fwspec);
	iommu_group_remove_device(dev);
	kfree(fwspec->iommu_priv);
	iommu_fwspec_free(dev);
}

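/*
 * Masters sharing a stream map entry must share an iommu_group, so
 * reuse any group already recorded against one of this device's
 * entries; conflicting records are a bug. Otherwise defer to the
 * generic PCI/platform group allocation.
 */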
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct iommu_group *group = NULL;
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx) {
		if (group && smmu->s2crs[idx].group &&
		    group != smmu->s2crs[idx].group)
			return ERR_PTR(-EINVAL);

		group = smmu->s2crs[idx].group;
	}

	if (group)
		return group;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	return group;
}

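/*
 * DOMAIN_ATTR_NESTING is the only attribute supported: it switches the
 * domain between the default stage-1 and nested translation, and may
 * only be changed before the domain is finalised against an SMMU.
 */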
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

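/*
 * Bring the SMMU to a known state: clear recorded faults, reset the
 * stream mapping and context banks, apply MMU-500 errata workarounds,
 * invalidate the TLBs, and finally write the desired sCR0 value.
 */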
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i;
	u32 reg, major;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/*
	 * Reset stream mapping groups: Initial values mark all SMRn as
	 * invalid and all S2CRn as bypass unless overridden.
	 */
	for (i = 0; i < smmu->num_mapping_groups; ++i)
		arm_smmu_write_sme(smmu, i);

	/*
	 * Before clearing ARM_MMU500_ACTLR_CPRE, we need to clear the
	 * CACHE_LOCK bit of ACR first. Note that the CACHE_LOCK bit is
	 * only present in MMU-500r2 onwards.
	 */
	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
	major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
	if ((smmu->model == ARM_MMU500) && (major >= 2)) {
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

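/* Decode the 3-bit address-size encodings used by the ID registers. */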
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

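/*
 * Interrogate the ID registers to discover what this implementation
 * provides: translation stages, stream matching resources, context
 * banks, address sizes and supported page table formats.
 */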
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;
	int i;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
			smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
		((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	/* Max. number of entries we have for stream matching/indexing */
	size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
	smmu->streamid_mask = size - 1;
	if (id & ID0_SMS) {
		u32 smr;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
		if (size == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		/*
		 * SMR.ID bits may not be preserved if the corresponding MASK
		 * bits are set, so check each one separately. We can reject
		 * masters later if they try to claim IDs outside these masks.
		 */
		smr = smmu->streamid_mask << SMR_ID_SHIFT;
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
		smmu->streamid_mask = smr >> SMR_ID_SHIFT;

		smr = smmu->streamid_mask << SMR_MASK_SHIFT;
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
		smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;

		/* Zero-initialised to mark as invalid */
		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
					  GFP_KERNEL);
		if (!smmu->smrs)
			return -ENOMEM;

		dev_notice(smmu->dev,
			   "\tstream matching with %lu register groups, mask 0x%x\n",
			   size, smmu->smr_mask_mask);
	}
	/* s2cr->type == 0 means translation, so initialise explicitly */
	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
					 GFP_KERNEL);
	if (!smmu->s2crs)
		return -ENOMEM;
	for (i = 0; i < size; i++)
		smmu->s2crs[i] = s2cr_init_val;

	smmu->num_mapping_groups = size;
	mutex_init(&smmu->stream_map_mutex);

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			"SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

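/* Per-compatible data: architecture version plus implementation quirks */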
struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

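/*
 * Platform probe: map the register space, parse the global/context
 * interrupt split from the DT, probe the hardware configuration and
 * install the global fault handlers before resetting the SMMU.
 */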
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	data = of_device_get_match_data(dev);
	smmu->version = data->version;
	smmu->model = data->model;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	parse_driver_options(smmu);

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		return -ENODEV;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			return err;
		}
	}

	of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
	platform_set_drvdata(pdev, smmu);
	arm_smmu_device_reset(smmu);
	return 0;
}

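/*
 * On removal, warn if any domains are still live, then set
 * sCR0.CLIENTPD to stop the SMMU translating client transactions.
 */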
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return -ENODEV;

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(&pdev->dev, "removing device with active domains!\n");

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

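/*
 * Only register the driver and hook arm_smmu_ops up to the buses we
 * may have to translate for (platform, AMBA, PCI) when a matching DT
 * node actually exists.
 */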
static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");