/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_EXIDENABLE			(1 << 3)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_EXIDS			(1 << 8)
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_EXIDVALID			(1 << 10)
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB(smmu, n)	((smmu)->cb_base + ((n) << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)
#define TTBCR2_AS			(1 << 4)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

/* Until ACPICA headers cover IORT rev. C */
#ifndef ACPI_IORT_SMMU_CORELINK_MMU401
#define ACPI_IORT_SMMU_CORELINK_MMU401	0x4
#endif
#ifndef ACPI_IORT_SMMU_CAVIUM_THUNDERX
#define ACPI_IORT_SMMU_CAVIUM_THUNDERX	0x5
#endif

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
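/*
 * A master's stream map entries live behind its iommu_fwspec: each fwspec
 * ID resolves to an SME index in the SMMU's stream map, or INVALID_SMENDX
 * while the entry has not yet been allocated.
 */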
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	void __iomem			*cb_base;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
#define ARM_SMMU_FEAT_EXIDS		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	u32				cavium_id_base; /* Specific to Cavium */

	/* IOMMU core code handle */
	struct iommu_device		iommu;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	union {
		u16			asid;
		u16			vmid;
	};
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
	ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	spinlock_t			cb_lock; /* Serialises ATS1* ops */
	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

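/*
 * With the legacy DT binding, the master->SMMU relationship is expressed
 * from the SMMU side: we find our SMMU by walking every registered SMMU
 * device's "mmu-masters" list for a phandle back to this master, then
 * hand the stream IDs over to the generic iommu_fwspec machinery.
 */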
static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

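/*
 * Lock-free index allocator: if another CPU claims the bit between our
 * find_next_zero_bit() and test_and_set_bit(), we simply search again.
 */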
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
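/*
 * The wait loop below spins on the status register TLB_SPIN_COUNT times
 * between sleeps, with the udelay() period doubling on each pass, so a
 * fast sync completes with minimal latency while a slow one backs off
 * up to the TLB_LOOP_TIMEOUT limit.
 */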
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
				void __iomem *sync, void __iomem *status)
{
	unsigned int spin_cnt, delay;

	writel_relaxed(0, sync);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	void __iomem *base = ARM_SMMU_GR0(smmu);

	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
			    base + ARM_SMMU_GR0_sTLBGSTATUS);
}

static void arm_smmu_tlb_sync_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);

	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
			    base + ARM_SMMU_CB_TLBSTATUS);
}

static void arm_smmu_tlb_sync_vmid(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_global(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_GR0(smmu);

	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
	arm_smmu_tlb_sync_global(smmu);
}

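/*
 * Per-page invalidation: stage 1 invalidates by VA (tagged with the ASID,
 * packed into the low bits for AArch32 formats or the top 16 bits for
 * AArch64), while stage 2 invalidates by IPA. The writes are relaxed and
 * unsynced; callers follow up with one of the tlb_sync callbacks above.
 */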
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	if (stage1) {
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= cfg->asid;
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)cfg->asid << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else {
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	}
}

/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and set .tlb_sync to arm_smmu_inv_context_s2() as you might think.
 */
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
					 size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);

	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
}

static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_vmid_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_vmid,
};

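/*
 * The context fault handler below is purely diagnostic: it reports the
 * faulting address and syndrome, then clears the FSR. Faulting
 * transactions are terminated rather than stalled (SCTLR.CFCFG is never
 * set), so there is nothing to resume.
 */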
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

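/*
 * Program a context bank from the io-pgtable configuration. The sequence
 * below runs CBA2R -> CBAR -> TTBCR(2) -> TTBRs -> MAIRs -> SCTLR;
 * TTBCR in particular must be written before the TTBRs, since it
 * determines how some of their fields (e.g. ASID[15:8]) are interpreted.
 */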
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg, reg2;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= cfg->vmid << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= cfg->vmid << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/*
	 * TTBCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.tcr;
			reg2 = 0;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg2 |= TTBCR2_SEP_UPSTREAM;
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				reg2 |= TTBCR2_AS;
		}
		if (smmu->version > ARM_SMMU_V1)
			writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
			writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		} else {
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
		}
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.prrr;
			reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

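/*
 * Initialise a domain on first attach: pick a translation stage and context
 * format the hardware supports, claim a context bank, allocate io-pgtable
 * ops, program the bank, and finally request the context fault IRQ.
 */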
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	const struct iommu_gather_ops *tlb_ops;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		tlb_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			tlb_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			tlb_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
	else
		cfg->asid = cfg->cbndx + smmu->cavium_id_base;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= tlb_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

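/*
 * With the Extended Stream ID (EXIDS) feature enabled, the SMR mask field
 * widens and the valid bit moves from SMR.VALID to S2CR.EXIDVALID; hence
 * the SMR write below skips SMR_VALID in that case and the S2CR write
 * sets S2CR_EXIDVALID instead.
 */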
static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001164/*
1165 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
1166 * should be called after sCR0 is written.
1167 */
1168static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
1169{
1170 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1171 u32 smr;
1172
1173 if (!smmu->smrs)
1174 return;
1175
1176 /*
1177 * SMR.ID bits may not be preserved if the corresponding MASK
1178 * bits are set, so check each one separately. We can reject
1179 * masters later if they try to claim IDs outside these masks.
1180 */
1181 smr = smmu->streamid_mask << SMR_ID_SHIFT;
1182 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1183 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1184 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
1185
1186 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
1187 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1188 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1189 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
1190}
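
/*
 * Worked example of the probe above (illustrative numbers): writing the
 * all-ones pattern to the ID field and reading back, say, 0x7fff means
 * all fifteen ID bits are implemented; an implementation wiring up only
 * ten mask bits would hand back 0x03ff from the second read-back, and
 * smr_mask_mask shrinks to match. arm_smmu_add_device() later rejects
 * masters whose IDs or masks fall outside these probed limits.
 */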
1191
Robin Murphy588888a2016-09-12 17:13:54 +01001192static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001193{
1194 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy588888a2016-09-12 17:13:54 +01001195 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001196
Robin Murphy588888a2016-09-12 17:13:54 +01001197 /* Stream indexing is blissfully easy */
1198 if (!smrs)
1199 return id;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001200
Robin Murphy588888a2016-09-12 17:13:54 +01001201 /* Validating SMRs is... less so */
1202 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1203 if (!smrs[i].valid) {
1204 /*
1205 * Note the first free entry we come across, which
1206 * we'll claim in the end if nothing else matches.
1207 */
1208 if (free_idx < 0)
1209 free_idx = i;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001210 continue;
1211 }
Robin Murphy588888a2016-09-12 17:13:54 +01001212 /*
1213 * If the new entry is _entirely_ matched by an existing entry,
1214 * then reuse that, with the guarantee that there also cannot
1215 * be any subsequent conflicting entries. In normal use we'd
1216 * expect simply identical entries for this case, but there's
1217 * no harm in accommodating the generalisation.
1218 */
1219 if ((mask & smrs[i].mask) == mask &&
1220 !((id ^ smrs[i].id) & ~smrs[i].mask))
1221 return i;
1222 /*
1223 * If the new entry has any other overlap with an existing one,
1224 * though, then there always exists at least one stream ID
1225 * which would cause a conflict, and we can't allow that risk.
1226 */
1227 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
1228 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001229 }
1230
Robin Murphy588888a2016-09-12 17:13:54 +01001231 return free_idx;
1232}
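
/*
 * A quick sketch of the matching arithmetic with made-up values, where
 * an existing entry {id = 0x400, mask = 0x0ff} covers streams
 * 0x400-0x4ff:
 *
 *  - New {id = 0x410, mask = 0x00f}: (mask & 0x0ff) == mask and
 *    (0x410 ^ 0x400) & ~0x0ff == 0, so the new entry is entirely
 *    contained and the existing index is reused.
 *
 *  - New {id = 0x4f0, mask = 0xfff}: containment fails, but
 *    (0x4f0 ^ 0x400) & ~(0x0ff | 0xfff) == 0, so the two ranges overlap
 *    and -EINVAL is returned rather than risking a conflicting match.
 */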
1233
1234static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1235{
1236 if (--smmu->s2crs[idx].count)
1237 return false;
1238
1239 smmu->s2crs[idx] = s2cr_init_val;
1240 if (smmu->smrs)
1241 smmu->smrs[idx].valid = false;
1242
1243 return true;
1244}
1245
1246static int arm_smmu_master_alloc_smes(struct device *dev)
1247{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001248 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1249 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy588888a2016-09-12 17:13:54 +01001250 struct arm_smmu_device *smmu = cfg->smmu;
1251 struct arm_smmu_smr *smrs = smmu->smrs;
1252 struct iommu_group *group;
1253 int i, idx, ret;
1254
1255 mutex_lock(&smmu->stream_map_mutex);
1256 /* Figure out a viable stream map entry allocation */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001257 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy021bb842016-09-14 15:26:46 +01001258 u16 sid = fwspec->ids[i];
1259 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1260
Robin Murphy588888a2016-09-12 17:13:54 +01001261 if (idx != INVALID_SMENDX) {
1262 ret = -EEXIST;
1263 goto out_err;
1264 }
1265
Robin Murphy021bb842016-09-14 15:26:46 +01001266 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy588888a2016-09-12 17:13:54 +01001267 if (ret < 0)
1268 goto out_err;
1269
1270 idx = ret;
1271 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy021bb842016-09-14 15:26:46 +01001272 smrs[idx].id = sid;
1273 smrs[idx].mask = mask;
Robin Murphy588888a2016-09-12 17:13:54 +01001274 smrs[idx].valid = true;
1275 }
1276 smmu->s2crs[idx].count++;
1277 cfg->smendx[i] = (s16)idx;
1278 }
1279
1280 group = iommu_group_get_for_dev(dev);
1281 if (!group)
1282 group = ERR_PTR(-ENOMEM);
1283 if (IS_ERR(group)) {
1284 ret = PTR_ERR(group);
1285 goto out_err;
1286 }
1287 iommu_group_put(group);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001288
Will Deacon45ae7cf2013-06-24 18:31:25 +01001289 /* It worked! Now, poke the actual hardware */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001290 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001291 arm_smmu_write_sme(smmu, idx);
1292 smmu->s2crs[idx].group = group;
1293 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001294
Robin Murphy588888a2016-09-12 17:13:54 +01001295 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001296 return 0;
1297
Robin Murphy588888a2016-09-12 17:13:54 +01001298out_err:
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001299 while (i--) {
Robin Murphy588888a2016-09-12 17:13:54 +01001300 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001301 cfg->smendx[i] = INVALID_SMENDX;
1302 }
Robin Murphy588888a2016-09-12 17:13:54 +01001303 mutex_unlock(&smmu->stream_map_mutex);
1304 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001305}
1306
Robin Murphyadfec2e2016-09-12 17:13:55 +01001307static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001308{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001309 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1310 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphyd3097e32016-09-12 17:13:53 +01001311 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01001312
Robin Murphy588888a2016-09-12 17:13:54 +01001313 mutex_lock(&smmu->stream_map_mutex);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001314 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001315 if (arm_smmu_free_sme(smmu, idx))
1316 arm_smmu_write_sme(smmu, idx);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001317 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001318 }
Robin Murphy588888a2016-09-12 17:13:54 +01001319 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001320}
1321
Will Deacon45ae7cf2013-06-24 18:31:25 +01001322static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphyadfec2e2016-09-12 17:13:55 +01001323 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001324{
Will Deacon44680ee2014-06-25 11:29:12 +01001325 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001326 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001327 u8 cbndx = smmu_domain->cfg.cbndx;
Will Deacon61bc6712017-01-06 16:56:03 +00001328 enum arm_smmu_s2cr_type type;
Robin Murphy588888a2016-09-12 17:13:54 +01001329 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001330
Will Deacon61bc6712017-01-06 16:56:03 +00001331 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
1332 type = S2CR_TYPE_BYPASS;
1333 else
1334 type = S2CR_TYPE_TRANS;
1335
Robin Murphyadfec2e2016-09-12 17:13:55 +01001336 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy8e8b2032016-09-12 17:13:50 +01001337 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy588888a2016-09-12 17:13:54 +01001338 continue;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001339
Robin Murphy8e8b2032016-09-12 17:13:50 +01001340 s2cr[idx].type = type;
Sricharan Re1989802017-01-06 18:58:15 +05301341 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001342 s2cr[idx].cbndx = cbndx;
1343 arm_smmu_write_s2cr(smmu, idx);
Will Deacon43b412b2014-07-15 11:22:24 +01001344 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001345 return 0;
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001346}
1347
Will Deacon45ae7cf2013-06-24 18:31:25 +01001348static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1349{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001350 int ret;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001351 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1352 struct arm_smmu_device *smmu;
Joerg Roedel1d672632015-03-26 13:43:10 +01001353 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001354
Robin Murphyadfec2e2016-09-12 17:13:55 +01001355 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001356 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1357 return -ENXIO;
1358 }
1359
Robin Murphyfba4f8e2016-10-17 12:06:21 +01001360 /*
1361 * FIXME: The arch/arm DMA API code tries to attach devices to its own
1362 * domains between of_xlate() and add_device() - we have no way to cope
1363 * with that, so until ARM gets converted to rely on groups and default
1364 * domains, just say no (but more politely than by dereferencing NULL).
1365 * This should be at least a WARN_ON once that's sorted.
1366 */
1367 if (!fwspec->iommu_priv)
1368 return -ENODEV;
1369
Robin Murphyadfec2e2016-09-12 17:13:55 +01001370 smmu = fwspec_smmu(fwspec);
Will Deacon518f7132014-11-14 17:17:54 +00001371 /* Ensure that the domain is finalised */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001372 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001373 if (ret < 0)
Will Deacon518f7132014-11-14 17:17:54 +00001374 return ret;
1375
Will Deacon45ae7cf2013-06-24 18:31:25 +01001376 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001377 * Sanity check the domain. We don't support domains across
1378 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001379 */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001380 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001381 dev_err(dev,
1382 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphyadfec2e2016-09-12 17:13:55 +01001383 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001384 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001385 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001386
1387 /* Looks ok, so add the device to the domain */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001388 return arm_smmu_domain_add_master(smmu_domain, fwspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001389}
1390
Will Deacon45ae7cf2013-06-24 18:31:25 +01001391static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001392 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001393{
Robin Murphy523d7422017-06-22 16:53:56 +01001394 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001395
Will Deacon518f7132014-11-14 17:17:54 +00001396 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001397 return -ENODEV;
1398
Robin Murphy523d7422017-06-22 16:53:56 +01001399 return ops->map(ops, iova, paddr, size, prot);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001400}
1401
1402static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1403 size_t size)
1404{
Robin Murphy523d7422017-06-22 16:53:56 +01001405 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001406
Will Deacon518f7132014-11-14 17:17:54 +00001407 if (!ops)
1408 return 0;
1409
Robin Murphy523d7422017-06-22 16:53:56 +01001410 return ops->unmap(ops, iova, size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001411}
1412
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001413static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1414 dma_addr_t iova)
1415{
Joerg Roedel1d672632015-03-26 13:43:10 +01001416 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001417 struct arm_smmu_device *smmu = smmu_domain->smmu;
1418 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1419 	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1420 struct device *dev = smmu->dev;
1421 void __iomem *cb_base;
1422 u32 tmp;
1423 u64 phys;
Robin Murphy523d7422017-06-22 16:53:56 +01001424 unsigned long va, flags;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001425
Robin Murphy452107c2017-03-30 17:56:30 +01001426 cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001427
Robin Murphy523d7422017-06-22 16:53:56 +01001428 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
Robin Murphy661d9622015-05-27 17:09:34 +01001429 /* ATS1 registers can only be written atomically */
1430 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01001431 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01001432 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
1433 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01001434 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001435
1436 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1437 !(tmp & ATSR_ACTIVE), 5, 50)) {
Robin Murphy523d7422017-06-22 16:53:56 +01001438 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001439 dev_err(dev,
Fabio Estevam077124c2015-08-18 17:12:24 +01001440 "iova to phys timed out on %pad. Falling back to software table walk.\n",
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001441 &iova);
1442 return ops->iova_to_phys(ops, iova);
1443 }
1444
Robin Murphyf9a05f02016-04-13 18:13:01 +01001445 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Robin Murphy523d7422017-06-22 16:53:56 +01001446 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001447 if (phys & CB_PAR_F) {
1448 dev_err(dev, "translation fault!\n");
1449 dev_err(dev, "PAR = 0x%llx\n", phys);
1450 return 0;
1451 }
1452
1453 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1454}
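
/*
 * In short, the hardware-assisted walk above is: write the page-aligned
 * VA to ATS1PR, poll ATSR.ACTIVE (for up to 50us) until the translation
 * completes, then read PAR. PAR.F set reports a translation fault;
 * otherwise bits [39:12] of PAR supply the PA, recombined with the page
 * offset from the original IOVA. On timeout we fall back to walking the
 * page tables in software.
 */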
1455
Will Deacon45ae7cf2013-06-24 18:31:25 +01001456static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001457 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001458{
Joerg Roedel1d672632015-03-26 13:43:10 +01001459 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Robin Murphy523d7422017-06-22 16:53:56 +01001460 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001461
Sunil Gouthambdf95922017-04-25 15:27:52 +05301462 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1463 return iova;
1464
Will Deacon518f7132014-11-14 17:17:54 +00001465 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00001466 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001467
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001468 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
Robin Murphy523d7422017-06-22 16:53:56 +01001469 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1470 return arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001471
Robin Murphy523d7422017-06-22 16:53:56 +01001472 return ops->iova_to_phys(ops, iova);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001473}
1474
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001475static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001476{
Will Deacond0948942014-06-24 17:30:10 +01001477 switch (cap) {
1478 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001479 /*
1480 * Return true here as the SMMU can always send out coherent
1481 * requests.
1482 */
1483 return true;
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001484 case IOMMU_CAP_NOEXEC:
1485 return true;
Will Deacond0948942014-06-24 17:30:10 +01001486 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001487 return false;
Will Deacond0948942014-06-24 17:30:10 +01001488 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001489}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001490
Robin Murphy021bb842016-09-14 15:26:46 +01001491static int arm_smmu_match_node(struct device *dev, void *data)
1492{
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001493 return dev->fwnode == data;
Robin Murphy021bb842016-09-14 15:26:46 +01001494}
1495
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001496static
1497struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
Robin Murphy021bb842016-09-14 15:26:46 +01001498{
1499 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001500 fwnode, arm_smmu_match_node);
Robin Murphy021bb842016-09-14 15:26:46 +01001501 put_device(dev);
1502 return dev ? dev_get_drvdata(dev) : NULL;
1503}
1504
Will Deacon03edb222015-01-19 14:27:33 +00001505static int arm_smmu_add_device(struct device *dev)
1506{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001507 struct arm_smmu_device *smmu;
Robin Murphyf80cd882016-09-14 15:21:39 +01001508 struct arm_smmu_master_cfg *cfg;
Robin Murphy021bb842016-09-14 15:26:46 +01001509 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyf80cd882016-09-14 15:21:39 +01001510 int i, ret;
1511
Robin Murphy021bb842016-09-14 15:26:46 +01001512 if (using_legacy_binding) {
1513 ret = arm_smmu_register_legacy_master(dev, &smmu);
1514 fwspec = dev->iommu_fwspec;
1515 if (ret)
1516 goto out_free;
Robin Murphy3c117b52016-11-02 17:31:32 +00001517 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001518 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001519 } else {
1520 return -ENODEV;
1521 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001522
1523 ret = -EINVAL;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001524 for (i = 0; i < fwspec->num_ids; i++) {
1525 u16 sid = fwspec->ids[i];
Robin Murphy021bb842016-09-14 15:26:46 +01001526 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyf80cd882016-09-14 15:21:39 +01001527
Robin Murphyadfec2e2016-09-12 17:13:55 +01001528 if (sid & ~smmu->streamid_mask) {
Robin Murphyf80cd882016-09-14 15:21:39 +01001529 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy021bb842016-09-14 15:26:46 +01001530 sid, smmu->streamid_mask);
1531 goto out_free;
1532 }
1533 if (mask & ~smmu->smr_mask_mask) {
1534 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
Peng Fan6323f472017-04-21 17:03:36 +08001535 mask, smmu->smr_mask_mask);
Robin Murphyf80cd882016-09-14 15:21:39 +01001536 goto out_free;
1537 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001538 }
Will Deacon03edb222015-01-19 14:27:33 +00001539
Robin Murphyadfec2e2016-09-12 17:13:55 +01001540 ret = -ENOMEM;
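	/*
	 * At this point i == fwspec->num_ids, so the offsetof() below
	 * sizes the allocation to hold exactly one smendx[] slot per
	 * stream ID.
	 */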
1541 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1542 GFP_KERNEL);
1543 if (!cfg)
1544 goto out_free;
1545
1546 cfg->smmu = smmu;
1547 fwspec->iommu_priv = cfg;
1548 while (i--)
1549 cfg->smendx[i] = INVALID_SMENDX;
1550
Robin Murphy588888a2016-09-12 17:13:54 +01001551 ret = arm_smmu_master_alloc_smes(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001552 if (ret)
1553 goto out_free;
1554
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001555 iommu_device_link(&smmu->iommu, dev);
1556
Robin Murphyadfec2e2016-09-12 17:13:55 +01001557 return 0;
Robin Murphyf80cd882016-09-14 15:21:39 +01001558
1559out_free:
Robin Murphyadfec2e2016-09-12 17:13:55 +01001560 if (fwspec)
1561 kfree(fwspec->iommu_priv);
1562 iommu_fwspec_free(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001563 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00001564}
1565
Will Deacon45ae7cf2013-06-24 18:31:25 +01001566static void arm_smmu_remove_device(struct device *dev)
1567{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001568 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001569 struct arm_smmu_master_cfg *cfg;
1570 struct arm_smmu_device *smmu;
1571
Robin Murphyadfec2e2016-09-12 17:13:55 +01001573 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyf80cd882016-09-14 15:21:39 +01001574 return;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001575
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001576 cfg = fwspec->iommu_priv;
1577 smmu = cfg->smmu;
1578
1579 iommu_device_unlink(&smmu->iommu, dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001580 arm_smmu_master_free_smes(fwspec);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001581 iommu_group_remove_device(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001582 kfree(fwspec->iommu_priv);
1583 iommu_fwspec_free(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001584}
1585
Joerg Roedelaf659932015-10-21 23:51:41 +02001586static struct iommu_group *arm_smmu_device_group(struct device *dev)
1587{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001588 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1589 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy588888a2016-09-12 17:13:54 +01001590 struct iommu_group *group = NULL;
1591 int i, idx;
1592
Robin Murphyadfec2e2016-09-12 17:13:55 +01001593 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001594 if (group && smmu->s2crs[idx].group &&
1595 group != smmu->s2crs[idx].group)
1596 return ERR_PTR(-EINVAL);
1597
1598 group = smmu->s2crs[idx].group;
1599 }
1600
1601 if (group)
Robin Murphye1b44cb2016-11-11 17:59:22 +00001602 return iommu_group_ref_get(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001603
1604 if (dev_is_pci(dev))
1605 group = pci_device_group(dev);
1606 else
1607 group = generic_device_group(dev);
1608
Joerg Roedelaf659932015-10-21 23:51:41 +02001609 return group;
1610}
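
/*
 * Masters whose stream map entries alias must share an iommu_group: if
 * any of this device's indexes already carries a group, that one is
 * reused (with a reference taken) instead of allocating a fresh PCI or
 * generic group, and disagreement between indexes is treated as an
 * error.
 */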
1611
Will Deaconc752ce42014-06-25 22:46:31 +01001612static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1613 enum iommu_attr attr, void *data)
1614{
Joerg Roedel1d672632015-03-26 13:43:10 +01001615 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001616
Will Deacon0834cc22017-01-06 16:28:17 +00001617 if (domain->type != IOMMU_DOMAIN_UNMANAGED)
1618 return -EINVAL;
1619
Will Deaconc752ce42014-06-25 22:46:31 +01001620 switch (attr) {
1621 case DOMAIN_ATTR_NESTING:
1622 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1623 return 0;
1624 default:
1625 return -ENODEV;
1626 }
1627}
1628
1629static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1630 enum iommu_attr attr, void *data)
1631{
Will Deacon518f7132014-11-14 17:17:54 +00001632 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001633 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001634
Will Deacon0834cc22017-01-06 16:28:17 +00001635 if (domain->type != IOMMU_DOMAIN_UNMANAGED)
1636 return -EINVAL;
1637
Will Deacon518f7132014-11-14 17:17:54 +00001638 mutex_lock(&smmu_domain->init_mutex);
1639
Will Deaconc752ce42014-06-25 22:46:31 +01001640 switch (attr) {
1641 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00001642 if (smmu_domain->smmu) {
1643 ret = -EPERM;
1644 goto out_unlock;
1645 }
1646
Will Deaconc752ce42014-06-25 22:46:31 +01001647 if (*(int *)data)
1648 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1649 else
1650 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1651
Will Deacon518f7132014-11-14 17:17:54 +00001652 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001653 default:
Will Deacon518f7132014-11-14 17:17:54 +00001654 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01001655 }
Will Deacon518f7132014-11-14 17:17:54 +00001656
1657out_unlock:
1658 mutex_unlock(&smmu_domain->init_mutex);
1659 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001660}
1661
Robin Murphy021bb842016-09-14 15:26:46 +01001662static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1663{
Robin Murphy56fbf602017-03-31 12:03:33 +01001664 u32 mask, fwid = 0;
Robin Murphy021bb842016-09-14 15:26:46 +01001665
1666 if (args->args_count > 0)
1667 fwid |= (u16)args->args[0];
1668
1669 if (args->args_count > 1)
1670 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
Robin Murphy56fbf602017-03-31 12:03:33 +01001671 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
1672 fwid |= (u16)mask << SMR_MASK_SHIFT;
Robin Murphy021bb842016-09-14 15:26:46 +01001673
1674 return iommu_fwspec_add_ids(dev, &fwid, 1);
1675}
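
/*
 * For illustration (hypothetical devicetree), a generic-binding master
 * described as:
 *
 *	iommus = <&smmu 0x400 0x0ff>;
 *
 * is folded into the single fwspec ID 0x00ff0400: stream ID in the low
 * 16 bits, SMR mask in the high 16. A "stream-match-mask" property on
 * the SMMU node supplies the same mask for one-cell users.
 */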
1676
Eric Augerf3ebee82017-01-19 20:57:55 +00001677static void arm_smmu_get_resv_regions(struct device *dev,
1678 struct list_head *head)
1679{
1680 struct iommu_resv_region *region;
1681 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1682
1683 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00001684 prot, IOMMU_RESV_SW_MSI);
Eric Augerf3ebee82017-01-19 20:57:55 +00001685 if (!region)
1686 return;
1687
1688 list_add_tail(&region->list, head);
Robin Murphy273df962017-03-16 17:00:19 +00001689
1690 iommu_dma_get_resv_regions(dev, head);
Eric Augerf3ebee82017-01-19 20:57:55 +00001691}
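
/*
 * The one region advertised here is the software-managed MSI window
 * (MSI_IOVA_BASE/MSI_IOVA_LENGTH, defined earlier in this file):
 * iommu-dma maps MSI doorbells inside it instead of the SMMU having to
 * translate them transparently. Any firmware-described reserved regions
 * are appended by iommu_dma_get_resv_regions().
 */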
1692
1693static void arm_smmu_put_resv_regions(struct device *dev,
1694 struct list_head *head)
1695{
1696 struct iommu_resv_region *entry, *next;
1697
1698 list_for_each_entry_safe(entry, next, head, list)
1699 kfree(entry);
1700}
1701
Will Deacon518f7132014-11-14 17:17:54 +00001702static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001703 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001704 .domain_alloc = arm_smmu_domain_alloc,
1705 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001706 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001707 .map = arm_smmu_map,
1708 .unmap = arm_smmu_unmap,
Joerg Roedel76771c92014-12-02 13:07:13 +01001709 .map_sg = default_iommu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01001710 .iova_to_phys = arm_smmu_iova_to_phys,
1711 .add_device = arm_smmu_add_device,
1712 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001713 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001714 .domain_get_attr = arm_smmu_domain_get_attr,
1715 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy021bb842016-09-14 15:26:46 +01001716 .of_xlate = arm_smmu_of_xlate,
Eric Augerf3ebee82017-01-19 20:57:55 +00001717 .get_resv_regions = arm_smmu_get_resv_regions,
1718 .put_resv_regions = arm_smmu_put_resv_regions,
Will Deacon518f7132014-11-14 17:17:54 +00001719 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001720};
1721
1722static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1723{
1724 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001725 void __iomem *cb_base;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001726 int i;
Peng Fan3ca37122016-05-03 21:50:30 +08001727 u32 reg, major;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001728
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001729 /* clear global FSR */
1730 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1731 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001732
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001733 /*
1734 * Reset stream mapping groups: Initial values mark all SMRn as
1735 * invalid and all S2CRn as bypass unless overridden.
1736 */
Robin Murphy8e8b2032016-09-12 17:13:50 +01001737 for (i = 0; i < smmu->num_mapping_groups; ++i)
1738 arm_smmu_write_sme(smmu, i);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001739
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301740 if (smmu->model == ARM_MMU500) {
1741 /*
1742 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
1743 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
1744 * bit is only present in MMU-500r2 onwards.
1745 */
1746 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
1747 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
Peng Fan3ca37122016-05-03 21:50:30 +08001748 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301749 if (major >= 2)
1750 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
1751 /*
1752 * Allow unmatched Stream IDs to allocate bypass
1753 * TLB entries for reduced latency.
1754 */
1755 reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
Peng Fan3ca37122016-05-03 21:50:30 +08001756 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
1757 }
1758
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001759 /* Make sure all context banks are disabled and clear CB_FSR */
1760 for (i = 0; i < smmu->num_context_banks; ++i) {
Robin Murphy452107c2017-03-30 17:56:30 +01001761 cb_base = ARM_SMMU_CB(smmu, i);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001762 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
1763 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001764 /*
1765 * Disable MMU-500's not-particularly-beneficial next-page
1766 * prefetcher for the sake of errata #841119 and #826419.
1767 */
1768 if (smmu->model == ARM_MMU500) {
1769 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
1770 reg &= ~ARM_MMU500_ACTLR_CPRE;
1771 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
1772 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001773 }
Will Deacon1463fe42013-07-31 19:21:27 +01001774
Will Deacon45ae7cf2013-06-24 18:31:25 +01001775 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001776 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1777 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
1778
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001779 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001780
Will Deacon45ae7cf2013-06-24 18:31:25 +01001781 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001782 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001783
1784 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001785 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001786
Robin Murphy25a1c962016-02-10 14:25:33 +00001787 /* Enable client access, handling unmatched streams as appropriate */
1788 reg &= ~sCR0_CLIENTPD;
1789 if (disable_bypass)
1790 reg |= sCR0_USFCFG;
1791 else
1792 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001793
1794 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001795 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001796
1797 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001798 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001799
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001800 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1801 reg |= sCR0_VMID16EN;
1802
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001803 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1804 reg |= sCR0_EXIDENABLE;
1805
Will Deacon45ae7cf2013-06-24 18:31:25 +01001806 /* Push the button */
Robin Murphy11febfc2017-03-30 17:56:31 +01001807 arm_smmu_tlb_sync_global(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001808 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001809}
1810
1811static int arm_smmu_id_size_to_bits(int size)
1812{
1813 switch (size) {
1814 case 0:
1815 return 32;
1816 case 1:
1817 return 36;
1818 case 2:
1819 return 40;
1820 case 3:
1821 return 42;
1822 case 4:
1823 return 44;
1824 case 5:
1825 default:
1826 return 48;
1827 }
1828}
1829
1830static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1831{
1832 unsigned long size;
1833 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1834 u32 id;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001835 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001836 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001837
1838 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001839 dev_notice(smmu->dev, "SMMUv%d with:\n",
1840 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001841
1842 /* ID0 */
1843 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001844
1845 /* Restrict available stages based on module parameter */
1846 if (force_stage == 1)
1847 id &= ~(ID0_S2TS | ID0_NTS);
1848 else if (force_stage == 2)
1849 id &= ~(ID0_S1TS | ID0_NTS);
1850
Will Deacon45ae7cf2013-06-24 18:31:25 +01001851 if (id & ID0_S1TS) {
1852 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1853 dev_notice(smmu->dev, "\tstage 1 translation\n");
1854 }
1855
1856 if (id & ID0_S2TS) {
1857 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1858 dev_notice(smmu->dev, "\tstage 2 translation\n");
1859 }
1860
1861 if (id & ID0_NTS) {
1862 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1863 dev_notice(smmu->dev, "\tnested translation\n");
1864 }
1865
1866 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001867 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001868 dev_err(smmu->dev, "\tno translation support!\n");
1869 return -ENODEV;
1870 }
1871
Robin Murphyb7862e32016-04-13 18:13:03 +01001872 if ((id & ID0_S1TS) &&
1873 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001874 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1875 dev_notice(smmu->dev, "\taddress translation ops\n");
1876 }
1877
Robin Murphybae2c2d2015-07-29 19:46:05 +01001878 /*
1879 * In order for DMA API calls to work properly, we must defer to what
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001880 * the FW says about coherency, regardless of what the hardware claims.
Robin Murphybae2c2d2015-07-29 19:46:05 +01001881 * Fortunately, this also opens up a workaround for systems where the
1882 * ID register value has ended up configured incorrectly.
1883 */
Robin Murphybae2c2d2015-07-29 19:46:05 +01001884 cttw_reg = !!(id & ID0_CTTW);
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001885 if (cttw_fw || cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001886 dev_notice(smmu->dev, "\t%scoherent table walk\n",
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001887 cttw_fw ? "" : "non-");
1888 if (cttw_fw != cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001889 dev_notice(smmu->dev,
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001890 "\t(IDR0.CTTW overridden by FW configuration)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001891
Robin Murphy21174242016-09-12 17:13:48 +01001892 /* Max. number of entries we have for stream matching/indexing */
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001893 if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
1894 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1895 size = 1 << 16;
1896 } else {
1897 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
1898 }
Robin Murphy21174242016-09-12 17:13:48 +01001899 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001900 if (id & ID0_SMS) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001901 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy21174242016-09-12 17:13:48 +01001902 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
1903 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001904 dev_err(smmu->dev,
1905 "stream-matching supported, but no SMRs present!\n");
1906 return -ENODEV;
1907 }
1908
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001909 /* Zero-initialised to mark as invalid */
1910 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1911 GFP_KERNEL);
1912 if (!smmu->smrs)
1913 return -ENOMEM;
1914
Will Deacon45ae7cf2013-06-24 18:31:25 +01001915 dev_notice(smmu->dev,
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001916 "\tstream matching with %lu register groups\n", size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001917 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001918 /* s2cr->type == 0 means translation, so initialise explicitly */
1919 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1920 GFP_KERNEL);
1921 if (!smmu->s2crs)
1922 return -ENOMEM;
1923 for (i = 0; i < size; i++)
1924 smmu->s2crs[i] = s2cr_init_val;
1925
Robin Murphy21174242016-09-12 17:13:48 +01001926 smmu->num_mapping_groups = size;
Robin Murphy588888a2016-09-12 17:13:54 +01001927 mutex_init(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001928
Robin Murphy7602b872016-04-28 17:12:09 +01001929 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1930 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1931 if (!(id & ID0_PTFS_NO_AARCH32S))
1932 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1933 }
1934
Will Deacon45ae7cf2013-06-24 18:31:25 +01001935 /* ID1 */
1936 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01001937 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001938
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001939 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00001940 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Robin Murphy452107c2017-03-30 17:56:30 +01001941 size <<= smmu->pgshift;
1942 if (smmu->cb_base != gr0_base + size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001943 dev_warn(smmu->dev,
Robin Murphy452107c2017-03-30 17:56:30 +01001944 "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n",
1945 size * 2, (smmu->cb_base - gr0_base) * 2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001946
Will Deacon518f7132014-11-14 17:17:54 +00001947 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001948 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1949 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1950 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1951 return -ENODEV;
1952 }
1953 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1954 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01001955 /*
1956 * Cavium CN88xx erratum #27704.
1957 * Ensure ASID and VMID allocation is unique across all SMMUs in
1958 * the system.
1959 */
1960 if (smmu->model == CAVIUM_SMMUV2) {
1961 smmu->cavium_id_base =
1962 atomic_add_return(smmu->num_context_banks,
1963 &cavium_smmu_context_count);
1964 smmu->cavium_id_base -= smmu->num_context_banks;
Robert Richter53c35dce2017-03-13 11:39:01 +01001965 dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
Robin Murphye086d912016-04-13 18:12:58 +01001966 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001967
1968 /* ID2 */
1969 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
1970 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001971 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001972
Will Deacon518f7132014-11-14 17:17:54 +00001973 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001974 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001975 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001976
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001977 if (id & ID2_VMID16)
1978 smmu->features |= ARM_SMMU_FEAT_VMID16;
1979
Robin Murphyf1d84542015-03-04 16:41:05 +00001980 /*
1981 * What the page table walker can address actually depends on which
1982 * descriptor format is in use, but since a) we don't know that yet,
1983 * and b) it can vary per context bank, this will have to do...
1984 */
1985 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1986 dev_warn(smmu->dev,
1987 "failed to set DMA mask for table walker\n");
1988
Robin Murphyb7862e32016-04-13 18:13:03 +01001989 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00001990 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01001991 if (smmu->version == ARM_SMMU_V1_64K)
1992 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001993 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001994 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00001995 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00001996 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01001997 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00001998 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01001999 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00002000 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01002001 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002002 }
2003
Robin Murphy7602b872016-04-28 17:12:09 +01002004 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01002005 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01002006 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01002007 if (smmu->features &
2008 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01002009 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01002010 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01002011 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01002012 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01002013 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01002014
Robin Murphyd5466352016-05-09 17:20:09 +01002015 if (arm_smmu_ops.pgsize_bitmap == -1UL)
2016 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
2017 else
2018 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
2019 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
2020 smmu->pgsize_bitmap);
2021
Will Deacon28d60072014-09-01 16:24:48 +01002023 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
2024 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002025 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002026
2027 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
2028 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00002029 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01002030
Will Deacon45ae7cf2013-06-24 18:31:25 +01002031 return 0;
2032}
2033
Robin Murphy67b65a32016-04-13 18:12:57 +01002034struct arm_smmu_match_data {
2035 enum arm_smmu_arch_version version;
2036 enum arm_smmu_implementation model;
2037};
2038
2039#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
2040static struct arm_smmu_match_data name = { .version = ver, .model = imp }
2041
2042ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
2043ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01002044ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002045ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01002046ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01002047
Joerg Roedel09b52692014-10-02 12:24:45 +02002048static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01002049 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
2050 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
2051 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01002052 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01002053 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01002054 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01002055 { },
2056};
2057MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
2058
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002059#ifdef CONFIG_ACPI
2060static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
2061{
2062 int ret = 0;
2063
2064 switch (model) {
2065 case ACPI_IORT_SMMU_V1:
2066 case ACPI_IORT_SMMU_CORELINK_MMU400:
2067 smmu->version = ARM_SMMU_V1;
2068 smmu->model = GENERIC_SMMU;
2069 break;
Robin Murphy84c24372017-06-19 16:41:56 +01002070 case ACPI_IORT_SMMU_CORELINK_MMU401:
2071 smmu->version = ARM_SMMU_V1_64K;
2072 smmu->model = GENERIC_SMMU;
2073 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002074 case ACPI_IORT_SMMU_V2:
2075 smmu->version = ARM_SMMU_V2;
2076 smmu->model = GENERIC_SMMU;
2077 break;
2078 case ACPI_IORT_SMMU_CORELINK_MMU500:
2079 smmu->version = ARM_SMMU_V2;
2080 smmu->model = ARM_MMU500;
2081 break;
Robin Murphy84c24372017-06-19 16:41:56 +01002082 case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
2083 smmu->version = ARM_SMMU_V2;
2084 smmu->model = CAVIUM_SMMUV2;
2085 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002086 default:
2087 ret = -ENODEV;
2088 }
2089
2090 return ret;
2091}
2092
2093static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2094 struct arm_smmu_device *smmu)
2095{
2096 struct device *dev = smmu->dev;
2097 struct acpi_iort_node *node =
2098 *(struct acpi_iort_node **)dev_get_platdata(dev);
2099 struct acpi_iort_smmu *iort_smmu;
2100 int ret;
2101
2102 /* Retrieve SMMU1/2 specific data */
2103 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
2104
2105 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
2106 if (ret < 0)
2107 return ret;
2108
2109 /* Ignore the configuration access interrupt */
2110 smmu->num_global_irqs = 1;
2111
2112 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
2113 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2114
2115 return 0;
2116}
2117#else
2118static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2119 struct arm_smmu_device *smmu)
2120{
2121 return -ENODEV;
2122}
2123#endif
2124
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002125static int arm_smmu_device_dt_probe(struct platform_device *pdev,
2126 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002127{
Robin Murphy67b65a32016-04-13 18:12:57 +01002128 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002129 struct device *dev = &pdev->dev;
Robin Murphy021bb842016-09-14 15:26:46 +01002130 bool legacy_binding;
2131
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002132 if (of_property_read_u32(dev->of_node, "#global-interrupts",
2133 &smmu->num_global_irqs)) {
2134 dev_err(dev, "missing #global-interrupts property\n");
2135 return -ENODEV;
2136 }
2137
2138 data = of_device_get_match_data(dev);
2139 smmu->version = data->version;
2140 smmu->model = data->model;
2141
2142 parse_driver_options(smmu);
2143
Robin Murphy021bb842016-09-14 15:26:46 +01002144 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
2145 if (legacy_binding && !using_generic_binding) {
2146 if (!using_legacy_binding)
2147 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
2148 using_legacy_binding = true;
2149 } else if (!legacy_binding && !using_legacy_binding) {
2150 using_generic_binding = true;
2151 } else {
2152 dev_err(dev, "not probing due to mismatched DT properties\n");
2153 return -ENODEV;
2154 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002155
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002156 if (of_dma_is_coherent(dev->of_node))
2157 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2158
2159 return 0;
2160}
2161
Robin Murphyf6810c12017-04-10 16:51:05 +05302162static void arm_smmu_bus_init(void)
2163{
2164 /* Oh, for a proper bus abstraction */
2165 if (!iommu_present(&platform_bus_type))
2166 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2167#ifdef CONFIG_ARM_AMBA
2168 if (!iommu_present(&amba_bustype))
2169 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2170#endif
2171#ifdef CONFIG_PCI
2172 if (!iommu_present(&pci_bus_type)) {
2173 pci_request_acs();
2174 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2175 }
2176#endif
2177}
2178
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002179static int arm_smmu_device_probe(struct platform_device *pdev)
2180{
2181 struct resource *res;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002182 resource_size_t ioaddr;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002183 struct arm_smmu_device *smmu;
2184 struct device *dev = &pdev->dev;
2185 int num_irqs, i, err;
2186
Will Deacon45ae7cf2013-06-24 18:31:25 +01002187 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2188 if (!smmu) {
2189 dev_err(dev, "failed to allocate arm_smmu_device\n");
2190 return -ENOMEM;
2191 }
2192 smmu->dev = dev;
2193
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002194 if (dev->of_node)
2195 err = arm_smmu_device_dt_probe(pdev, smmu);
2196 else
2197 err = arm_smmu_device_acpi_probe(pdev, smmu);
2198
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002199 if (err)
2200 return err;
Robin Murphy09360402014-08-28 17:51:59 +01002201
Will Deacon45ae7cf2013-06-24 18:31:25 +01002202 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002203 ioaddr = res->start;
Julia Lawall8a7f4312013-08-19 12:20:37 +01002204 smmu->base = devm_ioremap_resource(dev, res);
2205 if (IS_ERR(smmu->base))
2206 return PTR_ERR(smmu->base);
Robin Murphy452107c2017-03-30 17:56:30 +01002207 smmu->cb_base = smmu->base + resource_size(res) / 2;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002208
Will Deacon45ae7cf2013-06-24 18:31:25 +01002209 num_irqs = 0;
2210 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2211 num_irqs++;
2212 if (num_irqs > smmu->num_global_irqs)
2213 smmu->num_context_irqs++;
2214 }
2215
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002216 if (!smmu->num_context_irqs) {
2217 dev_err(dev, "found %d interrupts but expected at least %d\n",
2218 num_irqs, smmu->num_global_irqs + 1);
2219 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002220 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002221
2222 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
2223 GFP_KERNEL);
2224 if (!smmu->irqs) {
2225 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2226 return -ENOMEM;
2227 }
2228
2229 for (i = 0; i < num_irqs; ++i) {
2230 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002231
Will Deacon45ae7cf2013-06-24 18:31:25 +01002232 if (irq < 0) {
2233 dev_err(dev, "failed to get irq index %d\n", i);
2234 return -ENODEV;
2235 }
2236 smmu->irqs[i] = irq;
2237 }
2238
Olav Haugan3c8766d2014-08-22 17:12:32 -07002239 err = arm_smmu_device_cfg_probe(smmu);
2240 if (err)
2241 return err;
2242
Robin Murphyb7862e32016-04-13 18:13:03 +01002243 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01002244 smmu->num_context_banks != smmu->num_context_irqs) {
2245 dev_err(dev,
2246 "found only %d context interrupt(s) but %d required\n",
2247 smmu->num_context_irqs, smmu->num_context_banks);
Robin Murphyf80cd882016-09-14 15:21:39 +01002248 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002249 }
2250
Will Deacon45ae7cf2013-06-24 18:31:25 +01002251 for (i = 0; i < smmu->num_global_irqs; ++i) {
Peng Fanbee14002016-07-04 17:38:22 +08002252 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2253 arm_smmu_global_fault,
2254 IRQF_SHARED,
2255 "arm-smmu global fault",
2256 smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002257 if (err) {
2258 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2259 i, smmu->irqs[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01002260 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002261 }
2262 }
2263
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002264 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2265 "smmu.%pa", &ioaddr);
2266 if (err) {
2267 dev_err(dev, "Failed to register iommu in sysfs\n");
2268 return err;
2269 }
2270
2271 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2272 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2273
2274 err = iommu_device_register(&smmu->iommu);
2275 if (err) {
2276 dev_err(dev, "Failed to register iommu\n");
2277 return err;
2278 }
2279
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002280 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01002281 arm_smmu_device_reset(smmu);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03002282 arm_smmu_test_smr_masks(smmu);
Robin Murphy021bb842016-09-14 15:26:46 +01002283
Robin Murphyf6810c12017-04-10 16:51:05 +05302284 /*
2285 * For ACPI and generic DT bindings, an SMMU will be probed before
2286 * any device which might need it, so we want the bus ops in place
2287 * ready to handle default domain setup as soon as any SMMU exists.
2288 */
2289 if (!using_legacy_binding)
2290 arm_smmu_bus_init();
2291
Will Deacon45ae7cf2013-06-24 18:31:25 +01002292 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002293}
2294
Robin Murphyf6810c12017-04-10 16:51:05 +05302295/*
2296 * With the legacy DT binding in play, though, we have no guarantees about
2297 * probe order, but then we're also not doing default domains, so we can
2298 * delay setting bus ops until we're sure every possible SMMU is ready,
2299 * and that way ensure that no add_device() calls get missed.
2300 */
2301static int arm_smmu_legacy_bus_init(void)
2302{
2303 if (using_legacy_binding)
2304 arm_smmu_bus_init();
2305 return 0;
2306}
2307device_initcall_sync(arm_smmu_legacy_bus_init);
2308
Will Deacon45ae7cf2013-06-24 18:31:25 +01002309static int arm_smmu_device_remove(struct platform_device *pdev)
2310{
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002311 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002312
2313 if (!smmu)
2314 return -ENODEV;
2315
Will Deaconecfadb62013-07-31 19:21:28 +01002316 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002317 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01002318
Will Deacon45ae7cf2013-06-24 18:31:25 +01002319 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07002320 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002321 return 0;
2322}
2323
Will Deacon45ae7cf2013-06-24 18:31:25 +01002324static struct platform_driver arm_smmu_driver = {
2325 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002326 .name = "arm-smmu",
2327 .of_match_table = of_match_ptr(arm_smmu_of_match),
2328 },
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002329 .probe = arm_smmu_device_probe,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002330 .remove = arm_smmu_device_remove,
2331};
Robin Murphyf6810c12017-04-10 16:51:05 +05302332module_platform_driver(arm_smmu_driver);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002333
Robin Murphyf6810c12017-04-10 16:51:05 +05302334IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", NULL);
2335IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", NULL);
2336IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", NULL);
2337IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", NULL);
2338IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", NULL);
2339IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", NULL);
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002340
Will Deacon45ae7cf2013-06-24 18:31:25 +01002341MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2342MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
2343MODULE_LICENSE("GPL v2");