/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/of_platform.h>
#include <linux/msm-bus.h>
#include <dt-bindings/msm/msm-bus-ids.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		500000	/* 500ms */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FSRRESTORE		0x5c
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define TLBSTATUS_SACTIVE		(1 << 0)
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
	QCOM_SMMUV500,
};

struct arm_smmu_device;
struct arm_smmu_arch_ops {
	int (*init)(struct arm_smmu_device *smmu);
	void (*device_reset)(struct arm_smmu_device *smmu);
	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
					 dma_addr_t iova);
	void (*iova_to_phys_fault)(struct iommu_domain *domain,
				   dma_addr_t iova, phys_addr_t *phys1,
				   phys_addr_t *phys_post_tlbiall);
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
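/*
 * Each master's iommu_fwspec carries an arm_smmu_master_cfg in iommu_priv.
 * For every stream ID of that master, smendx[] records the index of the
 * SMR/S2CR pair it is programmed into (INVALID_SMENDX until one has been
 * allocated), which is what for_each_cfg_sme() walks.
 */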
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = __fwspec_cfg(fw)->smendx[i], i < fw->num_ids; ++i)

/*
 * Describes resources required for on/off power operation.
 * Separate reference count is provided for atomic/nonatomic
 * operations.
 */
struct arm_smmu_power_resources {
	struct platform_device		*pdev;
	struct device			*dev;

	struct clk			**clocks;
	int				num_clocks;

	struct regulator_bulk_data	*gdscs;
	int				num_gdscs;

	uint32_t			bus_client;
	struct msm_bus_scale_pdata	*bus_dt_data;

	/* Protects power_count */
	struct mutex			power_lock;
	int				power_count;

	/* Protects clock_refs_count */
	spinlock_t			clock_refs_lock;
	int				clock_refs_count;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
#define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	u32				cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
	unsigned int			num_impl_def_attach_registers;

	struct arm_smmu_power_resources *pwr;

	spinlock_t			atos_lock;

	/* protects idr */
	struct mutex			idr_mutex;
	struct idr			asid_idr;

	struct arm_smmu_arch_ops	*arch_ops;
	void				*archdata;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	u32				procid;
	u16				asid;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff
#define INVALID_CBNDX			0xff
#define INVALID_ASID			0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID			0xff

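/*
 * The ASID written into a stage-1 context bank comes straight from the
 * domain's cfg, while stage-2 VMIDs are derived from the context bank
 * index (+1 keeps VMID 0 unused); cavium_id_base offsets them on Cavium
 * implementations, which share one ASID/VMID namespace across all SMMUs.
 */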
#define ARM_SMMU_CB_ASID(smmu, cfg) ((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_pte_info {
	void *virt_addr;
	size_t size;
	struct list_head entry;
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	struct io_pgtable_cfg		pgtbl_cfg;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	u32				attributes;
	u32				secure_vmid;
	struct list_head		pte_info_list;
	struct list_head		unassign_list;
	struct mutex			assign_lock;
	struct list_head		secure_pool_list;
	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ 0, NULL},
};

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);

static int arm_smmu_arch_init(struct arm_smmu_device *smmu);
static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_dbg(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
{
	return (smmu_domain->secure_vmid != VMID_INVAL);
}

static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_lock(&smmu_domain->assign_lock);
}

static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_unlock(&smmu_domain->assign_lock);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					    struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err = 0;

	memset(&it, 0, sizeof(it));
	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

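/*
 * Power management is split into two levels: the "slow" path
 * (mutex-protected power_count) enables regulators, votes for bus
 * bandwidth and prepares clocks, and may sleep; the "atomic" path
 * (spinlock-protected clock_refs_count) only enables already-prepared
 * clocks, so it can be used from atomic context.
 */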
static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_prepare(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
			while (i--)
				clk_unprepare(pwr->clocks[i]);
			break;
		}
	}
	return ret;
}

static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_unprepare(pwr->clocks[i - 1]);
}

static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_enable(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
			while (i--)
				clk_disable(pwr->clocks[i]);
			break;
		}
	}

	return ret;
}

static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_disable(pwr->clocks[i - 1]);
}

static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return 0;
	return msm_bus_scale_client_update_request(pwr->bus_client, 1);
}

static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return;
	WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
}

/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count > 0) {
		pwr->clock_refs_count++;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return 0;
	}

	ret = arm_smmu_enable_clocks(pwr);
	if (!ret)
		pwr->clock_refs_count = 1;

	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
	return ret;
}

/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
{
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count == 0) {
		WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;

	} else if (pwr->clock_refs_count > 1) {
		pwr->clock_refs_count--;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;
	}

	arm_smmu_disable_clocks(pwr);

	pwr->clock_refs_count = 0;
	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
}

static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
{
	int ret;

	mutex_lock(&pwr->power_lock);
	if (pwr->power_count > 0) {
		pwr->power_count += 1;
		mutex_unlock(&pwr->power_lock);
		return 0;
	}

	ret = regulator_bulk_enable(pwr->num_gdscs, pwr->gdscs);
	if (ret)
		goto out_unlock;

	ret = arm_smmu_request_bus(pwr);
	if (ret)
		goto out_disable_regulators;

	ret = arm_smmu_prepare_clocks(pwr);
	if (ret)
		goto out_disable_bus;

	pwr->power_count = 1;
	mutex_unlock(&pwr->power_lock);
	return 0;

out_disable_bus:
	arm_smmu_unrequest_bus(pwr);
out_disable_regulators:
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
out_unlock:
	mutex_unlock(&pwr->power_lock);
	return ret;
}

static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
{
	mutex_lock(&pwr->power_lock);
	if (pwr->power_count == 0) {
		WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
		mutex_unlock(&pwr->power_lock);
		return;

	} else if (pwr->power_count > 1) {
		pwr->power_count--;
		mutex_unlock(&pwr->power_lock);
		return;
	}

	arm_smmu_unprepare_clocks(pwr);
	arm_smmu_unrequest_bus(pwr);
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);

	mutex_unlock(&pwr->power_lock);
}

static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
{
	int ret;

	ret = arm_smmu_power_on_slow(pwr);
	if (ret)
		return ret;

	ret = arm_smmu_power_on_atomic(pwr);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	arm_smmu_power_off_slow(pwr);
	return ret;
}

static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
{
	arm_smmu_power_off_atomic(pwr);
	arm_smmu_power_off_slow(pwr);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static int arm_smmu_domain_power_on(struct iommu_domain *domain,
				    struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain)
		return arm_smmu_power_on_atomic(smmu->pwr);

	return arm_smmu_power_on(smmu->pwr);
}

/*
 * Must be used instead of arm_smmu_power_off if it may be called from
 * atomic context
 */
static void arm_smmu_domain_power_off(struct iommu_domain *domain,
				      struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain) {
		arm_smmu_power_off_atomic(smmu->pwr);
		return;
	}

	arm_smmu_power_off(smmu->pwr);
}

/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
				 int cbndx)
{
	void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
	u32 val;

	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
				      !(val & TLBSTATUS_SACTIVE),
				      0, TLB_LOOP_TIMEOUT))
		dev_err(smmu->dev, "TLBSYNC timeout!\n");
}

static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
}

/* Must be called with clocks/regulators enabled */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
		__arm_smmu_tlb_sync(smmu);
	}
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

struct arm_smmu_secure_pool_chunk {
	void *addr;
	size_t size;
	struct list_head list;
};

static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
					 size_t size)
{
	struct arm_smmu_secure_pool_chunk *it;

	list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
		if (it->size == size) {
			void *addr = it->addr;

			list_del(&it->list);
			kfree(it);
			return addr;
		}
	}

	return NULL;
}

static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
				    void *addr, size_t size)
{
	struct arm_smmu_secure_pool_chunk *chunk;

	chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
	if (!chunk)
		return -ENOMEM;

	chunk->addr = addr;
	chunk->size = size;
	memset(addr, 0, size);
	list_add(&chunk->list, &smmu_domain->secure_pool_list);

	return 0;
}

static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_secure_pool_chunk *it, *i;

	list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
		arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
		/* pages will be freed later (after being unassigned) */
		kfree(it);
	}
}

static void *arm_smmu_alloc_pages_exact(void *cookie,
					size_t size, gfp_t gfp_mask)
{
	int ret;
	void *page;
	struct arm_smmu_domain *smmu_domain = cookie;

	if (!arm_smmu_is_domain_secure(smmu_domain))
		return alloc_pages_exact(size, gfp_mask);

	page = arm_smmu_secure_pool_remove(smmu_domain, size);
	if (page)
		return page;

	page = alloc_pages_exact(size, gfp_mask);
	if (page) {
		ret = arm_smmu_prepare_pgtable(page, cookie);
		if (ret) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return page;
}

static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	if (!arm_smmu_is_domain_secure(smmu_domain)) {
		free_pages_exact(virt, size);
		return;
	}

	if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
		arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
}

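/*
 * Callbacks handed to the io-pgtable library: page-table updates are
 * followed by the TLB invalidation/sync routines above, and page-table
 * memory goes through the secure-pool aware allocators so that pages
 * backing secure domains are prepared and recycled correctly.
 */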
static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
	.alloc_pages_exact = arm_smmu_alloc_pages_exact,
	.free_pages_exact = arm_smmu_free_pages_exact,
};

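/*
 * Re-run the hardware translation (ATOS) after a full TLB invalidate so
 * that an unhandled fault report can distinguish a stale TLB entry from
 * page tables that genuinely lack the mapping.
 */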
static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
					 dma_addr_t iova, u32 fsr)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	phys_addr_t phys;
	phys_addr_t phys_post_tlbiall;

	smmu = smmu_domain->smmu;

	if (smmu->arch_ops && smmu->arch_ops->iova_to_phys_fault) {
		smmu->arch_ops->iova_to_phys_fault(domain, iova, &phys,
						   &phys_post_tlbiall);
	} else {
		phys = arm_smmu_iova_to_phys_hard(domain, iova);
		arm_smmu_tlb_inv_context(smmu_domain);
		phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
	}

	if (phys != phys_post_tlbiall) {
		dev_err(smmu->dev,
			"ATOS results differed across TLBIALL...\n"
			"Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
	}
	if (!phys_post_tlbiall) {
		dev_err(smmu->dev,
			"ATOS still failed. If the page tables look good (check the software table walk) then hardware might be misbehaving.\n");
	}

	return phys_post_tlbiall;
}
1151
Will Deacon45ae7cf2013-06-24 18:31:25 +01001152static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
1153{
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001154 int flags, ret, tmp;
Patrick Daly5ba28112016-08-30 19:18:52 -07001155 u32 fsr, fsynr, resume;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001156 unsigned long iova;
1157 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +01001158 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001159 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1160 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001161 void __iomem *cb_base;
Shalaj Jain04059c52015-03-03 13:34:59 -08001162 void __iomem *gr1_base;
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001163 bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001164 phys_addr_t phys_soft;
Shalaj Jain04059c52015-03-03 13:34:59 -08001165 u32 frsynra;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001166 bool non_fatal_fault = !!(smmu_domain->attributes &
1167 DOMAIN_ATTR_NON_FATAL_FAULTS);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001168
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001169 static DEFINE_RATELIMIT_STATE(_rs,
1170 DEFAULT_RATELIMIT_INTERVAL,
1171 DEFAULT_RATELIMIT_BURST);
1172
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001173 ret = arm_smmu_power_on(smmu->pwr);
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001174 if (ret)
1175 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001176
Shalaj Jain04059c52015-03-03 13:34:59 -08001177 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001178 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001179 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
1180
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001181 if (!(fsr & FSR_FAULT)) {
1182 ret = IRQ_NONE;
1183 goto out_power_off;
1184 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001185
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001186 if (fatal_asf && (fsr & FSR_ASF)) {
1187 dev_err(smmu->dev,
1188 "Took an address size fault. Refusing to recover.\n");
1189 BUG();
1190 }
1191
Will Deacon45ae7cf2013-06-24 18:31:25 +01001192 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
Patrick Daly5ba28112016-08-30 19:18:52 -07001193 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001194 if (fsr & FSR_TF)
1195 flags |= IOMMU_FAULT_TRANSLATION;
1196 if (fsr & FSR_PF)
1197 flags |= IOMMU_FAULT_PERMISSION;
Mitchel Humpherysdd75e332015-08-19 11:02:33 -07001198 if (fsr & FSR_EF)
1199 flags |= IOMMU_FAULT_EXTERNAL;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001200 if (fsr & FSR_SS)
1201 flags |= IOMMU_FAULT_TRANSACTION_STALLED;
Patrick Daly5ba28112016-08-30 19:18:52 -07001202
Robin Murphyf9a05f02016-04-13 18:13:01 +01001203 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001204 phys_soft = arm_smmu_iova_to_phys(domain, iova);
Shalaj Jain04059c52015-03-03 13:34:59 -08001205 frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
1206 frsynra &= CBFRSYNRA_SID_MASK;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001207 tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
1208 if (!tmp || (tmp == -EBUSY)) {
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001209 dev_dbg(smmu->dev,
1210 "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1211 iova, fsr, fsynr, cfg->cbndx);
1212 dev_dbg(smmu->dev,
1213 "soft iova-to-phys=%pa\n", &phys_soft);
Patrick Daly5ba28112016-08-30 19:18:52 -07001214 ret = IRQ_HANDLED;
Shrenuj Bansald5083c02015-09-18 14:59:09 -07001215 resume = RESUME_TERMINATE;
Patrick Daly5ba28112016-08-30 19:18:52 -07001216 } else {
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001217 phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
1218 fsr);
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001219 if (__ratelimit(&_rs)) {
1220 dev_err(smmu->dev,
1221 "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1222 iova, fsr, fsynr, cfg->cbndx);
1223 dev_err(smmu->dev, "FAR = %016lx\n",
1224 (unsigned long)iova);
1225 dev_err(smmu->dev,
1226 "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
1227 fsr,
1228 (fsr & 0x02) ? "TF " : "",
1229 (fsr & 0x04) ? "AFF " : "",
1230 (fsr & 0x08) ? "PF " : "",
1231 (fsr & 0x10) ? "EF " : "",
1232 (fsr & 0x20) ? "TLBMCF " : "",
1233 (fsr & 0x40) ? "TLBLKF " : "",
1234 (fsr & 0x80) ? "MHF " : "",
1235 (fsr & 0x40000000) ? "SS " : "",
1236 (fsr & 0x80000000) ? "MULTI " : "");
1237 dev_err(smmu->dev,
1238 "soft iova-to-phys=%pa\n", &phys_soft);
Mitchel Humpherysd03b65d2015-11-05 11:50:29 -08001239 if (!phys_soft)
1240 dev_err(smmu->dev,
1241 "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
1242 dev_name(smmu->dev));
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001243 dev_err(smmu->dev,
1244 "hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
1245 dev_err(smmu->dev, "SID=0x%x\n", frsynra);
1246 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001247 ret = IRQ_NONE;
1248 resume = RESUME_TERMINATE;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001249 if (!non_fatal_fault) {
1250 dev_err(smmu->dev,
1251 "Unhandled arm-smmu context fault!\n");
1252 BUG();
1253 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001254 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001255
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001256 /*
1257 * If the client returns -EBUSY, do not clear FSR and do not RESUME
1258 * if stalled. This is required to keep the IOMMU client stalled on
1259 * the outstanding fault. This gives the client a chance to take any
1260 * debug action and then terminate the stalled transaction.
1261 * So, the sequence in case of stall on fault should be:
1262 * 1) Do not clear FSR or write to RESUME here
1263 * 2) Client takes any debug action
1264 * 3) Client terminates the stalled transaction and resumes the IOMMU
1265 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1266 * not before so that the fault remains outstanding. This ensures
1267 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1268 * need to be terminated.
1269 */
1270 if (tmp != -EBUSY) {
1271 /* Clear the faulting FSR */
1272 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001273
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001274 /*
1275 * Barrier required to ensure that the FSR is cleared
1276 * before resuming SMMU operation
1277 */
1278 wmb();
1279
1280 /* Retry or terminate any stalled transactions */
1281 if (fsr & FSR_SS)
1282 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1283 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001284
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001285out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001286 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001287
Patrick Daly5ba28112016-08-30 19:18:52 -07001288 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001289}
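/*
 * Illustrative sketch only, not part of this driver: a client fault handler
 * registered via iommu_set_fault_handler() can return -EBUSY to keep a
 * stalled transaction outstanding, following the stall-on-fault sequence
 * described in the comment above. The deferred work item passed in via the
 * token cookie is a hypothetical placeholder for the client-specific debug,
 * terminate and FSR-clear logic (steps 2-4).
 */
static int __maybe_unused example_client_fault_handler(struct iommu_domain *domain,
					struct device *dev, unsigned long iova,
					int flags, void *token)
{
	struct work_struct *debug_work = token;	/* assumed client cookie */

	if (flags & IOMMU_FAULT_TRANSACTION_STALLED) {
		/* Step 1: leave FSR/RESUME untouched by returning -EBUSY */
		schedule_work(debug_work);	/* steps 2-4 run later */
		return -EBUSY;
	}

	/* Not stalled: report handled so the driver terminates and clears */
	return 0;
}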
1290
1291static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1292{
1293 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1294 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001295 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001296
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001297 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001298 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001299
Will Deacon45ae7cf2013-06-24 18:31:25 +01001300 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1301 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1302 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1303 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1304
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001305 if (!gfsr) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001306 arm_smmu_power_off(smmu->pwr);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001307 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001308 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001309
Will Deacon45ae7cf2013-06-24 18:31:25 +01001310 dev_err_ratelimited(smmu->dev,
1311 		"Unexpected global fault; this could be serious\n");
1312 dev_err_ratelimited(smmu->dev,
1313 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1314 gfsr, gfsynr0, gfsynr1, gfsynr2);
1315
1316 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001317 arm_smmu_power_off(smmu->pwr);
Will Deaconadaba322013-07-31 19:21:26 +01001318 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001319}
1320
Will Deacon518f7132014-11-14 17:17:54 +00001321static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1322 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001323{
Robin Murphyb94df6f2016-08-11 17:44:06 +01001324 u32 reg, reg2;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001325 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001326 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +01001327 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1328 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001329 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001330
Will Deacon45ae7cf2013-06-24 18:31:25 +01001331 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001332 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1333 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001334
Will Deacon4a1c93c2015-03-04 12:21:03 +00001335 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001336 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1337 reg = CBA2R_RW64_64BIT;
1338 else
1339 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001340 /* 16-bit VMIDs live in CBA2R */
1341 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001342 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001343
Will Deacon4a1c93c2015-03-04 12:21:03 +00001344 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
1345 }
1346
Will Deacon45ae7cf2013-06-24 18:31:25 +01001347 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001348 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001349 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001350 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001351
Will Deacon57ca90f2014-02-06 14:59:05 +00001352 /*
1353 * Use the weakest shareability/memory types, so they are
1354 * overridden by the ttbcr/pte.
1355 */
1356 if (stage1) {
1357 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1358 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001359 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1360 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001361 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001362 }
Will Deacon44680ee2014-06-25 11:29:12 +01001363 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001364
Will Deacon518f7132014-11-14 17:17:54 +00001365 /* TTBRs */
1366 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001367 u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001368
Robin Murphyb94df6f2016-08-11 17:44:06 +01001369 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1370 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
1371 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
1372 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
1373 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
1374 writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
1375 } else {
1376 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1377 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1378 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
1379 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
1380 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
1381 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
1382 }
Will Deacon518f7132014-11-14 17:17:54 +00001383 } else {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001384 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001385 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +00001386 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001387
Will Deacon518f7132014-11-14 17:17:54 +00001388 /* TTBCR */
1389 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001390 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1391 reg = pgtbl_cfg->arm_v7s_cfg.tcr;
1392 reg2 = 0;
1393 } else {
1394 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1395 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
1396 reg2 |= TTBCR2_SEP_UPSTREAM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001397 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001398 if (smmu->version > ARM_SMMU_V1)
1399 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001400 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001401 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001402 }
Robin Murphyb94df6f2016-08-11 17:44:06 +01001403 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001404
Will Deacon518f7132014-11-14 17:17:54 +00001405 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001406 if (stage1) {
Robin Murphyb94df6f2016-08-11 17:44:06 +01001407 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
1408 reg = pgtbl_cfg->arm_v7s_cfg.prrr;
1409 reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
1410 } else {
1411 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1412 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1413 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001414 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001415 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001416 }
1417
Will Deacon45ae7cf2013-06-24 18:31:25 +01001418 /* SCTLR */
Robin Murphyb94df6f2016-08-11 17:44:06 +01001419 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
Patrick Dalye62d3362016-03-15 18:58:28 -07001420 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) ||
1421 !stage1)
1422 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001423 if (stage1)
1424 reg |= SCTLR_S1_ASIDPNE;
1425#ifdef __BIG_ENDIAN
1426 reg |= SCTLR_E;
1427#endif
Will Deacon25724842013-08-21 13:49:53 +01001428 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001429}
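/*
 * Worked example (illustrative only) of the stage-1 TTBR packing above,
 * assuming TTBRn_ASID_SHIFT is 48:
 *
 *	ttbr[0] from io-pgtable = 0x0000000812340000
 *	asid                    = 5
 *	reg64 = 0x0000000812340000 | ((u64)5 << 48)
 *	      = 0x0005000812340000
 *
 * i.e. the table base lives in the low bits and the ASID is carried in the
 * upper bits of TTBR0, which is what writeq_relaxed() programs into the CB.
 */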
1430
Patrick Dalyc190d932016-08-30 17:23:28 -07001431static int arm_smmu_init_asid(struct iommu_domain *domain,
1432 struct arm_smmu_device *smmu)
1433{
1434 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1435 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1436 bool dynamic = is_dynamic_domain(domain);
1437 int ret;
1438
1439 if (!dynamic) {
1440 cfg->asid = cfg->cbndx + 1;
1441 } else {
1442 mutex_lock(&smmu->idr_mutex);
1443 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1444 smmu->num_context_banks + 2,
1445 MAX_ASID + 1, GFP_KERNEL);
1446
1447 mutex_unlock(&smmu->idr_mutex);
1448 if (ret < 0) {
1449 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1450 ret);
1451 return ret;
1452 }
1453 cfg->asid = ret;
1454 }
1455 return 0;
1456}
1457
1458static void arm_smmu_free_asid(struct iommu_domain *domain)
1459{
1460 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1461 struct arm_smmu_device *smmu = smmu_domain->smmu;
1462 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1463 bool dynamic = is_dynamic_domain(domain);
1464
1465 if (cfg->asid == INVALID_ASID || !dynamic)
1466 return;
1467
1468 mutex_lock(&smmu->idr_mutex);
1469 idr_remove(&smmu->asid_idr, cfg->asid);
1470 mutex_unlock(&smmu->idr_mutex);
1471}
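/*
 * Illustrative sketch only: how a client might request a dynamic domain so
 * that arm_smmu_init_asid() above takes the idr_alloc_cyclic() path rather
 * than the static cbndx + 1 mapping. Error handling is trimmed, and the
 * extra attributes a real client must program (e.g. the context bank to
 * share, per the "Dynamic domains must set cbndx" note below) are omitted;
 * treat this as a shape, not a recipe.
 */
static struct iommu_domain * __maybe_unused example_dynamic_domain(struct bus_type *bus,
						   struct device *dev)
{
	struct iommu_domain *dom = iommu_domain_alloc(bus);
	int dynamic = 1;

	if (!dom)
		return NULL;

	/* Must be set before attach so is_dynamic_domain() sees it */
	if (iommu_domain_set_attr(dom, DOMAIN_ATTR_DYNAMIC, &dynamic) ||
	    iommu_attach_device(dom, dev)) {
		iommu_domain_free(dom);
		return NULL;
	}

	return dom;
}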
1472
Will Deacon45ae7cf2013-06-24 18:31:25 +01001473static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Will Deacon44680ee2014-06-25 11:29:12 +01001474 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001475{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001476 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001477 unsigned long ias, oas;
1478 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001479 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001480 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001481 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001482 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyce6786f2016-11-09 14:19:23 -08001483 unsigned long quirks = 0;
Patrick Dalyc190d932016-08-30 17:23:28 -07001484 bool dynamic;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001485
Will Deacon518f7132014-11-14 17:17:54 +00001486 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001487 if (smmu_domain->smmu)
1488 goto out_unlock;
1489
Patrick Dalyc190d932016-08-30 17:23:28 -07001490 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1491 smmu_domain->cfg.asid = INVALID_ASID;
1492
Patrick Dalyc190d932016-08-30 17:23:28 -07001493 dynamic = is_dynamic_domain(domain);
1494 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1495 dev_err(smmu->dev, "dynamic domains not supported\n");
1496 ret = -EPERM;
1497 goto out_unlock;
1498 }
1499
Will Deaconc752ce42014-06-25 22:46:31 +01001500 /*
1501 * Mapping the requested stage onto what we support is surprisingly
1502 * complicated, mainly because the spec allows S1+S2 SMMUs without
1503 * support for nested translation. That means we end up with the
1504 * following table:
1505 *
1506 * Requested Supported Actual
1507 * S1 N S1
1508 * S1 S1+S2 S1
1509 * S1 S2 S2
1510 * S1 S1 S1
1511 * N N N
1512 * N S1+S2 S2
1513 * N S2 S2
1514 * N S1 S1
1515 *
1516 * Note that you can't actually request stage-2 mappings.
1517 */
1518 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1519 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1520 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1521 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1522
Robin Murphy7602b872016-04-28 17:12:09 +01001523 /*
1524 * Choosing a suitable context format is even more fiddly. Until we
1525 * grow some way for the caller to express a preference, and/or move
1526 * the decision into the io-pgtable code where it arguably belongs,
1527 * just aim for the closest thing to the rest of the system, and hope
1528 * that the hardware isn't esoteric enough that we can't assume AArch64
1529 * support to be a superset of AArch32 support...
1530 */
1531 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1532 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001533 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
1534 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
1535 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
1536 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
1537 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +01001538 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1539 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1540 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1541 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1542 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1543
1544 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1545 ret = -EINVAL;
1546 goto out_unlock;
1547 }
1548
Will Deaconc752ce42014-06-25 22:46:31 +01001549 switch (smmu_domain->stage) {
1550 case ARM_SMMU_DOMAIN_S1:
1551 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1552 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001553 ias = smmu->va_size;
1554 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001555 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001556 fmt = ARM_64_LPAE_S1;
Robin Murphyb94df6f2016-08-11 17:44:06 +01001557 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +00001558 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001559 ias = min(ias, 32UL);
1560 oas = min(oas, 40UL);
Robin Murphyb94df6f2016-08-11 17:44:06 +01001561 } else {
1562 fmt = ARM_V7S;
1563 ias = min(ias, 32UL);
1564 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +01001565 }
Will Deaconc752ce42014-06-25 22:46:31 +01001566 break;
1567 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001568 /*
1569 * We will likely want to change this if/when KVM gets
1570 * involved.
1571 */
Will Deaconc752ce42014-06-25 22:46:31 +01001572 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001573 cfg->cbar = CBAR_TYPE_S2_TRANS;
1574 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001575 ias = smmu->ipa_size;
1576 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001577 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001578 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001579 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001580 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001581 ias = min(ias, 40UL);
1582 oas = min(oas, 40UL);
1583 }
Will Deaconc752ce42014-06-25 22:46:31 +01001584 break;
1585 default:
1586 ret = -EINVAL;
1587 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001588 }
1589
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001590 if (is_fast)
1591 fmt = ARM_V8L_FAST;
1592
Patrick Dalyce6786f2016-11-09 14:19:23 -08001593 if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
1594 quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001595
Patrick Dalyc190d932016-08-30 17:23:28 -07001596 /* Dynamic domains must set cbndx through domain attribute */
1597 if (!dynamic) {
1598 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
Will Deacon45ae7cf2013-06-24 18:31:25 +01001599 smmu->num_context_banks);
Patrick Dalyc190d932016-08-30 17:23:28 -07001600 if (ret < 0)
1601 goto out_unlock;
1602 cfg->cbndx = ret;
1603 }
Robin Murphyb7862e32016-04-13 18:13:03 +01001604 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001605 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1606 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001607 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001608 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001609 }
1610
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001611 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
Patrick Dalyce6786f2016-11-09 14:19:23 -08001612 .quirks = quirks,
Robin Murphyd5466352016-05-09 17:20:09 +01001613 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +00001614 .ias = ias,
1615 .oas = oas,
1616 .tlb = &arm_smmu_gather_ops,
Robin Murphy2df7a252015-07-29 19:46:06 +01001617 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +00001618 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001619
Will Deacon518f7132014-11-14 17:17:54 +00001620 smmu_domain->smmu = smmu;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001621 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1622 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001623 if (!pgtbl_ops) {
1624 ret = -ENOMEM;
1625 goto out_clear_smmu;
1626 }
1627
Patrick Dalyc11d1082016-09-01 15:52:44 -07001628 /*
1629 * assign any page table memory that might have been allocated
1630 * during alloc_io_pgtable_ops
1631 */
Patrick Dalye271f212016-10-04 13:24:49 -07001632 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001633 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001634 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001635
Robin Murphyd5466352016-05-09 17:20:09 +01001636 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001637 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Robin Murphyd7a8d042016-09-12 17:13:58 +01001638 domain->geometry.aperture_end = (1UL << ias) - 1;
1639 domain->geometry.force_aperture = true;
Will Deacon518f7132014-11-14 17:17:54 +00001640
Patrick Dalyc190d932016-08-30 17:23:28 -07001641 /* Assign an asid */
1642 ret = arm_smmu_init_asid(domain, smmu);
1643 if (ret)
1644 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001645
Patrick Dalyc190d932016-08-30 17:23:28 -07001646 if (!dynamic) {
1647 /* Initialise the context bank with our page table cfg */
1648 arm_smmu_init_context_bank(smmu_domain,
1649 &smmu_domain->pgtbl_cfg);
1650
1651 /*
1652 * Request context fault interrupt. Do this last to avoid the
1653 * handler seeing a half-initialised domain state.
1654 */
1655 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1656 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001657 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1658 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001659 if (ret < 0) {
1660 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1661 cfg->irptndx, irq);
1662 cfg->irptndx = INVALID_IRPTNDX;
1663 goto out_clear_smmu;
1664 }
1665 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001666 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001667 }
Will Deacon518f7132014-11-14 17:17:54 +00001668 mutex_unlock(&smmu_domain->init_mutex);
1669
1670 /* Publish page table ops for map/unmap */
1671 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001672 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001673
Will Deacon518f7132014-11-14 17:17:54 +00001674out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001675 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001676 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001677out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001678 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001679 return ret;
1680}
1681
Patrick Daly77db4f92016-10-14 15:34:10 -07001682static void arm_smmu_domain_reinit(struct arm_smmu_domain *smmu_domain)
1683{
1684 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1685 smmu_domain->cfg.cbndx = INVALID_CBNDX;
1686 smmu_domain->secure_vmid = VMID_INVAL;
1687}
1688
Will Deacon45ae7cf2013-06-24 18:31:25 +01001689static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1690{
Joerg Roedel1d672632015-03-26 13:43:10 +01001691 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001692 struct arm_smmu_device *smmu = smmu_domain->smmu;
1693 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01001694 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001695 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07001696 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001697 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001698
Robin Murphy7e96c742016-09-14 15:26:46 +01001699 if (!smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001700 return;
1701
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001702 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001703 if (ret) {
1704 		WARN_ONCE(ret, "Whoops, powering on smmu %p failed. Leaking context bank\n",
1705 smmu);
1706 return;
1707 }
1708
Patrick Dalyc190d932016-08-30 17:23:28 -07001709 dynamic = is_dynamic_domain(domain);
1710 if (dynamic) {
1711 arm_smmu_free_asid(domain);
1712 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001713 arm_smmu_power_off(smmu->pwr);
Patrick Dalye271f212016-10-04 13:24:49 -07001714 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001715 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001716 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001717 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly77db4f92016-10-14 15:34:10 -07001718 arm_smmu_domain_reinit(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001719 return;
1720 }
1721
Will Deacon518f7132014-11-14 17:17:54 +00001722 /*
1723 * Disable the context bank and free the page tables before freeing
1724 * it.
1725 */
Will Deacon44680ee2014-06-25 11:29:12 +01001726 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001727 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01001728
Will Deacon44680ee2014-06-25 11:29:12 +01001729 if (cfg->irptndx != INVALID_IRPTNDX) {
1730 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08001731 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001732 }
1733
Markus Elfring44830b02015-11-06 18:32:41 +01001734 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07001735 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001736 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001737 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001738 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001739 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001740
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001741 arm_smmu_power_off(smmu->pwr);
Patrick Daly77db4f92016-10-14 15:34:10 -07001742 arm_smmu_domain_reinit(smmu_domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001743}
1744
Joerg Roedel1d672632015-03-26 13:43:10 +01001745static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001746{
1747 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001748
Patrick Daly09801312016-08-29 17:02:52 -07001749 /* Do not support DOMAIN_DMA for now */
1750 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01001751 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001752 /*
1753 * Allocate the domain and initialise some of its data structures.
1754 * We can't really do anything meaningful until we've added a
1755 * master.
1756 */
1757 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1758 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001759 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001760
Robin Murphy7e96c742016-09-14 15:26:46 +01001761 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
1762 iommu_get_dma_cookie(&smmu_domain->domain))) {
Robin Murphy9adb9592016-01-26 18:06:36 +00001763 kfree(smmu_domain);
1764 return NULL;
1765 }
1766
Will Deacon518f7132014-11-14 17:17:54 +00001767 mutex_init(&smmu_domain->init_mutex);
1768 spin_lock_init(&smmu_domain->pgtbl_lock);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001769 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
1770 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07001771 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001772 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Patrick Daly77db4f92016-10-14 15:34:10 -07001773 arm_smmu_domain_reinit(smmu_domain);
Joerg Roedel1d672632015-03-26 13:43:10 +01001774
1775 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001776}
1777
Joerg Roedel1d672632015-03-26 13:43:10 +01001778static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001779{
Joerg Roedel1d672632015-03-26 13:43:10 +01001780 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01001781
1782 /*
1783 * Free the domain resources. We assume that all devices have
1784 * already been detached.
1785 */
Robin Murphy9adb9592016-01-26 18:06:36 +00001786 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001787 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001788 kfree(smmu_domain);
1789}
1790
Robin Murphy468f4942016-09-12 17:13:49 +01001791static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
1792{
1793 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphyd5b41782016-09-14 15:21:39 +01001794 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
Robin Murphy468f4942016-09-12 17:13:49 +01001795
1796 if (smr->valid)
1797 reg |= SMR_VALID;
1798 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
1799}
1800
Robin Murphya754fd12016-09-12 17:13:50 +01001801static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
1802{
1803 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
1804 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
1805 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
1806 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
1807
1808 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
1809}
1810
1811static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
1812{
1813 arm_smmu_write_s2cr(smmu, idx);
1814 if (smmu->smrs)
1815 arm_smmu_write_smr(smmu, idx);
1816}
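/*
 * Worked example (illustrative only) of the packing done in
 * arm_smmu_write_smr() above, assuming the architected SMRn layout
 * (VALID in bit 31, MASK in bits [30:16], ID in bits [14:0]):
 *
 *	smr->id = 0x0021, smr->mask = 0x000f, smr->valid = true
 *	reg = (0x0021 << SMR_ID_SHIFT) | (0x000f << SMR_MASK_SHIFT) | SMR_VALID
 *	    = 0x00000021 | 0x000f0000 | 0x80000000
 *	    = 0x800f0021
 *
 * which matches stream IDs 0x20-0x2f once written to ARM_SMMU_GR0_SMR(idx).
 */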
1817
Robin Murphy6668f692016-09-12 17:13:54 +01001818static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
Robin Murphy468f4942016-09-12 17:13:49 +01001819{
1820 struct arm_smmu_smr *smrs = smmu->smrs;
Robin Murphy6668f692016-09-12 17:13:54 +01001821 int i, free_idx = -ENOSPC;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001822
Robin Murphy6668f692016-09-12 17:13:54 +01001823 /* Stream indexing is blissfully easy */
1824 if (!smrs)
1825 return id;
Robin Murphy468f4942016-09-12 17:13:49 +01001826
Robin Murphy6668f692016-09-12 17:13:54 +01001827 /* Validating SMRs is... less so */
1828 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1829 if (!smrs[i].valid) {
1830 /*
1831 * Note the first free entry we come across, which
1832 * we'll claim in the end if nothing else matches.
1833 */
1834 if (free_idx < 0)
1835 free_idx = i;
Robin Murphy468f4942016-09-12 17:13:49 +01001836 continue;
1837 }
Robin Murphy6668f692016-09-12 17:13:54 +01001838 /*
1839 * If the new entry is _entirely_ matched by an existing entry,
1840 * then reuse that, with the guarantee that there also cannot
1841 * be any subsequent conflicting entries. In normal use we'd
1842 * expect simply identical entries for this case, but there's
1843 * no harm in accommodating the generalisation.
1844 */
1845 if ((mask & smrs[i].mask) == mask &&
1846 !((id ^ smrs[i].id) & ~smrs[i].mask))
1847 return i;
1848 /*
1849 * If the new entry has any other overlap with an existing one,
1850 * though, then there always exists at least one stream ID
1851 * which would cause a conflict, and we can't allow that risk.
1852 */
1853 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
1854 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001855 }
1856
Robin Murphy6668f692016-09-12 17:13:54 +01001857 return free_idx;
1858}
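/*
 * Worked example (illustrative only) of the matching arithmetic in
 * arm_smmu_find_sme() above:
 *
 *	existing SMR: id = 0x400, mask = 0x0ff   (covers IDs 0x400-0x4ff)
 *
 *	new id = 0x4a0, mask = 0x00f:
 *	  (0x00f & 0x0ff) == 0x00f                  -> mask is a subset
 *	  (0x4a0 ^ 0x400) & ~0x0ff == 0xa0 & ~0x0ff == 0
 *	  => entirely contained, reuse index i
 *
 *	new id = 0x500, mask = 0x1ff:
 *	  (0x1ff & 0x0ff) != 0x1ff                  -> not contained
 *	  (0x500 ^ 0x400) & ~(0x0ff | 0x1ff) == 0x100 & ~0x1ff == 0
 *	  => a conflicting stream ID exists, so return -EINVAL
 *
 *	(a disjoint entry, e.g. id = 0x800, mask = 0x0ff, fails the overlap
 *	 test and the loop simply continues the search)
 */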
1859
1860static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1861{
1862 if (--smmu->s2crs[idx].count)
1863 return false;
1864
1865 smmu->s2crs[idx] = s2cr_init_val;
1866 if (smmu->smrs)
1867 smmu->smrs[idx].valid = false;
1868
1869 return true;
1870}
1871
1872static int arm_smmu_master_alloc_smes(struct device *dev)
1873{
Robin Murphy06e393e2016-09-12 17:13:55 +01001874 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1875 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy6668f692016-09-12 17:13:54 +01001876 struct arm_smmu_device *smmu = cfg->smmu;
1877 struct arm_smmu_smr *smrs = smmu->smrs;
1878 struct iommu_group *group;
1879 int i, idx, ret;
1880
1881 mutex_lock(&smmu->stream_map_mutex);
1882 /* Figure out a viable stream map entry allocation */
Robin Murphy06e393e2016-09-12 17:13:55 +01001883 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy7e96c742016-09-14 15:26:46 +01001884 u16 sid = fwspec->ids[i];
1885 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1886
Robin Murphy6668f692016-09-12 17:13:54 +01001887 if (idx != INVALID_SMENDX) {
1888 ret = -EEXIST;
1889 goto out_err;
1890 }
1891
Robin Murphy7e96c742016-09-14 15:26:46 +01001892 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy6668f692016-09-12 17:13:54 +01001893 if (ret < 0)
1894 goto out_err;
1895
1896 idx = ret;
1897 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy7e96c742016-09-14 15:26:46 +01001898 smrs[idx].id = sid;
1899 smrs[idx].mask = mask;
Robin Murphy6668f692016-09-12 17:13:54 +01001900 smrs[idx].valid = true;
1901 }
1902 smmu->s2crs[idx].count++;
1903 cfg->smendx[i] = (s16)idx;
1904 }
1905
1906 group = iommu_group_get_for_dev(dev);
1907 if (!group)
1908 group = ERR_PTR(-ENOMEM);
1909 if (IS_ERR(group)) {
1910 ret = PTR_ERR(group);
1911 goto out_err;
1912 }
1913 iommu_group_put(group);
Robin Murphy468f4942016-09-12 17:13:49 +01001914
Will Deacon45ae7cf2013-06-24 18:31:25 +01001915 /* It worked! Now, poke the actual hardware */
Robin Murphy06e393e2016-09-12 17:13:55 +01001916 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01001917 arm_smmu_write_sme(smmu, idx);
1918 smmu->s2crs[idx].group = group;
1919 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001920
Robin Murphy6668f692016-09-12 17:13:54 +01001921 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001922 return 0;
1923
Robin Murphy6668f692016-09-12 17:13:54 +01001924out_err:
Robin Murphy468f4942016-09-12 17:13:49 +01001925 while (i--) {
Robin Murphy6668f692016-09-12 17:13:54 +01001926 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy468f4942016-09-12 17:13:49 +01001927 cfg->smendx[i] = INVALID_SMENDX;
1928 }
Robin Murphy6668f692016-09-12 17:13:54 +01001929 mutex_unlock(&smmu->stream_map_mutex);
1930 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001931}
1932
Robin Murphy06e393e2016-09-12 17:13:55 +01001933static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001934{
Robin Murphy06e393e2016-09-12 17:13:55 +01001935 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1936 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy1fb519a2016-09-12 17:13:53 +01001937 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01001938
Robin Murphy6668f692016-09-12 17:13:54 +01001939 mutex_lock(&smmu->stream_map_mutex);
Robin Murphy06e393e2016-09-12 17:13:55 +01001940 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01001941 if (arm_smmu_free_sme(smmu, idx))
1942 arm_smmu_write_sme(smmu, idx);
Robin Murphy468f4942016-09-12 17:13:49 +01001943 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001944 }
Robin Murphy6668f692016-09-12 17:13:54 +01001945 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001946}
1947
Will Deacon45ae7cf2013-06-24 18:31:25 +01001948static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphy06e393e2016-09-12 17:13:55 +01001949 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001950{
Will Deacon44680ee2014-06-25 11:29:12 +01001951 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphya754fd12016-09-12 17:13:50 +01001952 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
1953 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
1954 u8 cbndx = smmu_domain->cfg.cbndx;
Robin Murphy6668f692016-09-12 17:13:54 +01001955 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001956
Robin Murphy06e393e2016-09-12 17:13:55 +01001957 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphya754fd12016-09-12 17:13:50 +01001958 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy6668f692016-09-12 17:13:54 +01001959 continue;
Robin Murphya754fd12016-09-12 17:13:50 +01001960
1961 s2cr[idx].type = type;
1962 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
1963 s2cr[idx].cbndx = cbndx;
1964 arm_smmu_write_s2cr(smmu, idx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001965 }
1966
1967 return 0;
1968}
1969
Patrick Daly09801312016-08-29 17:02:52 -07001970static void arm_smmu_detach_dev(struct iommu_domain *domain,
1971 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001972{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001973 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07001974 struct arm_smmu_device *smmu = smmu_domain->smmu;
Patrick Daly09801312016-08-29 17:02:52 -07001975 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07001976 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07001977
1978 if (dynamic)
1979 return;
1980
Patrick Daly09801312016-08-29 17:02:52 -07001981 if (!smmu) {
1982 dev_err(dev, "Domain not attached; cannot detach!\n");
1983 return;
1984 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001985
Patrick Daly8befb662016-08-17 20:03:28 -07001986 /* Remove additional vote for atomic power */
1987 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001988 WARN_ON(arm_smmu_power_on_atomic(smmu->pwr));
1989 arm_smmu_power_off(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07001990 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001991}
1992
Patrick Dalye271f212016-10-04 13:24:49 -07001993static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07001994{
Patrick Dalye271f212016-10-04 13:24:49 -07001995 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001996 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
1997 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
1998 int source_vmid = VMID_HLOS;
1999 struct arm_smmu_pte_info *pte_info, *temp;
2000
Patrick Dalye271f212016-10-04 13:24:49 -07002001 if (!arm_smmu_is_domain_secure(smmu_domain))
2002 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002003
Patrick Dalye271f212016-10-04 13:24:49 -07002004 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002005 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2006 PAGE_SIZE, &source_vmid, 1,
2007 dest_vmids, dest_perms, 2);
2008 if (WARN_ON(ret))
2009 break;
2010 }
2011
2012 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2013 entry) {
2014 list_del(&pte_info->entry);
2015 kfree(pte_info);
2016 }
Patrick Dalye271f212016-10-04 13:24:49 -07002017 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002018}
2019
2020static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2021{
2022 int ret;
2023 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002024 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002025 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2026 struct arm_smmu_pte_info *pte_info, *temp;
2027
Patrick Dalye271f212016-10-04 13:24:49 -07002028 if (!arm_smmu_is_domain_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002029 return;
2030
2031 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2032 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2033 PAGE_SIZE, source_vmlist, 2,
2034 &dest_vmids, &dest_perms, 1);
2035 if (WARN_ON(ret))
2036 break;
2037 free_pages_exact(pte_info->virt_addr, pte_info->size);
2038 }
2039
2040 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2041 entry) {
2042 list_del(&pte_info->entry);
2043 kfree(pte_info);
2044 }
2045}
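/*
 * Illustrative summary (no new behaviour) of the ownership handoff that
 * arm_smmu_assign_table()/arm_smmu_unassign_table() perform for secure
 * domains, as encoded in the vmid/perm arrays above:
 *
 *	assign:   HLOS (RW) --hyp_assign_phys--> { HLOS: RW, secure VMID: RO }
 *	unassign: { HLOS, secure VMID } --------> HLOS (RWX), after which the
 *	          queued page table memory is freed with free_pages_exact()
 *
 * Pages are queued on pte_info_list/unassign_list by the prepare/unprepare
 * callbacks below and only handed to the hypervisor in these two batches.
 */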
2046
2047static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2048{
2049 struct arm_smmu_domain *smmu_domain = cookie;
2050 struct arm_smmu_pte_info *pte_info;
2051
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002052 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002053
2054 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2055 if (!pte_info)
2056 return;
2057
2058 pte_info->virt_addr = addr;
2059 pte_info->size = size;
2060 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2061}
2062
2063static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2064{
2065 struct arm_smmu_domain *smmu_domain = cookie;
2066 struct arm_smmu_pte_info *pte_info;
2067
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002068 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002069
2070 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2071 if (!pte_info)
2072 return -ENOMEM;
2073 pte_info->virt_addr = addr;
2074 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2075 return 0;
2076}
2077
Will Deacon45ae7cf2013-06-24 18:31:25 +01002078static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2079{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002080 int ret;
Robin Murphy06e393e2016-09-12 17:13:55 +01002081 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Will Deacon518f7132014-11-14 17:17:54 +00002082 struct arm_smmu_device *smmu;
Robin Murphy06e393e2016-09-12 17:13:55 +01002083 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly8befb662016-08-17 20:03:28 -07002084 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002085
Robin Murphy06e393e2016-09-12 17:13:55 +01002086 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002087 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2088 return -ENXIO;
2089 }
Robin Murphy06e393e2016-09-12 17:13:55 +01002090
Robin Murphy4f79b142016-10-17 12:06:21 +01002091 /*
2092 * FIXME: The arch/arm DMA API code tries to attach devices to its own
2093 * domains between of_xlate() and add_device() - we have no way to cope
2094 * with that, so until ARM gets converted to rely on groups and default
2095 * domains, just say no (but more politely than by dereferencing NULL).
2096 * This should be at least a WARN_ON once that's sorted.
2097 */
2098 if (!fwspec->iommu_priv)
2099 return -ENODEV;
2100
Robin Murphy06e393e2016-09-12 17:13:55 +01002101 smmu = fwspec_smmu(fwspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002102
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002103 /* Enable Clocks and Power */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002104 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002105 if (ret)
2106 return ret;
2107
Will Deacon518f7132014-11-14 17:17:54 +00002108 /* Ensure that the domain is finalised */
Robin Murphy06e393e2016-09-12 17:13:55 +01002109 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002110 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002111 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002112
Patrick Dalyc190d932016-08-30 17:23:28 -07002113 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002114 if (is_dynamic_domain(domain)) {
2115 ret = 0;
2116 goto out_power_off;
2117 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002118
Will Deacon45ae7cf2013-06-24 18:31:25 +01002119 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002120 * Sanity check the domain. We don't support domains across
2121 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002122 */
Robin Murphy06e393e2016-09-12 17:13:55 +01002123 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002124 dev_err(dev,
2125 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002126 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002127 ret = -EINVAL;
2128 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002129 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002130
2131 /* Looks ok, so add the device to the domain */
Robin Murphy06e393e2016-09-12 17:13:55 +01002132 ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002133
2134out_power_off:
Patrick Daly466237d2016-10-14 15:59:14 -07002135 /*
2136 * Keep an additional vote for non-atomic power until domain is
2137 * detached
2138 */
2139 if (!ret && atomic_domain) {
2140 WARN_ON(arm_smmu_power_on(smmu->pwr));
2141 arm_smmu_power_off_atomic(smmu->pwr);
2142 }
2143
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002144 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002145
Will Deacon45ae7cf2013-06-24 18:31:25 +01002146 return ret;
2147}
2148
Will Deacon45ae7cf2013-06-24 18:31:25 +01002149static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002150 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002151{
Will Deacon518f7132014-11-14 17:17:54 +00002152 int ret;
2153 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002154 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002155 	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002156
Will Deacon518f7132014-11-14 17:17:54 +00002157 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002158 return -ENODEV;
2159
Patrick Dalye271f212016-10-04 13:24:49 -07002160 arm_smmu_secure_domain_lock(smmu_domain);
2161
Will Deacon518f7132014-11-14 17:17:54 +00002162 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2163 ret = ops->map(ops, iova, paddr, size, prot);
2164 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002165
2166 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002167 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002168
Will Deacon518f7132014-11-14 17:17:54 +00002169 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002170}
2171
2172static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2173 size_t size)
2174{
Will Deacon518f7132014-11-14 17:17:54 +00002175 size_t ret;
2176 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002177 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002178 	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002179
Will Deacon518f7132014-11-14 17:17:54 +00002180 if (!ops)
2181 return 0;
2182
Patrick Daly8befb662016-08-17 20:03:28 -07002183 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002184 if (ret)
2185 return ret;
2186
Patrick Dalye271f212016-10-04 13:24:49 -07002187 arm_smmu_secure_domain_lock(smmu_domain);
2188
Will Deacon518f7132014-11-14 17:17:54 +00002189 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2190 ret = ops->unmap(ops, iova, size);
2191 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002192
Patrick Daly8befb662016-08-17 20:03:28 -07002193 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002194 /*
2195 * While splitting up block mappings, we might allocate page table
2196 	 * memory during unmap, so the vmids need to be assigned to the
2197 * memory here as well.
2198 */
2199 arm_smmu_assign_table(smmu_domain);
2200 	/* Also unassign any pages that were freed during unmap */
2201 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002202 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002203 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002204}
2205
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002206static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2207 struct scatterlist *sg, unsigned int nents, int prot)
2208{
2209 int ret;
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002210 size_t size;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002211 unsigned long flags;
2212 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2213 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2214
2215 if (!ops)
2216 return -ENODEV;
2217
Patrick Daly8befb662016-08-17 20:03:28 -07002218 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002219 if (ret)
2220 return ret;
2221
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002222 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002223 ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002224 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002225
2226 if (!ret)
2227 arm_smmu_unmap(domain, iova, size);
2228
Patrick Daly8befb662016-08-17 20:03:28 -07002229 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002230 arm_smmu_assign_table(smmu_domain);
2231
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002232 return ret;
2233}
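/*
 * Illustrative caller sketch only: arm_smmu_map_sg() is reached through the
 * generic iommu_map_sg() wrapper, and the rollback above means a caller sees
 * either the full scatterlist mapped or nothing. The domain and scatterlist
 * are assumed to have been set up elsewhere.
 */
static int __maybe_unused example_map_sglist(struct iommu_domain *domain,
			      unsigned long iova, struct scatterlist *sg,
			      unsigned int nents, size_t total_len)
{
	size_t mapped = iommu_map_sg(domain, iova, sg, nents,
				     IOMMU_READ | IOMMU_WRITE);

	/* Partial failure is rolled back internally, so short means "nothing" */
	return (mapped == total_len) ? 0 : -ENOMEM;
}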
2234
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002235static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyad441dd2016-09-15 15:50:46 -07002236 dma_addr_t iova)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002237{
Joerg Roedel1d672632015-03-26 13:43:10 +01002238 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002239 struct arm_smmu_device *smmu = smmu_domain->smmu;
2240 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2241 	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2242 struct device *dev = smmu->dev;
2243 void __iomem *cb_base;
2244 u32 tmp;
2245 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002246 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002247
2248 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2249
Robin Murphy661d9622015-05-27 17:09:34 +01002250 /* ATS1 registers can only be written atomically */
2251 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002252 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002253 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2254 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002255 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002256
2257 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2258 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002259 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002260 dev_err(dev,
2261 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2262 &iova, &phys);
2263 phys = 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002264 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002265 }
2266
Robin Murphyf9a05f02016-04-13 18:13:01 +01002267 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002268 if (phys & CB_PAR_F) {
2269 dev_err(dev, "translation fault!\n");
2270 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002271 phys = 0;
2272 } else {
2273 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002274 }
Patrick Dalyad441dd2016-09-15 15:50:46 -07002275
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002276 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002277}
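/*
 * Worked example (illustrative only) of the PAR/IOVA combination above,
 * ignoring the PHYS_MASK clamp: if the ATS1PR walk for iova 0x12345678
 * reports a physical frame of 0xabcd1000 in CB_PAR, the returned address is
 *
 *	(0xabcd1000 & ~0xfffULL) | (0x12345678 & 0xfff) = 0xabcd1678
 *
 * i.e. the hardware supplies the page frame and the low 12 bits of the
 * offset are carried over from the original IOVA.
 */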
2278
Will Deacon45ae7cf2013-06-24 18:31:25 +01002279static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002280 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002281{
Will Deacon518f7132014-11-14 17:17:54 +00002282 phys_addr_t ret;
2283 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002284 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002285 	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002286
Will Deacon518f7132014-11-14 17:17:54 +00002287 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002288 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002289
Will Deacon518f7132014-11-14 17:17:54 +00002290 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002291 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002292 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002293
Will Deacon518f7132014-11-14 17:17:54 +00002294 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002295}
2296
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002297/*
2298 * This function can sleep, and cannot be called from atomic context. Will
2299 * power on register block if required. This restriction does not apply to the
2300 * original iova_to_phys() op.
2301 */
2302static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2303 dma_addr_t iova)
2304{
2305 phys_addr_t ret = 0;
2306 unsigned long flags;
2307 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002308
Patrick Dalyad441dd2016-09-15 15:50:46 -07002309 if (smmu_domain->smmu->arch_ops &&
2310 smmu_domain->smmu->arch_ops->iova_to_phys_hard)
2311 return smmu_domain->smmu->arch_ops->iova_to_phys_hard(
2312 domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002313
2314 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2315 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2316 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyad441dd2016-09-15 15:50:46 -07002317 ret = __arm_smmu_iova_to_phys_hard(domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002318
2319 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2320
2321 return ret;
2322}
2323
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002324static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002325{
Will Deacond0948942014-06-24 17:30:10 +01002326 switch (cap) {
2327 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002328 /*
2329 * Return true here as the SMMU can always send out coherent
2330 * requests.
2331 */
2332 return true;
Will Deacond0948942014-06-24 17:30:10 +01002333 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002334 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002335 case IOMMU_CAP_NOEXEC:
2336 return true;
Will Deacond0948942014-06-24 17:30:10 +01002337 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002338 return false;
Will Deacond0948942014-06-24 17:30:10 +01002339 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002340}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002341
Robin Murphy7e96c742016-09-14 15:26:46 +01002342static int arm_smmu_match_node(struct device *dev, void *data)
2343{
2344 return dev->of_node == data;
2345}
2346
2347static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
2348{
2349 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
2350 np, arm_smmu_match_node);
2351 put_device(dev);
2352 return dev ? dev_get_drvdata(dev) : NULL;
2353}
2354
Will Deacon03edb222015-01-19 14:27:33 +00002355static int arm_smmu_add_device(struct device *dev)
2356{
Robin Murphy06e393e2016-09-12 17:13:55 +01002357 struct arm_smmu_device *smmu;
Robin Murphyd5b41782016-09-14 15:21:39 +01002358 struct arm_smmu_master_cfg *cfg;
Robin Murphy7e96c742016-09-14 15:26:46 +01002359 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyd5b41782016-09-14 15:21:39 +01002360 int i, ret;
2361
Robin Murphy7e96c742016-09-14 15:26:46 +01002362 if (using_legacy_binding) {
2363 ret = arm_smmu_register_legacy_master(dev, &smmu);
2364 fwspec = dev->iommu_fwspec;
2365 if (ret)
2366 goto out_free;
Robin Murphy22e6f6c2016-11-02 17:31:32 +00002367 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Robin Murphy7e96c742016-09-14 15:26:46 +01002368 smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
2369 if (!smmu)
2370 return -ENODEV;
2371 } else {
2372 return -ENODEV;
2373 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002374
2375 ret = -EINVAL;
Robin Murphy06e393e2016-09-12 17:13:55 +01002376 for (i = 0; i < fwspec->num_ids; i++) {
2377 u16 sid = fwspec->ids[i];
Robin Murphy7e96c742016-09-14 15:26:46 +01002378 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyd5b41782016-09-14 15:21:39 +01002379
Robin Murphy06e393e2016-09-12 17:13:55 +01002380 if (sid & ~smmu->streamid_mask) {
Robin Murphyd5b41782016-09-14 15:21:39 +01002381 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy06e393e2016-09-12 17:13:55 +01002382 sid, smmu->streamid_mask);
Robin Murphyd5b41782016-09-14 15:21:39 +01002383 goto out_free;
2384 }
Robin Murphy7e96c742016-09-14 15:26:46 +01002385 if (mask & ~smmu->smr_mask_mask) {
2386 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
2387 sid, smmu->smr_mask_mask);
2388 goto out_free;
2389 }
Robin Murphyd5b41782016-09-14 15:21:39 +01002390 }
Will Deacon03edb222015-01-19 14:27:33 +00002391
Robin Murphy06e393e2016-09-12 17:13:55 +01002392 ret = -ENOMEM;
2393 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
2394 GFP_KERNEL);
2395 if (!cfg)
2396 goto out_free;
2397
2398 cfg->smmu = smmu;
2399 fwspec->iommu_priv = cfg;
2400 while (i--)
2401 cfg->smendx[i] = INVALID_SMENDX;
2402
Robin Murphy6668f692016-09-12 17:13:54 +01002403 ret = arm_smmu_master_alloc_smes(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002404 if (ret)
2405 goto out_free;
2406
2407 return 0;
Robin Murphyd5b41782016-09-14 15:21:39 +01002408
2409out_free:
Robin Murphy06e393e2016-09-12 17:13:55 +01002410 if (fwspec)
2411 kfree(fwspec->iommu_priv);
2412 iommu_fwspec_free(dev);
Robin Murphyd5b41782016-09-14 15:21:39 +01002413 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00002414}
2415
Will Deacon45ae7cf2013-06-24 18:31:25 +01002416static void arm_smmu_remove_device(struct device *dev)
2417{
Robin Murphy06e393e2016-09-12 17:13:55 +01002418 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphya754fd12016-09-12 17:13:50 +01002419
Robin Murphy06e393e2016-09-12 17:13:55 +01002420 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyd5b41782016-09-14 15:21:39 +01002421 return;
Robin Murphya754fd12016-09-12 17:13:50 +01002422
Robin Murphy06e393e2016-09-12 17:13:55 +01002423 arm_smmu_master_free_smes(fwspec);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002424 iommu_group_remove_device(dev);
Robin Murphy06e393e2016-09-12 17:13:55 +01002425 kfree(fwspec->iommu_priv);
2426 iommu_fwspec_free(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002427}
2428
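/*
 * Devices whose stream IDs land on the same stream-mapping entries must end
 * up in the same IOMMU group: reuse any group already recorded in the
 * matching S2CRs, and only fall back to allocating a fresh PCI or platform
 * group when none of the entries has been claimed yet.
 */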
Joerg Roedelaf659932015-10-21 23:51:41 +02002429static struct iommu_group *arm_smmu_device_group(struct device *dev)
2430{
Robin Murphy06e393e2016-09-12 17:13:55 +01002431 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2432 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy6668f692016-09-12 17:13:54 +01002433 struct iommu_group *group = NULL;
2434 int i, idx;
2435
Robin Murphy06e393e2016-09-12 17:13:55 +01002436 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy6668f692016-09-12 17:13:54 +01002437 if (group && smmu->s2crs[idx].group &&
2438 group != smmu->s2crs[idx].group)
2439 return ERR_PTR(-EINVAL);
2440
2441 group = smmu->s2crs[idx].group;
2442 }
2443
2444 if (group)
2445 return group;
Joerg Roedelaf659932015-10-21 23:51:41 +02002446
2447 if (dev_is_pci(dev))
2448 group = pci_device_group(dev);
2449 else
2450 group = generic_device_group(dev);
2451
Joerg Roedelaf659932015-10-21 23:51:41 +02002452 return group;
2453}
2454
Will Deaconc752ce42014-06-25 22:46:31 +01002455static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
2456 enum iommu_attr attr, void *data)
2457{
Joerg Roedel1d672632015-03-26 13:43:10 +01002458 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002459 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01002460
2461 switch (attr) {
2462 case DOMAIN_ATTR_NESTING:
2463 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
2464 return 0;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08002465 case DOMAIN_ATTR_PT_BASE_ADDR:
2466 *((phys_addr_t *)data) =
2467 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2468 return 0;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002469 case DOMAIN_ATTR_CONTEXT_BANK:
2470 /* context bank index isn't valid until we are attached */
2471 if (smmu_domain->smmu == NULL)
2472 return -ENODEV;
2473
2474 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
2475 ret = 0;
2476 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002477 case DOMAIN_ATTR_TTBR0: {
2478 u64 val;
2479 struct arm_smmu_device *smmu = smmu_domain->smmu;
2480 /* not valid until we are attached */
2481 if (smmu == NULL)
2482 return -ENODEV;
2483
2484 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2485 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
2486 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
2487 << (TTBRn_ASID_SHIFT);
2488 *((u64 *)data) = val;
2489 ret = 0;
2490 break;
2491 }
2492 case DOMAIN_ATTR_CONTEXTIDR:
2493 /* not valid until attached */
2494 if (smmu_domain->smmu == NULL)
2495 return -ENODEV;
2496 *((u32 *)data) = smmu_domain->cfg.procid;
2497 ret = 0;
2498 break;
2499 case DOMAIN_ATTR_PROCID:
2500 *((u32 *)data) = smmu_domain->cfg.procid;
2501 ret = 0;
2502 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002503 case DOMAIN_ATTR_DYNAMIC:
2504 *((int *)data) = !!(smmu_domain->attributes
2505 & (1 << DOMAIN_ATTR_DYNAMIC));
2506 ret = 0;
2507 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002508 case DOMAIN_ATTR_NON_FATAL_FAULTS:
2509 *((int *)data) = !!(smmu_domain->attributes
2510 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
2511 ret = 0;
2512 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07002513 case DOMAIN_ATTR_S1_BYPASS:
2514 *((int *)data) = !!(smmu_domain->attributes
2515 & (1 << DOMAIN_ATTR_S1_BYPASS));
2516 ret = 0;
2517 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002518 case DOMAIN_ATTR_SECURE_VMID:
2519 *((int *)data) = smmu_domain->secure_vmid;
2520 ret = 0;
2521 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08002522 case DOMAIN_ATTR_PGTBL_INFO: {
2523 struct iommu_pgtbl_info *info = data;
2524
2525 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
2526 ret = -ENODEV;
2527 break;
2528 }
2529 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
2530 ret = 0;
2531 break;
2532 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002533 case DOMAIN_ATTR_FAST:
2534 *((int *)data) = !!(smmu_domain->attributes
2535 & (1 << DOMAIN_ATTR_FAST));
2536 ret = 0;
2537 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002538 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2539 *((int *)data) = !!(smmu_domain->attributes &
2540 (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
2541 ret = 0;
2542 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002543 default:
2544 return -ENODEV;
2545 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002546 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002547}
2548
2549static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
2550 enum iommu_attr attr, void *data)
2551{
Will Deacon518f7132014-11-14 17:17:54 +00002552 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01002553 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01002554
Will Deacon518f7132014-11-14 17:17:54 +00002555 mutex_lock(&smmu_domain->init_mutex);
2556
Will Deaconc752ce42014-06-25 22:46:31 +01002557 switch (attr) {
2558 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00002559 if (smmu_domain->smmu) {
2560 ret = -EPERM;
2561 goto out_unlock;
2562 }
2563
Will Deaconc752ce42014-06-25 22:46:31 +01002564 if (*(int *)data)
2565 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2566 else
2567 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2568
Will Deacon518f7132014-11-14 17:17:54 +00002569 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002570 case DOMAIN_ATTR_PROCID:
2571 if (smmu_domain->smmu != NULL) {
2572 dev_err(smmu_domain->smmu->dev,
2573 "cannot change procid attribute while attached\n");
2574 ret = -EBUSY;
2575 break;
2576 }
2577 smmu_domain->cfg.procid = *((u32 *)data);
2578 ret = 0;
2579 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002580 case DOMAIN_ATTR_DYNAMIC: {
2581 int dynamic = *((int *)data);
2582
2583 if (smmu_domain->smmu != NULL) {
2584 dev_err(smmu_domain->smmu->dev,
2585 "cannot change dynamic attribute while attached\n");
2586 ret = -EBUSY;
2587 break;
2588 }
2589
2590 if (dynamic)
2591 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
2592 else
2593 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
2594 ret = 0;
2595 break;
2596 }
2597 case DOMAIN_ATTR_CONTEXT_BANK:
2598 /* context bank can't be set while attached */
2599 if (smmu_domain->smmu != NULL) {
2600 ret = -EBUSY;
2601 break;
2602 }
2603 /* ... and it can only be set for dynamic contexts. */
2604 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
2605 ret = -EINVAL;
2606 break;
2607 }
2608
2609 /* this will be validated during attach */
2610 smmu_domain->cfg.cbndx = *((unsigned int *)data);
2611 ret = 0;
2612 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002613 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
2614 u32 non_fatal_faults = *((int *)data);
2615
2616 if (non_fatal_faults)
2617 smmu_domain->attributes |=
2618 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
2619 else
2620 smmu_domain->attributes &=
2621 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
2622 ret = 0;
2623 break;
2624 }
Patrick Dalye62d3362016-03-15 18:58:28 -07002625 case DOMAIN_ATTR_S1_BYPASS: {
2626 int bypass = *((int *)data);
2627
2628 /* bypass can't be changed while attached */
2629 if (smmu_domain->smmu != NULL) {
2630 ret = -EBUSY;
2631 break;
2632 }
2633 if (bypass)
2634 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
2635 else
2636 smmu_domain->attributes &=
2637 ~(1 << DOMAIN_ATTR_S1_BYPASS);
2638
2639 ret = 0;
2640 break;
2641 }
Patrick Daly8befb662016-08-17 20:03:28 -07002642 case DOMAIN_ATTR_ATOMIC:
2643 {
2644 int atomic_ctx = *((int *)data);
2645
2646 /* can't be changed while attached */
2647 if (smmu_domain->smmu != NULL) {
2648 ret = -EBUSY;
2649 break;
2650 }
2651 if (atomic_ctx)
2652 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
2653 else
2654 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
2655 break;
2656 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002657 case DOMAIN_ATTR_SECURE_VMID:
2658 if (smmu_domain->secure_vmid != VMID_INVAL) {
2659 ret = -ENODEV;
2660 WARN(1, "secure vmid already set!");
2661 break;
2662 }
2663 smmu_domain->secure_vmid = *((int *)data);
2664 break;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002665 case DOMAIN_ATTR_FAST:
2666 if (*((int *)data))
2667 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
2668 ret = 0;
2669 break;
Patrick Dalyce6786f2016-11-09 14:19:23 -08002670 case DOMAIN_ATTR_USE_UPSTREAM_HINT:
2671 /* can't be changed while attached */
2672 if (smmu_domain->smmu != NULL) {
2673 ret = -EBUSY;
2674 break;
2675 }
2676 if (*((int *)data))
2677 smmu_domain->attributes |=
2678 1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
2679 ret = 0;
2680 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002681 default:
Will Deacon518f7132014-11-14 17:17:54 +00002682 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01002683 }
Will Deacon518f7132014-11-14 17:17:54 +00002684
2685out_unlock:
2686 mutex_unlock(&smmu_domain->init_mutex);
2687 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002688}
2689
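/*
 * Generic binding translation: cell 0 of the "iommus" specifier is the
 * stream ID and the optional cell 1 is an SMR mask, packed into a single
 * fwspec ID with the mask in the upper half-word. An illustrative consumer
 * node (IDs are made up, not taken from any particular dtsi) might use:
 *
 *	iommus = <&apps_smmu 0x420 0x10>;
 */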
Robin Murphy7e96c742016-09-14 15:26:46 +01002690static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
2691{
2692 u32 fwid = 0;
2693
2694 if (args->args_count > 0)
2695 fwid |= (u16)args->args[0];
2696
2697 if (args->args_count > 1)
2698 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
2699
2700 return iommu_fwspec_add_ids(dev, &fwid, 1);
2701}
2702
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002703static void arm_smmu_trigger_fault(struct iommu_domain *domain,
2704 unsigned long flags)
2705{
2706 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2707 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2708 struct arm_smmu_device *smmu;
2709 void __iomem *cb_base;
2710
2711 if (!smmu_domain->smmu) {
2712 pr_err("Can't trigger faults on non-attached domains\n");
2713 return;
2714 }
2715
2716 smmu = smmu_domain->smmu;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002717 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002718 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002719
2720 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2721 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
2722 flags, cfg->cbndx);
2723 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07002724 /* give the interrupt time to fire... */
2725 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002726
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002727 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002728}
2729
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002730static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
2731 unsigned long offset)
2732{
2733 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2734 struct arm_smmu_device *smmu;
2735 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2736 void __iomem *cb_base;
2737 unsigned long val;
2738
2739 if (offset >= SZ_4K) {
2740 pr_err("Invalid offset: 0x%lx\n", offset);
2741 return 0;
2742 }
2743
2744 smmu = smmu_domain->smmu;
2745 if (!smmu) {
2746 WARN(1, "Can't read registers of a detached domain\n");
2747 val = 0;
2748 return val;
2749 }
2750
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002751 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002752 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002753
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002754 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2755 val = readl_relaxed(cb_base + offset);
2756
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002757 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002758 return val;
2759}
2760
2761static void arm_smmu_reg_write(struct iommu_domain *domain,
2762 unsigned long offset, unsigned long val)
2763{
2764 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2765 struct arm_smmu_device *smmu;
2766 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2767 void __iomem *cb_base;
2768
2769 if (offset >= SZ_4K) {
2770 pr_err("Invalid offset: 0x%lx\n", offset);
2771 return;
2772 }
2773
2774 smmu = smmu_domain->smmu;
2775 if (!smmu) {
2776 		WARN(1, "Can't write registers of a detached domain\n");
2777 return;
2778 }
2779
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002780 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002781 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002782
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002783 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2784 writel_relaxed(val, cb_base + offset);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002785
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002786 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002787}
2788
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08002789static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
2790{
2791 arm_smmu_tlb_inv_context(to_smmu_domain(domain));
2792}
2793
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002794static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
2795{
2796 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2797
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002798 return arm_smmu_power_on(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002799}
2800
2801static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
2802{
2803 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2804
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002805 arm_smmu_power_off(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002806}
2807
Will Deacon518f7132014-11-14 17:17:54 +00002808static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01002809 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01002810 .domain_alloc = arm_smmu_domain_alloc,
2811 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01002812 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07002813 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01002814 .map = arm_smmu_map,
2815 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002816 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01002817 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002818 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01002819 .add_device = arm_smmu_add_device,
2820 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02002821 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01002822 .domain_get_attr = arm_smmu_domain_get_attr,
2823 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy7e96c742016-09-14 15:26:46 +01002824 .of_xlate = arm_smmu_of_xlate,
Will Deacon518f7132014-11-14 17:17:54 +00002825 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002826 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002827 .reg_read = arm_smmu_reg_read,
2828 .reg_write = arm_smmu_reg_write,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08002829 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002830 .enable_config_clocks = arm_smmu_enable_config_clocks,
2831 .disable_config_clocks = arm_smmu_disable_config_clocks,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002832};
2833
Patrick Dalyad441dd2016-09-15 15:50:46 -07002834#define IMPL_DEF1_MICRO_MMU_CTRL 0
2835#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
2836#define MICRO_MMU_CTRL_IDLE (1 << 3)
2837
2838/* Definitions for implementation-defined registers */
2839#define ACTLR_QCOM_OSH_SHIFT 28
2840#define ACTLR_QCOM_OSH 1
2841
2842#define ACTLR_QCOM_ISH_SHIFT 29
2843#define ACTLR_QCOM_ISH 1
2844
2845#define ACTLR_QCOM_NSH_SHIFT 30
2846#define ACTLR_QCOM_NSH 1
2847
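/*
 * QCOM SMMUv2 halt protocol: the micro-MMU control register exposes a
 * LOCAL_HALT_REQ bit and an IDLE status bit. Halting sets the request and
 * (optionally) polls for IDLE so that no new translations are in flight
 * while implementation-defined registers are reprogrammed or ATOS is used.
 */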
2848static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002849{
2850 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002851 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002852
2853 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
2854 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
2855 0, 30000)) {
2856 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
2857 return -EBUSY;
2858 }
2859
2860 return 0;
2861}
2862
Patrick Dalyad441dd2016-09-15 15:50:46 -07002863static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002864{
2865 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
2866 u32 reg;
2867
2868 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2869 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
2870 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2871
Patrick Dalyad441dd2016-09-15 15:50:46 -07002872 return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002873}
2874
Patrick Dalyad441dd2016-09-15 15:50:46 -07002875static int qsmmuv2_halt(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002876{
Patrick Dalyad441dd2016-09-15 15:50:46 -07002877 return __qsmmuv2_halt(smmu, true);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002878}
2879
Patrick Dalyad441dd2016-09-15 15:50:46 -07002880static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002881{
Patrick Dalyad441dd2016-09-15 15:50:46 -07002882 return __qsmmuv2_halt(smmu, false);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002883}
2884
Patrick Dalyad441dd2016-09-15 15:50:46 -07002885static void qsmmuv2_resume(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002886{
2887 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
2888 u32 reg;
2889
2890 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2891 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
2892 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2893}
2894
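/*
 * Implementation-specific reset for QCOM SMMUv2: force the shareability
 * attribute overrides via ACTLR in every context bank (permitted only while
 * SCTLR.M is clear), then replay the "attach-impl-defs" register writes from
 * DT with the SMMU halted.
 */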
Patrick Dalyad441dd2016-09-15 15:50:46 -07002895static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002896{
2897 int i;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002898 u32 val;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002899 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002900 void __iomem *cb_base;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002901
Patrick Dalyad441dd2016-09-15 15:50:46 -07002902 /*
2903 * SCTLR.M must be disabled here per ARM SMMUv2 spec
2904 * to prevent table walks with an inconsistent state.
2905 */
2906 for (i = 0; i < smmu->num_context_banks; ++i) {
2907 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
2908 val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
2909 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
2910 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
2911 writel_relaxed(val, cb_base + ARM_SMMU_CB_ACTLR);
2912 }
2913
2914 /* Program implementation defined registers */
2915 qsmmuv2_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002916 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
2917 writel_relaxed(regs[i].value,
2918 ARM_SMMU_GR0(smmu) + regs[i].offset);
Patrick Dalyad441dd2016-09-15 15:50:46 -07002919 qsmmuv2_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002920}
2921
Patrick Dalyad441dd2016-09-15 15:50:46 -07002922static phys_addr_t __qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
2923 dma_addr_t iova, bool halt)
2924{
2925 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2926 struct arm_smmu_device *smmu = smmu_domain->smmu;
2927 int ret;
2928 phys_addr_t phys = 0;
2929 unsigned long flags;
2930
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002931 ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07002932 if (ret)
2933 return 0;
2934
2935 if (halt) {
2936 ret = qsmmuv2_halt(smmu);
2937 if (ret)
2938 goto out_power_off;
2939 }
2940
2941 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2942 spin_lock(&smmu->atos_lock);
2943 phys = __arm_smmu_iova_to_phys_hard(domain, iova);
2944 spin_unlock(&smmu->atos_lock);
2945 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2946
2947 if (halt)
2948 qsmmuv2_resume(smmu);
2949
2950out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002951 arm_smmu_power_off(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07002952 return phys;
2953}
2954
2955static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
2956 dma_addr_t iova)
2957{
2958 return __qsmmuv2_iova_to_phys_hard(domain, iova, true);
2959}
2960
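/*
 * Fault-time diagnostics: with the SMMU halted and stalling disabled, run
 * the hardware address translation twice, once with the TLB as-is and once
 * after invalidating the context, so the fault handler can tell a stale TLB
 * entry apart from a genuinely bad page table mapping.
 */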
2961static void qsmmuv2_iova_to_phys_fault(
2962 struct iommu_domain *domain,
2963 dma_addr_t iova, phys_addr_t *phys,
2964 phys_addr_t *phys_post_tlbiall)
2965{
2966 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2967 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2968 struct arm_smmu_device *smmu;
2969 void __iomem *cb_base;
2970 u64 sctlr, sctlr_orig;
2971 u32 fsr;
2972
2973 smmu = smmu_domain->smmu;
2974 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2975
2976 qsmmuv2_halt_nowait(smmu);
2977
2978 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
2979
2980 qsmmuv2_wait_for_halt(smmu);
2981
2982 /* clear FSR to allow ATOS to log any faults */
2983 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
2984 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
2985
2986 /* disable stall mode momentarily */
2987 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
2988 sctlr = sctlr_orig & ~SCTLR_CFCFG;
2989 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
2990
2991 *phys = __qsmmuv2_iova_to_phys_hard(domain, iova, false);
2992 arm_smmu_tlb_inv_context(smmu_domain);
2993 *phys_post_tlbiall = __qsmmuv2_iova_to_phys_hard(domain, iova, false);
2994
2995 /* restore SCTLR */
2996 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
2997
2998 qsmmuv2_resume(smmu);
2999}
3000
3001struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
3002 .device_reset = qsmmuv2_device_reset,
3003 .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
3004 .iova_to_phys_fault = qsmmuv2_iova_to_phys_fault,
3005};
3006
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003007static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003008{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003009 int i;
3010 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003011 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003012 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003013
Peng Fan3ca37122016-05-03 21:50:30 +08003014 /*
3015 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
3016 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
3017 * bit is only present in MMU-500r2 onwards.
3018 */
3019 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
3020 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
3021 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
3022 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
3023 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
3024 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
3025 }
3026
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003027 /* Make sure all context banks are disabled and clear CB_FSR */
3028 for (i = 0; i < smmu->num_context_banks; ++i) {
3029 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3030 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
3031 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003032 /*
3033 * Disable MMU-500's not-particularly-beneficial next-page
3034 * prefetcher for the sake of errata #841119 and #826419.
3035 */
3036 if (smmu->model == ARM_MMU500) {
3037 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
3038 reg &= ~ARM_MMU500_ACTLR_CPRE;
3039 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
3040 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003041 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003042}
3043
3044static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
3045{
3046 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy468f4942016-09-12 17:13:49 +01003047 int i;
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003048 u32 reg;
3049
3050 /* clear global FSR */
3051 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3052 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3053
Robin Murphy468f4942016-09-12 17:13:49 +01003054 /*
3055 * Reset stream mapping groups: Initial values mark all SMRn as
3056 * invalid and all S2CRn as bypass unless overridden.
3057 */
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003058 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
Robin Murphya754fd12016-09-12 17:13:50 +01003059 for (i = 0; i < smmu->num_mapping_groups; ++i)
3060 arm_smmu_write_sme(smmu, i);
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003061
3062 arm_smmu_context_bank_reset(smmu);
3063 }
Will Deacon1463fe42013-07-31 19:21:27 +01003064
Will Deacon45ae7cf2013-06-24 18:31:25 +01003065 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003066 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
3067 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
3068
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003069 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003070
Will Deacon45ae7cf2013-06-24 18:31:25 +01003071 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003072 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003073
3074 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003075 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003076
Robin Murphy25a1c962016-02-10 14:25:33 +00003077 /* Enable client access, handling unmatched streams as appropriate */
3078 reg &= ~sCR0_CLIENTPD;
3079 if (disable_bypass)
3080 reg |= sCR0_USFCFG;
3081 else
3082 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003083
3084 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003085 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003086
3087 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003088 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003089
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003090 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3091 reg |= sCR0_VMID16EN;
3092
Will Deacon45ae7cf2013-06-24 18:31:25 +01003093 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003094 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003095 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Dalyd7476202016-09-08 18:23:28 -07003096
3097 /* Manage any implementation defined features */
3098 arm_smmu_arch_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003099}
3100
3101static int arm_smmu_id_size_to_bits(int size)
3102{
3103 switch (size) {
3104 case 0:
3105 return 32;
3106 case 1:
3107 return 36;
3108 case 2:
3109 return 40;
3110 case 3:
3111 return 42;
3112 case 4:
3113 return 44;
3114 case 5:
3115 default:
3116 return 48;
3117 }
3118}
3119
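/*
 * The optional "attach-impl-defs" DT property is a flat list of
 * <offset value> pairs, relative to the global register space, that are
 * written back each time the SMMU is reset. A hypothetical example
 * (offsets and values are illustrative only):
 *
 *	attach-impl-defs = <0x6000 0x270>,
 *			   <0x6060 0x1055>;
 */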
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003120static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
3121{
3122 struct device *dev = smmu->dev;
3123 int i, ntuples, ret;
3124 u32 *tuples;
3125 struct arm_smmu_impl_def_reg *regs, *regit;
3126
3127 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
3128 return 0;
3129
3130 ntuples /= sizeof(u32);
3131 if (ntuples % 2) {
3132 dev_err(dev,
3133 "Invalid number of attach-impl-defs registers: %d\n",
3134 ntuples);
3135 return -EINVAL;
3136 }
3137
3138 regs = devm_kmalloc(
3139 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
3140 GFP_KERNEL);
3141 if (!regs)
3142 return -ENOMEM;
3143
3144 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
3145 if (!tuples)
3146 return -ENOMEM;
3147
3148 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
3149 tuples, ntuples);
3150 if (ret)
3151 return ret;
3152
3153 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
3154 regit->offset = tuples[i];
3155 regit->value = tuples[i + 1];
3156 }
3157
3158 devm_kfree(dev, tuples);
3159
3160 smmu->impl_def_attach_registers = regs;
3161 smmu->num_impl_def_attach_registers = ntuples / 2;
3162
3163 return 0;
3164}
3165
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003166
3167static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003168{
3169 const char *cname;
3170 struct property *prop;
3171 int i;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003172 struct device *dev = pwr->dev;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003173
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003174 pwr->num_clocks =
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003175 of_property_count_strings(dev->of_node, "clock-names");
3176
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003177 if (pwr->num_clocks < 1) {
3178 pwr->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003179 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07003180 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003181
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003182 pwr->clocks = devm_kzalloc(
3183 dev, sizeof(*pwr->clocks) * pwr->num_clocks,
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003184 GFP_KERNEL);
3185
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003186 if (!pwr->clocks)
3187 return -ENOMEM;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003188
3189 i = 0;
3190 of_property_for_each_string(dev->of_node, "clock-names",
3191 prop, cname) {
3192 struct clk *c = devm_clk_get(dev, cname);
3193
3194 if (IS_ERR(c)) {
3195 			dev_err(dev, "Couldn't get clock: %s\n",
3196 cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07003197 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003198 }
3199
3200 if (clk_get_rate(c) == 0) {
3201 long rate = clk_round_rate(c, 1000);
3202
3203 clk_set_rate(c, rate);
3204 }
3205
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003206 pwr->clocks[i] = c;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003207
3208 ++i;
3209 }
3210 return 0;
3211}
3212
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003213static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003214{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003215 const char *cname;
3216 struct property *prop;
3217 int i, ret = 0;
3218 struct device *dev = pwr->dev;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003219
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003220 pwr->num_gdscs =
3221 of_property_count_strings(dev->of_node, "qcom,regulator-names");
3222
3223 if (pwr->num_gdscs < 1) {
3224 pwr->num_gdscs = 0;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003225 return 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003226 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003227
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003228 pwr->gdscs = devm_kzalloc(
3229 dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
3230
3231 if (!pwr->gdscs)
3232 return -ENOMEM;
3233
3234 i = 0;
3235 of_property_for_each_string(dev->of_node, "qcom,regulator-names",
3236 prop, cname)
3237 		pwr->gdscs[i++].supply = cname;
3238
3239 ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
3240 return ret;
3241}
3242
3243static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
3244{
3245 struct device *dev = pwr->dev;
3246
3247 /* We don't want the bus APIs to print an error message */
3248 if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
3249 dev_dbg(dev, "No bus scaling info\n");
3250 return 0;
3251 }
3252
3253 pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
3254 if (!pwr->bus_dt_data) {
3255 dev_err(dev, "Unable to read bus-scaling from devicetree\n");
3256 return -EINVAL;
3257 }
3258
3259 pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
3260 if (!pwr->bus_client) {
3261 dev_err(dev, "Bus client registration failed\n");
3262 msm_bus_cl_clear_pdata(pwr->bus_dt_data);
3263 return -EINVAL;
3264 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003265
3266 return 0;
3267}
3268
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003269/*
3270 * Cleanup done by devm. Any non-devm resources must clean up themselves.
3271 */
3272static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
3273 struct platform_device *pdev)
Patrick Daly2764f952016-09-06 19:22:44 -07003274{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003275 struct arm_smmu_power_resources *pwr;
3276 int ret;
Patrick Daly2764f952016-09-06 19:22:44 -07003277
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003278 pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
3279 if (!pwr)
3280 return ERR_PTR(-ENOMEM);
Patrick Daly2764f952016-09-06 19:22:44 -07003281
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003282 pwr->dev = &pdev->dev;
3283 pwr->pdev = pdev;
3284 mutex_init(&pwr->power_lock);
3285 spin_lock_init(&pwr->clock_refs_lock);
Patrick Daly2764f952016-09-06 19:22:44 -07003286
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003287 ret = arm_smmu_init_clocks(pwr);
3288 if (ret)
3289 return ERR_PTR(ret);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003290
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003291 ret = arm_smmu_init_regulators(pwr);
3292 if (ret)
3293 return ERR_PTR(ret);
3294
3295 ret = arm_smmu_init_bus_scaling(pwr);
3296 if (ret)
3297 return ERR_PTR(ret);
3298
3299 return pwr;
Patrick Daly2764f952016-09-06 19:22:44 -07003300}
3301
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003302/*
3303 * Bus APIs are not devm-safe.
3304 */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003305static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003306{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003307 msm_bus_scale_unregister_client(pwr->bus_client);
3308 msm_bus_cl_clear_pdata(pwr->bus_dt_data);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003309}
3310
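/*
 * Probe-time discovery: read the ID0-ID2 configuration registers to work
 * out which translation stages, stream-matching resources, context banks,
 * address sizes and page table formats this particular SMMU instance
 * implements, and size the driver's data structures to match.
 */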
Will Deacon45ae7cf2013-06-24 18:31:25 +01003311static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
3312{
3313 unsigned long size;
3314 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
3315 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003316 bool cttw_dt, cttw_reg;
Robin Murphya754fd12016-09-12 17:13:50 +01003317 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003318
Mitchel Humpherysba822582015-10-20 11:37:41 -07003319 dev_dbg(smmu->dev, "probing hardware configuration...\n");
3320 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01003321 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003322
3323 /* ID0 */
3324 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01003325
3326 /* Restrict available stages based on module parameter */
3327 if (force_stage == 1)
3328 id &= ~(ID0_S2TS | ID0_NTS);
3329 else if (force_stage == 2)
3330 id &= ~(ID0_S1TS | ID0_NTS);
3331
Will Deacon45ae7cf2013-06-24 18:31:25 +01003332 if (id & ID0_S1TS) {
3333 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003334 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003335 }
3336
3337 if (id & ID0_S2TS) {
3338 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003339 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003340 }
3341
3342 if (id & ID0_NTS) {
3343 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003344 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003345 }
3346
3347 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01003348 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003349 dev_err(smmu->dev, "\tno translation support!\n");
3350 return -ENODEV;
3351 }
3352
Robin Murphyb7862e32016-04-13 18:13:03 +01003353 if ((id & ID0_S1TS) &&
3354 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003355 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003356 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003357 }
3358
Robin Murphybae2c2d2015-07-29 19:46:05 +01003359 /*
3360 * In order for DMA API calls to work properly, we must defer to what
3361 * the DT says about coherency, regardless of what the hardware claims.
3362 * Fortunately, this also opens up a workaround for systems where the
3363 * ID register value has ended up configured incorrectly.
3364 */
3365 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
3366 cttw_reg = !!(id & ID0_CTTW);
3367 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003368 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003369 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003370 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01003371 cttw_dt ? "" : "non-");
3372 if (cttw_dt != cttw_reg)
3373 dev_notice(smmu->dev,
3374 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003375
Robin Murphy53867802016-09-12 17:13:48 +01003376 /* Max. number of entries we have for stream matching/indexing */
3377 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
3378 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003379 if (id & ID0_SMS) {
Robin Murphy53867802016-09-12 17:13:48 +01003380 u32 smr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003381
3382 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy53867802016-09-12 17:13:48 +01003383 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
3384 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003385 dev_err(smmu->dev,
3386 "stream-matching supported, but no SMRs present!\n");
3387 return -ENODEV;
3388 }
3389
Robin Murphy53867802016-09-12 17:13:48 +01003390 /*
3391 * SMR.ID bits may not be preserved if the corresponding MASK
3392 * bits are set, so check each one separately. We can reject
3393 * masters later if they try to claim IDs outside these masks.
3394 */
3395 smr = smmu->streamid_mask << SMR_ID_SHIFT;
3396 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
3397 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
3398 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003399
Robin Murphy53867802016-09-12 17:13:48 +01003400 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
3401 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
3402 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
3403 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
Dhaval Patel031d7462015-05-09 14:47:29 -07003404
Robin Murphy468f4942016-09-12 17:13:49 +01003405 /* Zero-initialised to mark as invalid */
3406 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
3407 GFP_KERNEL);
3408 if (!smmu->smrs)
3409 return -ENOMEM;
3410
Robin Murphy53867802016-09-12 17:13:48 +01003411 dev_notice(smmu->dev,
3412 			   "\tstream matching with %lu register groups, mask 0x%x\n",
3413 size, smmu->smr_mask_mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003414 }
Robin Murphya754fd12016-09-12 17:13:50 +01003415 /* s2cr->type == 0 means translation, so initialise explicitly */
3416 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
3417 GFP_KERNEL);
3418 if (!smmu->s2crs)
3419 return -ENOMEM;
3420 for (i = 0; i < size; i++)
3421 smmu->s2crs[i] = s2cr_init_val;
3422
Robin Murphy53867802016-09-12 17:13:48 +01003423 smmu->num_mapping_groups = size;
Robin Murphy6668f692016-09-12 17:13:54 +01003424 mutex_init(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003425
Robin Murphy7602b872016-04-28 17:12:09 +01003426 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
3427 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
3428 if (!(id & ID0_PTFS_NO_AARCH32S))
3429 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
3430 }
3431
Will Deacon45ae7cf2013-06-24 18:31:25 +01003432 /* ID1 */
3433 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01003434 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003435
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003436 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00003437 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01003438 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003439 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07003440 dev_warn(smmu->dev,
3441 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
3442 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003443
Will Deacon518f7132014-11-14 17:17:54 +00003444 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003445 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
3446 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
3447 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
3448 return -ENODEV;
3449 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07003450 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01003451 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01003452 /*
3453 * Cavium CN88xx erratum #27704.
3454 * Ensure ASID and VMID allocation is unique across all SMMUs in
3455 * the system.
3456 */
3457 if (smmu->model == CAVIUM_SMMUV2) {
3458 smmu->cavium_id_base =
3459 atomic_add_return(smmu->num_context_banks,
3460 &cavium_smmu_context_count);
3461 smmu->cavium_id_base -= smmu->num_context_banks;
3462 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003463
3464 /* ID2 */
3465 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
3466 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003467 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003468
Will Deacon518f7132014-11-14 17:17:54 +00003469 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003470 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003471 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003472
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003473 if (id & ID2_VMID16)
3474 smmu->features |= ARM_SMMU_FEAT_VMID16;
3475
Robin Murphyf1d84542015-03-04 16:41:05 +00003476 /*
3477 * What the page table walker can address actually depends on which
3478 * descriptor format is in use, but since a) we don't know that yet,
3479 * and b) it can vary per context bank, this will have to do...
3480 */
3481 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
3482 dev_warn(smmu->dev,
3483 "failed to set DMA mask for table walker\n");
3484
Robin Murphyb7862e32016-04-13 18:13:03 +01003485 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00003486 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01003487 if (smmu->version == ARM_SMMU_V1_64K)
3488 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003489 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003490 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00003491 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00003492 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01003493 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00003494 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01003495 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00003496 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01003497 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003498 }
3499
Robin Murphy7602b872016-04-28 17:12:09 +01003500 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01003501 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01003502 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01003503 if (smmu->features &
3504 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01003505 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01003506 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01003507 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01003508 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01003509 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01003510
Robin Murphyd5466352016-05-09 17:20:09 +01003511 if (arm_smmu_ops.pgsize_bitmap == -1UL)
3512 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
3513 else
3514 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003515 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01003516 smmu->pgsize_bitmap);
3517
Will Deacon518f7132014-11-14 17:17:54 +00003518
Will Deacon28d60072014-09-01 16:24:48 +01003519 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003520 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
3521 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003522
3523 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003524 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
3525 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003526
Will Deacon45ae7cf2013-06-24 18:31:25 +01003527 return 0;
3528}
3529
Patrick Dalyd7476202016-09-08 18:23:28 -07003530static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
3531{
3532 if (!smmu->arch_ops)
3533 return 0;
3534 if (!smmu->arch_ops->init)
3535 return 0;
3536 return smmu->arch_ops->init(smmu);
3537}
3538
3539static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
3540{
3541 if (!smmu->arch_ops)
3542 return;
3543 if (!smmu->arch_ops->device_reset)
3544 return;
3545 return smmu->arch_ops->device_reset(smmu);
3546}
3547
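/*
 * Per-compatible match data: ties an architecture version and implementation
 * model to an optional set of arch_ops callbacks, so QCOM-specific reset and
 * ATOS behaviour can be hooked in from the of_device_id table below.
 */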
Robin Murphy67b65a32016-04-13 18:12:57 +01003548struct arm_smmu_match_data {
3549 enum arm_smmu_arch_version version;
3550 enum arm_smmu_implementation model;
Patrick Dalyd7476202016-09-08 18:23:28 -07003551 struct arm_smmu_arch_ops *arch_ops;
Robin Murphy67b65a32016-04-13 18:12:57 +01003552};
3553
Patrick Dalyd7476202016-09-08 18:23:28 -07003554#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops) \
3555static struct arm_smmu_match_data name = { \
3556.version = ver, \
3557.model = imp, \
3558.arch_ops = ops, \
3559} \
Robin Murphy67b65a32016-04-13 18:12:57 +01003560
Patrick Daly1f8a2882016-09-12 17:32:05 -07003561struct arm_smmu_arch_ops qsmmuv500_arch_ops;
3562
Patrick Dalyd7476202016-09-08 18:23:28 -07003563ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
3564ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
3565ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
3566ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
3567ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003568ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
Patrick Daly1f8a2882016-09-12 17:32:05 -07003569ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
3570 &qsmmuv500_arch_ops);
Robin Murphy67b65a32016-04-13 18:12:57 +01003571
Joerg Roedel09b52692014-10-02 12:24:45 +02003572static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01003573 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
3574 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
3575 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01003576 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003577 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01003578 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07003579 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Patrick Daly1f8a2882016-09-12 17:32:05 -07003580 { .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
Robin Murphy09360402014-08-28 17:51:59 +01003581 { },
3582};
3583MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
3584
Patrick Daly1f8a2882016-09-12 17:32:05 -07003585static int qsmmuv500_tbu_register(struct device *dev, void *data);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003586static int arm_smmu_device_dt_probe(struct platform_device *pdev)
3587{
Robin Murphy67b65a32016-04-13 18:12:57 +01003588 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003589 struct resource *res;
3590 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003591 struct device *dev = &pdev->dev;
Robin Murphyd5b41782016-09-14 15:21:39 +01003592 int num_irqs, i, err;
Robin Murphy7e96c742016-09-14 15:26:46 +01003593 bool legacy_binding;
3594
3595 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
3596 if (legacy_binding && !using_generic_binding) {
3597 if (!using_legacy_binding)
3598 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
3599 using_legacy_binding = true;
3600 } else if (!legacy_binding && !using_legacy_binding) {
3601 using_generic_binding = true;
3602 } else {
3603 dev_err(dev, "not probing due to mismatched DT properties\n");
3604 return -ENODEV;
3605 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003606
3607 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
3608 if (!smmu) {
3609 dev_err(dev, "failed to allocate arm_smmu_device\n");
3610 return -ENOMEM;
3611 }
3612 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08003613 spin_lock_init(&smmu->atos_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07003614 idr_init(&smmu->asid_idr);
3615 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003616
Robin Murphyfe52d4f2016-09-12 17:13:52 +01003617 data = of_device_get_match_data(dev);
Robin Murphy67b65a32016-04-13 18:12:57 +01003618 smmu->version = data->version;
3619 smmu->model = data->model;
Patrick Dalyd7476202016-09-08 18:23:28 -07003620 smmu->arch_ops = data->arch_ops;
Robin Murphy09360402014-08-28 17:51:59 +01003621
Will Deacon45ae7cf2013-06-24 18:31:25 +01003622 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01003623 smmu->base = devm_ioremap_resource(dev, res);
3624 if (IS_ERR(smmu->base))
3625 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003626 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003627
3628 if (of_property_read_u32(dev->of_node, "#global-interrupts",
3629 &smmu->num_global_irqs)) {
3630 dev_err(dev, "missing #global-interrupts property\n");
3631 return -ENODEV;
3632 }
3633
3634 num_irqs = 0;
3635 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
3636 num_irqs++;
3637 if (num_irqs > smmu->num_global_irqs)
3638 smmu->num_context_irqs++;
3639 }
3640
Andreas Herrmann44a08de2013-10-01 13:39:07 +01003641 if (!smmu->num_context_irqs) {
3642 dev_err(dev, "found %d interrupts but expected at least %d\n",
3643 num_irqs, smmu->num_global_irqs + 1);
3644 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003645 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003646
3647 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
3648 GFP_KERNEL);
3649 if (!smmu->irqs) {
3650 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
3651 return -ENOMEM;
3652 }
3653
3654 for (i = 0; i < num_irqs; ++i) {
3655 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07003656
Will Deacon45ae7cf2013-06-24 18:31:25 +01003657 if (irq < 0) {
3658 dev_err(dev, "failed to get irq index %d\n", i);
3659 return -ENODEV;
3660 }
3661 smmu->irqs[i] = irq;
3662 }
3663
Dhaval Patel031d7462015-05-09 14:47:29 -07003664 parse_driver_options(smmu);
3665
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003666 smmu->pwr = arm_smmu_init_power_resources(pdev);
3667 if (IS_ERR(smmu->pwr))
3668 return PTR_ERR(smmu->pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003669
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003670 err = arm_smmu_power_on(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07003671 if (err)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003672 goto out_exit_power_resources;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003673
3674 err = arm_smmu_device_cfg_probe(smmu);
3675 if (err)
3676 goto out_power_off;
3677
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003678 err = arm_smmu_parse_impl_def_registers(smmu);
3679 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01003680 goto out_power_off;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003681
Robin Murphyb7862e32016-04-13 18:13:03 +01003682 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01003683 smmu->num_context_banks != smmu->num_context_irqs) {
3684 dev_err(dev,
Mitchel Humpherys73230ce2014-12-11 17:18:02 -08003685 "found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
3686 smmu->num_context_irqs, smmu->num_context_banks,
3687 smmu->num_context_banks);
3688 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003689 }
3690
Will Deacon45ae7cf2013-06-24 18:31:25 +01003691 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08003692 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
3693 NULL, arm_smmu_global_fault,
3694 IRQF_ONESHOT | IRQF_SHARED,
3695 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003696 if (err) {
3697 dev_err(dev, "failed to request global IRQ %d (%u)\n",
3698 i, smmu->irqs[i]);
Robin Murphyd5b41782016-09-14 15:21:39 +01003699 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003700 }
3701 }
3702
Patrick Dalyd7476202016-09-08 18:23:28 -07003703 err = arm_smmu_arch_init(smmu);
3704 if (err)
Robin Murphyd5b41782016-09-14 15:21:39 +01003705 goto out_power_off;
Patrick Dalyd7476202016-09-08 18:23:28 -07003706
Robin Murphy06e393e2016-09-12 17:13:55 +01003707 of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
Robin Murphyfe52d4f2016-09-12 17:13:52 +01003708 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01003709 arm_smmu_device_reset(smmu);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003710 arm_smmu_power_off(smmu->pwr);
Patrick Dalyd7476202016-09-08 18:23:28 -07003711
Robin Murphy7e96c742016-09-14 15:26:46 +01003712 /* Oh, for a proper bus abstraction */
3713 if (!iommu_present(&platform_bus_type))
3714 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
3715#ifdef CONFIG_ARM_AMBA
3716 if (!iommu_present(&amba_bustype))
3717 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
3718#endif
3719#ifdef CONFIG_PCI
3720 if (!iommu_present(&pci_bus_type)) {
3721 pci_request_acs();
3722 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
3723 }
3724#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01003725 return 0;
3726
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003727out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003728 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003729
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003730out_exit_power_resources:
3731 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003732
Will Deacon45ae7cf2013-06-24 18:31:25 +01003733 return err;
3734}
3735
3736static int arm_smmu_device_remove(struct platform_device *pdev)
3737{
Robin Murphyfe52d4f2016-09-12 17:13:52 +01003738 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003739
3740 if (!smmu)
3741 return -ENODEV;
3742
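	/* The SMMU must be powered up before its registers can be touched. */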
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003743 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003744 return -EINVAL;
3745
Will Deaconecfadb62013-07-31 19:21:28 +01003746 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyfe52d4f2016-09-12 17:13:52 +01003747 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003748
Patrick Dalyc190d932016-08-30 17:23:28 -07003749 idr_destroy(&smmu->asid_idr);
3750
Will Deacon45ae7cf2013-06-24 18:31:25 +01003751 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07003752 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003753 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003754
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003755 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07003756
Will Deacon45ae7cf2013-06-24 18:31:25 +01003757 return 0;
3758}
3759
Will Deacon45ae7cf2013-06-24 18:31:25 +01003760static struct platform_driver arm_smmu_driver = {
3761 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003762 .name = "arm-smmu",
3763 .of_match_table = of_match_ptr(arm_smmu_of_match),
3764 },
3765 .probe = arm_smmu_device_dt_probe,
3766 .remove = arm_smmu_device_remove,
3767};
3768
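/*
 * arm_smmu_init() can be reached both from the IOMMU_OF_DECLARE hooks
 * below and from subsys_initcall, so the 'registered' flag keeps the
 * platform driver from being registered twice.
 */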
3769static int __init arm_smmu_init(void)
3770{
Robin Murphy7e96c742016-09-14 15:26:46 +01003771 static bool registered;
3772 int ret = 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003773
Robin Murphy7e96c742016-09-14 15:26:46 +01003774 if (!registered) {
3775 ret = platform_driver_register(&arm_smmu_driver);
3776 registered = !ret;
Wei Chen112c8982016-06-13 17:20:17 +08003777 }
Robin Murphy7e96c742016-09-14 15:26:46 +01003778 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003779}
3780
3781static void __exit arm_smmu_exit(void)
3782{
3783	platform_driver_unregister(&arm_smmu_driver);
3784}
3785
Andreas Herrmannb1950b22013-10-01 13:39:05 +01003786subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003787module_exit(arm_smmu_exit);
3788
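/*
 * Early OF hook: make sure the platform driver is registered, then
 * create the SMMU platform device directly from its DT node so the
 * IOMMU can come up before its client devices probe.
 */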
Robin Murphy7e96c742016-09-14 15:26:46 +01003789static int __init arm_smmu_of_init(struct device_node *np)
3790{
3791 int ret = arm_smmu_init();
3792
3793 if (ret)
3794 return ret;
3795
3796 if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
3797 return -ENODEV;
3798
3799 return 0;
3800}
3801IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
3802IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
3803IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
3804IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
3805IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
3806IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
3807IOMMU_OF_DECLARE(qcom_smmuv2, "qcom,smmu-v2", arm_smmu_of_init);
3808IOMMU_OF_DECLARE(qcom_mmu500, "qcom,qsmmu-v500", arm_smmu_of_init);
3809
Patrick Daly1f8a2882016-09-12 17:32:05 -07003810#define DEBUG_SID_HALT_REG 0x0
3811#define DEBUG_SID_HALT_VAL (0x1 << 16)
3812
3813#define DEBUG_SR_HALT_ACK_REG 0x20
3814#define DEBUG_SR_HALT_ACK_VAL (0x1 << 1)
3815
3816#define TBU_DBG_TIMEOUT_US 30000
3817
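/*
 * QSMMU-v500 exposes its TBUs (translation buffer units) as child
 * devices of the SMMU node. Each TBU is kept on a list so it can be
 * powered up and halted around accesses to implementation-defined
 * registers.
 */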
3818struct qsmmuv500_tbu_device {
3819 struct list_head list;
3820 struct device *dev;
3821 struct arm_smmu_device *smmu;
3822 void __iomem *base;
3823 void __iomem *status_reg;
3824
3825 struct arm_smmu_power_resources *pwr;
3826
3827 /* Protects halt count */
3828 spinlock_t halt_lock;
3829 u32 halt_count;
3830};
3831
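/* Power on every TBU, rolling back the ones already powered on failure. */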
3832static int qsmmuv500_tbu_power_on_all(struct arm_smmu_device *smmu)
3833{
3834 struct qsmmuv500_tbu_device *tbu;
3835 struct list_head *list = smmu->archdata;
3836 int ret = 0;
3837
3838 list_for_each_entry(tbu, list, list) {
3839 ret = arm_smmu_power_on(tbu->pwr);
3840 if (ret)
3841 break;
3842 }
3843 if (!ret)
3844 return 0;
3845
3846 list_for_each_entry_continue_reverse(tbu, list, list) {
3847 arm_smmu_power_off(tbu->pwr);
3848 }
3849 return ret;
3850}
3851
3852static void qsmmuv500_tbu_power_off_all(struct arm_smmu_device *smmu)
3853{
3854 struct qsmmuv500_tbu_device *tbu;
3855 struct list_head *list = smmu->archdata;
3856
3857 list_for_each_entry_reverse(tbu, list, list) {
3858 arm_smmu_power_off(tbu->pwr);
3859 }
3860}
3861
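/*
 * Halting is reference counted: the first caller sets the halt bit in
 * DEBUG_SID_HALT_REG and polls DEBUG_SR_HALT_ACK_REG for the
 * acknowledgement; nested callers only bump halt_count.
 */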
3862static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu)
3863{
3864 unsigned long flags;
3865 u32 val;
3866 void __iomem *base;
3867
3868 spin_lock_irqsave(&tbu->halt_lock, flags);
3869 if (tbu->halt_count) {
3870 tbu->halt_count++;
3871 spin_unlock_irqrestore(&tbu->halt_lock, flags);
3872 return 0;
3873 }
3874
3875 base = tbu->base;
3876 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
3877 val |= DEBUG_SID_HALT_VAL;
3878 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
3879
3880 if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG,
3881 val, (val & DEBUG_SR_HALT_ACK_VAL),
3882 0, TBU_DBG_TIMEOUT_US)) {
3883 dev_err(tbu->dev, "Couldn't halt TBU!\n");
3884 spin_unlock_irqrestore(&tbu->halt_lock, flags);
3885 return -ETIMEDOUT;
3886 }
3887
3888 tbu->halt_count = 1;
3889 spin_unlock_irqrestore(&tbu->halt_lock, flags);
3890 return 0;
3891}
3892
3893static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
3894{
3895 unsigned long flags;
3896 u32 val;
3897 void __iomem *base;
3898
3899 spin_lock_irqsave(&tbu->halt_lock, flags);
3900 if (!tbu->halt_count) {
3901		WARN(1, "%s: bad tbu->halt_count\n", dev_name(tbu->dev));
3902 spin_unlock_irqrestore(&tbu->halt_lock, flags);
3903 return;
3904
3905 } else if (tbu->halt_count > 1) {
3906 tbu->halt_count--;
3907 spin_unlock_irqrestore(&tbu->halt_lock, flags);
3908 return;
3909 }
3910
3911 base = tbu->base;
3912 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
3913 val &= ~DEBUG_SID_HALT_VAL;
3914 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
3915
3916 tbu->halt_count = 0;
3917 spin_unlock_irqrestore(&tbu->halt_lock, flags);
3918}
3919
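/* Halt every TBU, resuming any already-halted TBUs if one of them fails. */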
3920static int qsmmuv500_halt_all(struct arm_smmu_device *smmu)
3921{
3922 struct qsmmuv500_tbu_device *tbu;
3923 struct list_head *list = smmu->archdata;
3924 int ret = 0;
3925
3926 list_for_each_entry(tbu, list, list) {
3927 ret = qsmmuv500_tbu_halt(tbu);
3928 if (ret)
3929 break;
3930 }
3931
3932 if (!ret)
3933 return 0;
3934
3935 list_for_each_entry_continue_reverse(tbu, list, list) {
3936 qsmmuv500_tbu_resume(tbu);
3937 }
3938 return ret;
3939}
3940
3941static void qsmmuv500_resume_all(struct arm_smmu_device *smmu)
3942{
3943 struct qsmmuv500_tbu_device *tbu;
3944 struct list_head *list = smmu->archdata;
3945
3946 list_for_each_entry(tbu, list, list) {
3947 qsmmuv500_tbu_resume(tbu);
3948 }
3949}
3950
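/*
 * Power up and halt every TBU around the implementation-defined
 * register writes, then resume and power the TBUs back down.
 */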
3951static void qsmmuv500_device_reset(struct arm_smmu_device *smmu)
3952{
3953 int i, ret;
3954 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
3955
3956 ret = qsmmuv500_tbu_power_on_all(smmu);
3957 if (ret)
3958 return;
3959
3960	/* Program the implementation-defined registers */
3961 qsmmuv500_halt_all(smmu);
3962 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
3963 writel_relaxed(regs[i].value,
3964 ARM_SMMU_GR0(smmu) + regs[i].offset);
3965 qsmmuv500_resume_all(smmu);
3966 qsmmuv500_tbu_power_off_all(smmu);
3967}
3968
3969static int qsmmuv500_tbu_register(struct device *dev, void *data)
3970{
3971 struct arm_smmu_device *smmu = data;
3972 struct qsmmuv500_tbu_device *tbu;
3973 struct list_head *list = smmu->archdata;
3974
3975 if (!dev->driver) {
3976 dev_err(dev, "TBU failed probe, QSMMUV500 cannot continue!\n");
3977 return -EINVAL;
3978 }
3979
3980 tbu = dev_get_drvdata(dev);
3981
3982 INIT_LIST_HEAD(&tbu->list);
3983 tbu->smmu = smmu;
3984 list_add(&tbu->list, list);
3985 return 0;
3986}
3987
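/*
 * Arch init populates the child TBU devices from the DT and links each
 * one onto the list kept in smmu->archdata; probe is aborted if any
 * TBU has not bound to its driver.
 */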
3988static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
3989{
3990 struct device *dev = smmu->dev;
3991 struct list_head *list;
3992 int ret;
3993
3994 list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
3995 if (!list)
3996 return -ENOMEM;
3997
3998 INIT_LIST_HEAD(list);
3999 smmu->archdata = list;
4000
4001 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
4002 if (ret)
4003 return ret;
4004
4005 /* Attempt to register child devices */
4006 ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
4007 if (ret)
4008 return -EINVAL;
4009
4010 return 0;
4011}
4012
4013struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
4014 .init = qsmmuv500_arch_init,
4015 .device_reset = qsmmuv500_device_reset,
4016};
4017
4018static const struct of_device_id qsmmuv500_tbu_of_match[] = {
4019 {.compatible = "qcom,qsmmuv500-tbu"},
4020 {}
4021};
4022
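/*
 * A TBU node is expected to provide "base" and "status-reg" memory
 * resources (matched by name below). A purely illustrative, hypothetical
 * node might look like:
 *
 *	tbu@150e1000 {
 *		compatible = "qcom,qsmmuv500-tbu";
 *		reg = <0x150e1000 0x1000>, <0x150e2000 0x4>;
 *		reg-names = "base", "status-reg";
 *	};
 */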
4023static int qsmmuv500_tbu_probe(struct platform_device *pdev)
4024{
4025 struct resource *res;
4026 struct device *dev = &pdev->dev;
4027 struct qsmmuv500_tbu_device *tbu;
4028
4029 tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
4030 if (!tbu)
4031 return -ENOMEM;
4032
4033 INIT_LIST_HEAD(&tbu->list);
4034 tbu->dev = dev;
4035 spin_lock_init(&tbu->halt_lock);
4036
4037 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
4038 tbu->base = devm_ioremap_resource(dev, res);
4039 if (IS_ERR(tbu->base))
4040 return PTR_ERR(tbu->base);
4041
4042 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "status-reg");
4043 tbu->status_reg = devm_ioremap_resource(dev, res);
4044 if (IS_ERR(tbu->status_reg))
4045 return PTR_ERR(tbu->status_reg);
4046
4047 tbu->pwr = arm_smmu_init_power_resources(pdev);
4048 if (IS_ERR(tbu->pwr))
4049 return PTR_ERR(tbu->pwr);
4050
4051 dev_set_drvdata(dev, tbu);
4052 return 0;
4053}
4054
4055static struct platform_driver qsmmuv500_tbu_driver = {
4056 .driver = {
4057 .name = "qsmmuv500-tbu",
4058 .of_match_table = of_match_ptr(qsmmuv500_tbu_of_match),
4059 },
4060 .probe = qsmmuv500_tbu_probe,
4061};
4062
4063static int __init qsmmuv500_tbu_init(void)
4064{
4065 return platform_driver_register(&qsmmuv500_tbu_driver);
4066}
4067subsys_initcall(qsmmuv500_tbu_init);
4068
Will Deacon45ae7cf2013-06-24 18:31:25 +01004069MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
4070MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
4071MODULE_LICENSE("GPL v2");