/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/of_platform.h>
#include <linux/msm-bus.h>
#include <dt-bindings/msm/msm-bus-ids.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		500000	/* 500ms */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FSRRESTORE		0x5c
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define TLBSTATUS_SACTIVE		(1 << 0)
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass = 1;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
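/*
 * When the driver is built in, the parameters above can be set on the kernel
 * command line; illustrative (not mandatory) values:
 *   arm-smmu.force_stage=2 arm-smmu.disable_bypass=0
 */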

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
	QCOM_SMMUV500,
};

struct arm_smmu_device;
struct arm_smmu_arch_ops {
	int (*init)(struct arm_smmu_device *smmu);
	void (*device_reset)(struct arm_smmu_device *smmu);
	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
					 dma_addr_t iova);
	void (*iova_to_phys_fault)(struct iommu_domain *domain,
				   dma_addr_t iova, phys_addr_t *phys1,
				   phys_addr_t *phys_post_tlbiall);
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_smr {
	u8 idx;
	u16 mask;
	u16 id;
};

struct arm_smmu_master_cfg {
	int num_streamids;
	u16 streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr *smrs;
};

struct arm_smmu_master {
	struct device_node *of_node;
	struct rb_node node;
	struct arm_smmu_master_cfg cfg;
};

/*
 * Describes resources required for on/off power operation.
 * Separate reference count is provided for atomic/nonatomic
 * operations.
 */
struct arm_smmu_power_resources {
	struct platform_device *pdev;
	struct device *dev;

	struct clk **clocks;
	int num_clocks;

	struct regulator_bulk_data *gdscs;
	int num_gdscs;

	uint32_t bus_client;
	struct msm_bus_scale_pdata *bus_dt_data;

	/* Protects power_count */
	struct mutex power_lock;
	int power_count;

	/* Protects clock_refs_count */
	spinlock_t clock_refs_lock;
	int clock_refs_count;
};

struct arm_smmu_device {
	struct device *dev;

	void __iomem *base;
	unsigned long size;
	unsigned long pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32 features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
#define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
	u32 options;
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;

	u32 num_context_banks;
	u32 num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t irptndx;

	u32 num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long va_size;
	unsigned long ipa_size;
	unsigned long pa_size;
	unsigned long pgsize_bitmap;

	u32 num_global_irqs;
	u32 num_context_irqs;
	unsigned int *irqs;

	struct list_head list;
	struct rb_root masters;

	u32 cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg *impl_def_attach_registers;
	unsigned int num_impl_def_attach_registers;

	struct arm_smmu_power_resources *pwr;

	spinlock_t atos_lock;

	/* protects idr */
	struct mutex idr_mutex;
	struct idr asid_idr;

	struct arm_smmu_arch_ops *arch_ops;
	void *archdata;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8 cbndx;
	u8 irptndx;
	u32 cbar;
	u32 procid;
	u16 asid;
	enum arm_smmu_context_fmt fmt;
};
#define INVALID_IRPTNDX			0xff
#define INVALID_CBNDX			0xff
#define INVALID_ASID			0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg)	((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_pte_info {
	void *virt_addr;
	size_t size;
	struct list_head entry;
};

struct arm_smmu_domain {
	struct arm_smmu_device *smmu;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	spinlock_t pgtbl_lock;
	struct arm_smmu_cfg cfg;
	enum arm_smmu_domain_stage stage;
	struct mutex init_mutex; /* Protects smmu pointer */
	u32 attributes;
	u32 secure_vmid;
	struct list_head pte_info_list;
	struct list_head unassign_list;
	struct mutex assign_lock;
	struct list_head secure_pool_list;
	struct iommu_domain domain;
};

struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ 0, NULL},
};

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);

static int arm_smmu_arch_init(struct arm_smmu_device *smmu);
static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_dbg(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
{
	return (smmu_domain->secure_vmid != VMID_INVAL);
}

static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_lock(&smmu_domain->assign_lock);
}

static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_unlock(&smmu_domain->assign_lock);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

struct iommus_entry {
	struct list_head list;
	struct device_node *node;
	u16 streamids[MAX_MASTER_STREAMIDS];
	int num_sids;
};

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct iommus_entry *entry)
{
	int i;
	struct arm_smmu_master *master;
	struct device *dev = smmu->dev;

	master = find_smmu_master(smmu, entry->node);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			entry->node->name);
		return -EBUSY;
	}

	if (entry->num_sids > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, entry->node->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node = entry->node;
	master->cfg.num_streamids = entry->num_sids;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = entry->streamids[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				entry->node->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

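/*
 * Walk every device node carrying an "iommus" property and register the
 * stream IDs that reference this SMMU instance. An illustrative (hypothetical)
 * consumer node for a one-cell binding would look like:
 *
 *	dev@0 {
 *		iommus = <&smmu 0x21>;
 *	};
 */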
static int arm_smmu_parse_iommus_properties(struct arm_smmu_device *smmu,
					    int *num_masters)
{
	struct of_phandle_args iommuspec;
	struct device_node *master;

	*num_masters = 0;

	for_each_node_with_property(master, "iommus") {
		int arg_ind = 0;
		struct iommus_entry *entry, *n;
		LIST_HEAD(iommus);

		while (!of_parse_phandle_with_args(
				master, "iommus", "#iommu-cells",
				arg_ind, &iommuspec)) {
			if (iommuspec.np != smmu->dev->of_node) {
				arg_ind++;
				continue;
			}

			list_for_each_entry(entry, &iommus, list)
				if (entry->node == master)
					break;
			if (&entry->list == &iommus) {
				entry = devm_kzalloc(smmu->dev, sizeof(*entry),
						     GFP_KERNEL);
				if (!entry)
					return -ENOMEM;
				entry->node = master;
				list_add(&entry->list, &iommus);
			}
			switch (iommuspec.args_count) {
			case 0:
				/*
				 * For pci-e devices the SIDs are provided
				 * at device attach time.
				 */
				break;
			case 1:
				entry->num_sids++;
				entry->streamids[entry->num_sids - 1]
					= iommuspec.args[0];
				break;
			default:
				dev_err(smmu->dev, "iommus property has wrong #iommu-cells");
				return -EINVAL;
			}
			arg_ind++;
		}

		list_for_each_entry_safe(entry, n, &iommus, list) {
			int rc = register_smmu_master(smmu, entry);

			if (rc) {
				dev_err(smmu->dev, "Couldn't register %s\n",
					entry->node->name);
			} else {
				(*num_masters)++;
			}
			list_del(&entry->list);
			devm_kfree(smmu->dev, entry);
		}
	}

	return 0;
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

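/*
 * Simple allocator for SMMU resource indices (context banks and stream
 * mapping groups): find a clear bit in the given bitmap and atomically
 * claim it.
 */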
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_prepare(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
			while (i--)
				clk_unprepare(pwr->clocks[i]);
			break;
		}
	}
	return ret;
}

static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_unprepare(pwr->clocks[i - 1]);
}

static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_enable(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
			while (i--)
				clk_disable(pwr->clocks[i]);
			break;
		}
	}

	return ret;
}

static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_disable(pwr->clocks[i - 1]);
}

static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return 0;
	return msm_bus_scale_client_update_request(pwr->bus_client, 1);
}

static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return;
	WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
}

/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count > 0) {
		pwr->clock_refs_count++;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return 0;
	}

	ret = arm_smmu_enable_clocks(pwr);
	if (!ret)
		pwr->clock_refs_count = 1;

	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
	return ret;
}

/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
{
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count == 0) {
		WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;

	} else if (pwr->clock_refs_count > 1) {
		pwr->clock_refs_count--;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;
	}

	arm_smmu_disable_clocks(pwr);

	pwr->clock_refs_count = 0;
	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
}

static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
{
	int ret;

	mutex_lock(&pwr->power_lock);
	if (pwr->power_count > 0) {
		pwr->power_count += 1;
		mutex_unlock(&pwr->power_lock);
		return 0;
	}

	ret = regulator_bulk_enable(pwr->num_gdscs, pwr->gdscs);
	if (ret)
		goto out_unlock;

	ret = arm_smmu_request_bus(pwr);
	if (ret)
		goto out_disable_regulators;

	ret = arm_smmu_prepare_clocks(pwr);
	if (ret)
		goto out_disable_bus;

	pwr->power_count = 1;
	mutex_unlock(&pwr->power_lock);
	return 0;

out_disable_bus:
	arm_smmu_unrequest_bus(pwr);
out_disable_regulators:
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
out_unlock:
	mutex_unlock(&pwr->power_lock);
	return ret;
}

static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
{
	mutex_lock(&pwr->power_lock);
	if (pwr->power_count == 0) {
		WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
		mutex_unlock(&pwr->power_lock);
		return;

	} else if (pwr->power_count > 1) {
		pwr->power_count--;
		mutex_unlock(&pwr->power_lock);
		return;
	}

	arm_smmu_unprepare_clocks(pwr);
	arm_smmu_unrequest_bus(pwr);
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);

	mutex_unlock(&pwr->power_lock);
}

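/*
 * Full (sleepable) power-on: regulators, bus votes and clk_prepare are taken
 * under power_lock first, then the atomic clk_enable path runs under
 * clock_refs_lock. arm_smmu_power_off releases them in the reverse order.
 */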
static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
{
	int ret;

	ret = arm_smmu_power_on_slow(pwr);
	if (ret)
		return ret;

	ret = arm_smmu_power_on_atomic(pwr);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	arm_smmu_power_off_slow(pwr);
	return ret;
}

static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
{
	arm_smmu_power_off_atomic(pwr);
	arm_smmu_power_off_slow(pwr);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static int arm_smmu_domain_power_on(struct iommu_domain *domain,
				    struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain)
		return arm_smmu_power_on_atomic(smmu->pwr);

	return arm_smmu_power_on(smmu->pwr);
}

/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static void arm_smmu_domain_power_off(struct iommu_domain *domain,
				      struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain) {
		arm_smmu_power_off_atomic(smmu->pwr);
		return;
	}

	arm_smmu_power_off(smmu->pwr);
}

/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
				 int cbndx)
{
	void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
	u32 val;

	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
				      !(val & TLBSTATUS_SACTIVE),
				      0, TLB_LOOP_TIMEOUT))
		dev_err(smmu->dev, "TLBSYNC timeout!\n");
}

static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
}

/* Must be called with clocks/regulators enabled */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
		__arm_smmu_tlb_sync(smmu);
	}
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

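/*
 * Page-table pages for secure domains are expensive to (un)assign to the
 * secure VM, so freed pages are parked on a per-domain pool below and reused
 * by later allocations instead of going straight back to the page allocator.
 */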
struct arm_smmu_secure_pool_chunk {
	void *addr;
	size_t size;
	struct list_head list;
};

static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
					 size_t size)
{
	struct arm_smmu_secure_pool_chunk *it;

	list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
		if (it->size == size) {
			void *addr = it->addr;

			list_del(&it->list);
			kfree(it);
			return addr;
		}
	}

	return NULL;
}

static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
				    void *addr, size_t size)
{
	struct arm_smmu_secure_pool_chunk *chunk;

	chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
	if (!chunk)
		return -ENOMEM;

	chunk->addr = addr;
	chunk->size = size;
	memset(addr, 0, size);
	list_add(&chunk->list, &smmu_domain->secure_pool_list);

	return 0;
}

static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_secure_pool_chunk *it, *i;

	list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
		arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
		/* pages will be freed later (after being unassigned) */
		kfree(it);
	}
}

static void *arm_smmu_alloc_pages_exact(void *cookie,
					size_t size, gfp_t gfp_mask)
{
	int ret;
	void *page;
	struct arm_smmu_domain *smmu_domain = cookie;

	if (!arm_smmu_is_domain_secure(smmu_domain))
		return alloc_pages_exact(size, gfp_mask);

	page = arm_smmu_secure_pool_remove(smmu_domain, size);
	if (page)
		return page;

	page = alloc_pages_exact(size, gfp_mask);
	if (page) {
		ret = arm_smmu_prepare_pgtable(page, cookie);
		if (ret) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return page;
}

static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	if (!arm_smmu_is_domain_secure(smmu_domain)) {
		free_pages_exact(virt, size);
		return;
	}

	if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
		arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
}

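/*
 * TLB maintenance and page-table page allocation callbacks handed to the
 * io-pgtable library when a domain's page tables are set up.
 */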
static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
	.alloc_pages_exact = arm_smmu_alloc_pages_exact,
	.free_pages_exact = arm_smmu_free_pages_exact,
};

static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
					 dma_addr_t iova, u32 fsr)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	phys_addr_t phys;
	phys_addr_t phys_post_tlbiall;

	smmu = smmu_domain->smmu;

	if (smmu->arch_ops && smmu->arch_ops->iova_to_phys_fault) {
		smmu->arch_ops->iova_to_phys_fault(domain, iova, &phys,
						   &phys_post_tlbiall);
	} else {
		phys = arm_smmu_iova_to_phys_hard(domain, iova);
		arm_smmu_tlb_inv_context(smmu_domain);
		phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
	}

	if (phys != phys_post_tlbiall) {
		dev_err(smmu->dev,
			"ATOS results differed across TLBIALL...\n"
			"Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
	}
	if (!phys_post_tlbiall) {
		dev_err(smmu->dev,
			"ATOS still failed. If the page tables look good (check the software table walk) then hardware might be misbehaving.\n");
	}

	return phys_post_tlbiall;
}

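/*
 * Context bank fault handler: powers the SMMU on, decodes FSR/FSYNR0/FAR,
 * reports the fault to any registered client handler and, depending on the
 * outcome, terminates or leaves the stalled transaction for the client.
 */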
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret, tmp;
	u32 fsr, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;
	void __iomem *gr1_base;
	bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
	phys_addr_t phys_soft;
	u32 frsynra;
	bool non_fatal_fault = !!(smmu_domain->attributes &
				  DOMAIN_ATTR_NON_FATAL_FAULTS);

	static DEFINE_RATELIMIT_STATE(_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	ret = arm_smmu_power_on(smmu->pwr);
	if (ret)
		return IRQ_NONE;

	gr1_base = ARM_SMMU_GR1(smmu);
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT)) {
		ret = IRQ_NONE;
		goto out_power_off;
	}

	if (fatal_asf && (fsr & FSR_ASF)) {
		dev_err(smmu->dev,
			"Took an address size fault. Refusing to recover.\n");
		BUG();
	}

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
	if (fsr & FSR_TF)
		flags |= IOMMU_FAULT_TRANSLATION;
	if (fsr & FSR_PF)
		flags |= IOMMU_FAULT_PERMISSION;
	if (fsr & FSR_EF)
		flags |= IOMMU_FAULT_EXTERNAL;
	if (fsr & FSR_SS)
		flags |= IOMMU_FAULT_TRANSACTION_STALLED;

	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	phys_soft = arm_smmu_iova_to_phys(domain, iova);
	frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
	frsynra &= CBFRSYNRA_SID_MASK;
	tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
	if (!tmp || (tmp == -EBUSY)) {
		dev_dbg(smmu->dev,
			"Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
			iova, fsr, fsynr, cfg->cbndx);
		dev_dbg(smmu->dev,
			"soft iova-to-phys=%pa\n", &phys_soft);
		ret = IRQ_HANDLED;
		resume = RESUME_TERMINATE;
	} else {
		phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
							      fsr);
		if (__ratelimit(&_rs)) {
			dev_err(smmu->dev,
				"Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
				iova, fsr, fsynr, cfg->cbndx);
			dev_err(smmu->dev, "FAR = %016lx\n",
				(unsigned long)iova);
			dev_err(smmu->dev,
				"FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
				fsr,
				(fsr & 0x02) ? "TF " : "",
				(fsr & 0x04) ? "AFF " : "",
				(fsr & 0x08) ? "PF " : "",
				(fsr & 0x10) ? "EF " : "",
				(fsr & 0x20) ? "TLBMCF " : "",
				(fsr & 0x40) ? "TLBLKF " : "",
				(fsr & 0x80) ? "MHF " : "",
				(fsr & 0x40000000) ? "SS " : "",
				(fsr & 0x80000000) ? "MULTI " : "");
			dev_err(smmu->dev,
				"soft iova-to-phys=%pa\n", &phys_soft);
			if (!phys_soft)
				dev_err(smmu->dev,
					"SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
					dev_name(smmu->dev));
			dev_err(smmu->dev,
				"hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
			dev_err(smmu->dev, "SID=0x%x\n", frsynra);
		}
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
		if (!non_fatal_fault) {
			dev_err(smmu->dev,
				"Unhandled arm-smmu context fault!\n");
			BUG();
		}
	}

	/*
	 * If the client returns -EBUSY, do not clear FSR and do not RESUME
	 * if stalled. This is required to keep the IOMMU client stalled on
	 * the outstanding fault. This gives the client a chance to take any
	 * debug action and then terminate the stalled transaction.
	 * So, the sequence in case of stall on fault should be:
	 * 1) Do not clear FSR or write to RESUME here
	 * 2) Client takes any debug action
	 * 3) Client terminates the stalled transaction and resumes the IOMMU
1375 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1376 * not before so that the fault remains outstanding. This ensures
1377 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1378 * need to be terminated.
1379 */
1380 if (tmp != -EBUSY) {
1381 /* Clear the faulting FSR */
1382 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001383
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001384 /*
1385 * Barrier required to ensure that the FSR is cleared
1386 * before resuming SMMU operation
1387 */
1388 wmb();
1389
1390 /* Retry or terminate any stalled transactions */
1391 if (fsr & FSR_SS)
1392 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1393 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001394
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001395out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001396 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001397
Patrick Daly5ba28112016-08-30 19:18:52 -07001398 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001399}
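
/*
 * Illustrative sketch only, not used by this driver: a minimal client fault
 * handler that follows the stall/resume sequence documented in
 * arm_smmu_context_fault() above. The function name is hypothetical; a real
 * client would register its handler with iommu_set_fault_handler() and later
 * terminate or resume the stalled transaction and clear FSR from its own
 * context.
 */
static int __maybe_unused example_client_fault_handler(struct iommu_domain *domain,
							struct device *dev,
							unsigned long iova,
							int flags, void *token)
{
	/* Keep the transaction stalled so the client can collect debug state */
	if (flags & IOMMU_FAULT_TRANSACTION_STALLED)
		return -EBUSY;

	/* Report the fault as handled; the driver terminates and clears FSR */
	return 0;
}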
1400
1401static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1402{
1403 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1404 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001405 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001406
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001407 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001408 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001409
Will Deacon45ae7cf2013-06-24 18:31:25 +01001410 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1411 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1412 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1413 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1414
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001415 if (!gfsr) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001416 arm_smmu_power_off(smmu->pwr);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001417 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001418 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001419
Will Deacon45ae7cf2013-06-24 18:31:25 +01001420 dev_err_ratelimited(smmu->dev,
1421 "Unexpected global fault, this could be serious\n");
1422 dev_err_ratelimited(smmu->dev,
1423 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1424 gfsr, gfsynr0, gfsynr1, gfsynr2);
1425
1426 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001427 arm_smmu_power_off(smmu->pwr);
Will Deaconadaba322013-07-31 19:21:26 +01001428 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001429}
1430
Will Deacon518f7132014-11-14 17:17:54 +00001431static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1432 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001433{
1434 u32 reg;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001435 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001436 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +01001437 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1438 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001439 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001440
Will Deacon45ae7cf2013-06-24 18:31:25 +01001441 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001442 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1443 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001444
Will Deacon4a1c93c2015-03-04 12:21:03 +00001445 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001446 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1447 reg = CBA2R_RW64_64BIT;
1448 else
1449 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001450 /* 16-bit VMIDs live in CBA2R */
1451 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001452 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001453
Will Deacon4a1c93c2015-03-04 12:21:03 +00001454 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
1455 }
1456
Will Deacon45ae7cf2013-06-24 18:31:25 +01001457 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001458 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001459 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001460 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001461
Will Deacon57ca90f2014-02-06 14:59:05 +00001462 /*
1463 * Use the weakest shareability/memory types, so they are
1464 * overridden by the ttbcr/pte.
1465 */
1466 if (stage1) {
1467 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1468 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001469 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1470 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001471 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001472 }
Will Deacon44680ee2014-06-25 11:29:12 +01001473 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001474
Will Deacon518f7132014-11-14 17:17:54 +00001475 /* TTBRs */
1476 if (stage1) {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001477 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
Will Deacon45ae7cf2013-06-24 18:31:25 +01001478
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001479 reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001480 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001481
1482 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001483 reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001484 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
Will Deacon518f7132014-11-14 17:17:54 +00001485 } else {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001486 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001487 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +00001488 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001489
Will Deacon518f7132014-11-14 17:17:54 +00001490 /* TTBCR */
1491 if (stage1) {
1492 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1493 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
1494 if (smmu->version > ARM_SMMU_V1) {
1495 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
Will Deacon5dc56162015-05-08 17:44:22 +01001496 reg |= TTBCR2_SEP_UPSTREAM;
Will Deacon518f7132014-11-14 17:17:54 +00001497 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001498 }
1499 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001500 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
1501 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001502 }
1503
Will Deacon518f7132014-11-14 17:17:54 +00001504 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001505 if (stage1) {
Will Deacon518f7132014-11-14 17:17:54 +00001506 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
Will Deacon45ae7cf2013-06-24 18:31:25 +01001507 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Will Deacon518f7132014-11-14 17:17:54 +00001508 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1509 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001510 }
1511
Will Deacon45ae7cf2013-06-24 18:31:25 +01001512 /* SCTLR */
Patrick Dalye62d3362016-03-15 18:58:28 -07001513 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_EAE_SBOP;
1514
1515 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) ||
1516 !stage1)
1517 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001518 if (stage1)
1519 reg |= SCTLR_S1_ASIDPNE;
1520#ifdef __BIG_ENDIAN
1521 reg |= SCTLR_E;
1522#endif
Will Deacon25724842013-08-21 13:49:53 +01001523 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001524}
1525
Patrick Dalyc190d932016-08-30 17:23:28 -07001526static int arm_smmu_init_asid(struct iommu_domain *domain,
1527 struct arm_smmu_device *smmu)
1528{
1529 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1530 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1531 bool dynamic = is_dynamic_domain(domain);
1532 int ret;
1533
1534 if (!dynamic) {
1535 cfg->asid = cfg->cbndx + 1;
1536 } else {
1537 mutex_lock(&smmu->idr_mutex);
1538 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1539 smmu->num_context_banks + 2,
1540 MAX_ASID + 1, GFP_KERNEL);
1541
1542 mutex_unlock(&smmu->idr_mutex);
1543 if (ret < 0) {
1544 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1545 ret);
1546 return ret;
1547 }
1548 cfg->asid = ret;
1549 }
1550 return 0;
1551}
1552
1553static void arm_smmu_free_asid(struct iommu_domain *domain)
1554{
1555 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1556 struct arm_smmu_device *smmu = smmu_domain->smmu;
1557 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1558 bool dynamic = is_dynamic_domain(domain);
1559
1560 if (cfg->asid == INVALID_ASID || !dynamic)
1561 return;
1562
1563 mutex_lock(&smmu->idr_mutex);
1564 idr_remove(&smmu->asid_idr, cfg->asid);
1565 mutex_unlock(&smmu->idr_mutex);
1566}
1567
Will Deacon45ae7cf2013-06-24 18:31:25 +01001568static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Will Deacon44680ee2014-06-25 11:29:12 +01001569 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001570{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001571 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001572 unsigned long ias, oas;
1573 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001574 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001575 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001576 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001577 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyc190d932016-08-30 17:23:28 -07001578 bool dynamic;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001579
Will Deacon518f7132014-11-14 17:17:54 +00001580 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001581 if (smmu_domain->smmu)
1582 goto out_unlock;
1583
Patrick Dalyc190d932016-08-30 17:23:28 -07001584 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1585 smmu_domain->cfg.asid = INVALID_ASID;
1586
Robin Murphy98006992016-04-20 14:53:33 +01001587 /* We're bypassing these SIDs, so don't allocate an actual context */
1588 if (domain->type == IOMMU_DOMAIN_DMA) {
1589 smmu_domain->smmu = smmu;
1590 goto out_unlock;
1591 }
1592
Patrick Dalyc190d932016-08-30 17:23:28 -07001593 dynamic = is_dynamic_domain(domain);
1594 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1595 dev_err(smmu->dev, "dynamic domains not supported\n");
1596 ret = -EPERM;
1597 goto out_unlock;
1598 }
1599
Will Deaconc752ce42014-06-25 22:46:31 +01001600 /*
1601 * Mapping the requested stage onto what we support is surprisingly
1602 * complicated, mainly because the spec allows S1+S2 SMMUs without
1603 * support for nested translation. That means we end up with the
1604 * following table:
1605 *
1606 * Requested Supported Actual
1607 * S1 N S1
1608 * S1 S1+S2 S1
1609 * S1 S2 S2
1610 * S1 S1 S1
1611 * N N N
1612 * N S1+S2 S2
1613 * N S2 S2
1614 * N S1 S1
1615 *
1616 * Note that you can't actually request stage-2 mappings.
1617 */
1618 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1619 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1620 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1621 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1622
Robin Murphy7602b872016-04-28 17:12:09 +01001623 /*
1624 * Choosing a suitable context format is even more fiddly. Until we
1625 * grow some way for the caller to express a preference, and/or move
1626 * the decision into the io-pgtable code where it arguably belongs,
1627 * just aim for the closest thing to the rest of the system, and hope
1628 * that the hardware isn't esoteric enough that we can't assume AArch64
1629 * support to be a superset of AArch32 support...
1630 */
1631 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1632 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
1633 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1634 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1635 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1636 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1637 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1638
1639 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1640 ret = -EINVAL;
1641 goto out_unlock;
1642 }
1643
Will Deaconc752ce42014-06-25 22:46:31 +01001644 switch (smmu_domain->stage) {
1645 case ARM_SMMU_DOMAIN_S1:
1646 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1647 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001648 ias = smmu->va_size;
1649 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001650 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001651 fmt = ARM_64_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001652 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001653 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001654 ias = min(ias, 32UL);
1655 oas = min(oas, 40UL);
1656 }
Will Deaconc752ce42014-06-25 22:46:31 +01001657 break;
1658 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001659 /*
1660 * We will likely want to change this if/when KVM gets
1661 * involved.
1662 */
Will Deaconc752ce42014-06-25 22:46:31 +01001663 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001664 cfg->cbar = CBAR_TYPE_S2_TRANS;
1665 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001666 ias = smmu->ipa_size;
1667 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001668 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001669 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001670 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001671 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001672 ias = min(ias, 40UL);
1673 oas = min(oas, 40UL);
1674 }
Will Deaconc752ce42014-06-25 22:46:31 +01001675 break;
1676 default:
1677 ret = -EINVAL;
1678 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001679 }
1680
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001681 if (is_fast)
1682 fmt = ARM_V8L_FAST;
1683
1684
Patrick Dalyc190d932016-08-30 17:23:28 -07001685 /* Dynamic domains must set cbndx through domain attribute */
1686 if (!dynamic) {
1687 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
Will Deacon45ae7cf2013-06-24 18:31:25 +01001688 smmu->num_context_banks);
Patrick Dalyc190d932016-08-30 17:23:28 -07001689 if (ret < 0)
1690 goto out_unlock;
1691 cfg->cbndx = ret;
1692 }
Robin Murphyb7862e32016-04-13 18:13:03 +01001693 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001694 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1695 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001696 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001697 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001698 }
1699
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001700 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
Robin Murphyd5466352016-05-09 17:20:09 +01001701 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +00001702 .ias = ias,
1703 .oas = oas,
1704 .tlb = &arm_smmu_gather_ops,
Robin Murphy2df7a252015-07-29 19:46:06 +01001705 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +00001706 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001707
Will Deacon518f7132014-11-14 17:17:54 +00001708 smmu_domain->smmu = smmu;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001709 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1710 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001711 if (!pgtbl_ops) {
1712 ret = -ENOMEM;
1713 goto out_clear_smmu;
1714 }
1715
Patrick Dalyc11d1082016-09-01 15:52:44 -07001716 /*
1717 * assign any page table memory that might have been allocated
1718 * during alloc_io_pgtable_ops
1719 */
Patrick Dalye271f212016-10-04 13:24:49 -07001720 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001721 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001722 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001723
Robin Murphyd5466352016-05-09 17:20:09 +01001724 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001725 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Will Deacon518f7132014-11-14 17:17:54 +00001726
Patrick Dalyc190d932016-08-30 17:23:28 -07001727 /* Assign an asid */
1728 ret = arm_smmu_init_asid(domain, smmu);
1729 if (ret)
1730 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001731
Patrick Dalyc190d932016-08-30 17:23:28 -07001732 if (!dynamic) {
1733 /* Initialise the context bank with our page table cfg */
1734 arm_smmu_init_context_bank(smmu_domain,
1735 &smmu_domain->pgtbl_cfg);
1736
1737 /*
1738 * Request context fault interrupt. Do this last to avoid the
1739 * handler seeing a half-initialised domain state.
1740 */
1741 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1742 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001743 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1744 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001745 if (ret < 0) {
1746 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1747 cfg->irptndx, irq);
1748 cfg->irptndx = INVALID_IRPTNDX;
1749 goto out_clear_smmu;
1750 }
1751 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001752 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001753 }
Will Deacon518f7132014-11-14 17:17:54 +00001754 mutex_unlock(&smmu_domain->init_mutex);
1755
1756 /* Publish page table ops for map/unmap */
1757 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001758 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001759
Will Deacon518f7132014-11-14 17:17:54 +00001760out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001761 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001762 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001763out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001764 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001765 return ret;
1766}
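
/*
 * Usage sketch (assumption, not part of this file): since dynamic domains
 * skip context-bank allocation above and must supply a cbndx through a
 * domain attribute, a client is expected to configure the domain before
 * attaching, assuming the corresponding set-attribute paths are wired up
 * elsewhere in this driver:
 *
 *	int one = 1, parent_cb = ...;
 *	domain = iommu_domain_alloc(&platform_bus_type);
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_DYNAMIC, &one);
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_CONTEXT_BANK, &parent_cb);
 *	ret = iommu_attach_device(domain, dev);
 */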
1767
Patrick Daly77db4f92016-10-14 15:34:10 -07001768static void arm_smmu_domain_reinit(struct arm_smmu_domain *smmu_domain)
1769{
1770 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1771 smmu_domain->cfg.cbndx = INVALID_CBNDX;
1772 smmu_domain->secure_vmid = VMID_INVAL;
1773}
1774
Will Deacon45ae7cf2013-06-24 18:31:25 +01001775static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1776{
Joerg Roedel1d672632015-03-26 13:43:10 +01001777 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001778 struct arm_smmu_device *smmu = smmu_domain->smmu;
1779 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01001780 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001781 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07001782 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001783 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001784
Robin Murphy98006992016-04-20 14:53:33 +01001785 if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001786 return;
1787
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001788 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001789 if (ret) {
 1790		WARN_ONCE(ret, "Whoops, powering on smmu %p failed. Leaking context bank\n",
1791 smmu);
1792 return;
1793 }
1794
Patrick Dalyc190d932016-08-30 17:23:28 -07001795 dynamic = is_dynamic_domain(domain);
1796 if (dynamic) {
1797 arm_smmu_free_asid(domain);
1798 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001799 arm_smmu_power_off(smmu->pwr);
Patrick Dalye271f212016-10-04 13:24:49 -07001800 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001801 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001802 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001803 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Daly77db4f92016-10-14 15:34:10 -07001804 arm_smmu_domain_reinit(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001805 return;
1806 }
1807
Will Deacon518f7132014-11-14 17:17:54 +00001808 /*
1809 * Disable the context bank and free the page tables before freeing
1810 * it.
1811 */
Will Deacon44680ee2014-06-25 11:29:12 +01001812 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001813 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01001814
Will Deacon44680ee2014-06-25 11:29:12 +01001815 if (cfg->irptndx != INVALID_IRPTNDX) {
1816 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08001817 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001818 }
1819
Markus Elfring44830b02015-11-06 18:32:41 +01001820 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07001821 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001822 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001823 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001824 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001825 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001826
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001827 arm_smmu_power_off(smmu->pwr);
Patrick Daly77db4f92016-10-14 15:34:10 -07001828 arm_smmu_domain_reinit(smmu_domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001829}
1830
Joerg Roedel1d672632015-03-26 13:43:10 +01001831static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001832{
1833 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001834
Patrick Daly09801312016-08-29 17:02:52 -07001835 /* Do not support DOMAIN_DMA for now */
1836 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01001837 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001838 /*
1839 * Allocate the domain and initialise some of its data structures.
1840 * We can't really do anything meaningful until we've added a
1841 * master.
1842 */
1843 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1844 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001845 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001846
Robin Murphy9adb9592016-01-26 18:06:36 +00001847 if (type == IOMMU_DOMAIN_DMA &&
1848 iommu_get_dma_cookie(&smmu_domain->domain)) {
1849 kfree(smmu_domain);
1850 return NULL;
1851 }
1852
Will Deacon518f7132014-11-14 17:17:54 +00001853 mutex_init(&smmu_domain->init_mutex);
1854 spin_lock_init(&smmu_domain->pgtbl_lock);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001855 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
1856 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07001857 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001858 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Patrick Daly77db4f92016-10-14 15:34:10 -07001859 arm_smmu_domain_reinit(smmu_domain);
Joerg Roedel1d672632015-03-26 13:43:10 +01001860
1861 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001862}
1863
Joerg Roedel1d672632015-03-26 13:43:10 +01001864static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001865{
Joerg Roedel1d672632015-03-26 13:43:10 +01001866 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01001867
1868 /*
1869 * Free the domain resources. We assume that all devices have
1870 * already been detached.
1871 */
Robin Murphy9adb9592016-01-26 18:06:36 +00001872 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001873 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001874 kfree(smmu_domain);
1875}
1876
1877static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001878 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001879{
1880 int i;
1881 struct arm_smmu_smr *smrs;
1882 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1883
1884 if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
1885 return 0;
1886
Will Deacona9a1b0b2014-05-01 18:05:08 +01001887 if (cfg->smrs)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001888 return -EEXIST;
1889
Mitchel Humpherys29073202014-07-08 09:52:18 -07001890 smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001891 if (!smrs) {
Will Deacona9a1b0b2014-05-01 18:05:08 +01001892 dev_err(smmu->dev, "failed to allocate %d SMRs\n",
1893 cfg->num_streamids);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001894 return -ENOMEM;
1895 }
1896
Will Deacon44680ee2014-06-25 11:29:12 +01001897 /* Allocate the SMRs on the SMMU */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001898 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001899 int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
1900 smmu->num_mapping_groups);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001901 if (idx < 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001902 dev_err(smmu->dev, "failed to allocate free SMR\n");
1903 goto err_free_smrs;
1904 }
1905
1906 smrs[i] = (struct arm_smmu_smr) {
1907 .idx = idx,
1908 .mask = 0, /* We don't currently share SMRs */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001909 .id = cfg->streamids[i],
Will Deacon45ae7cf2013-06-24 18:31:25 +01001910 };
1911 }
1912
1913 /* It worked! Now, poke the actual hardware */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001914 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001915 u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
1916 smrs[i].mask << SMR_MASK_SHIFT;
1917 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
1918 }
1919
Will Deacona9a1b0b2014-05-01 18:05:08 +01001920 cfg->smrs = smrs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001921 return 0;
1922
1923err_free_smrs:
1924 while (--i >= 0)
1925 __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
1926 kfree(smrs);
1927 return -ENOSPC;
1928}
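
/*
 * Worked example (illustrative values): an SMR programmed with .id = 0x421
 * and .mask = 0 matches exactly stream ID 0x421. Setting bits in the mask
 * marks the corresponding ID bits as "don't care", so .id = 0x420 with
 * .mask = 0x3 would match stream IDs 0x420-0x423 with a single register;
 * this driver keeps the mask at zero since SMRs are not currently shared.
 */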
1929
1930static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001931 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001932{
1933 int i;
1934 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001935 struct arm_smmu_smr *smrs = cfg->smrs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001936
Will Deacon43b412b2014-07-15 11:22:24 +01001937 if (!smrs)
1938 return;
1939
Will Deacon45ae7cf2013-06-24 18:31:25 +01001940 /* Invalidate the SMRs before freeing back to the allocator */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001941 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001942 u8 idx = smrs[i].idx;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001943
Will Deacon45ae7cf2013-06-24 18:31:25 +01001944 writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
1945 __arm_smmu_free_bitmap(smmu->smr_map, idx);
1946 }
1947
Will Deacona9a1b0b2014-05-01 18:05:08 +01001948 cfg->smrs = NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001949 kfree(smrs);
1950}
1951
Will Deacon45ae7cf2013-06-24 18:31:25 +01001952static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001953 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001954{
1955 int i, ret;
Will Deacon44680ee2014-06-25 11:29:12 +01001956 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001957 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1958
Will Deacon5f634952016-04-20 14:53:32 +01001959 /*
1960 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
1961 * for all devices behind the SMMU. Note that we need to take
1962 * care configuring SMRs for devices both a platform_device and
1963 * and a PCI device (i.e. a PCI host controller)
1964 */
1965 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
1966 return 0;
1967
Will Deacon8f68f8e2014-07-15 11:27:08 +01001968 /* Devices in an IOMMU group may already be configured */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001969 ret = arm_smmu_master_configure_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001970 if (ret)
Will Deacon8f68f8e2014-07-15 11:27:08 +01001971 return ret == -EEXIST ? 0 : ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001972
Will Deacona9a1b0b2014-05-01 18:05:08 +01001973 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001974 u32 idx, s2cr;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001975
Will Deacona9a1b0b2014-05-01 18:05:08 +01001976 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Patrick Dalyf4930442016-06-27 20:50:14 -07001977 s2cr = S2CR_TYPE_TRANS |
Will Deacon44680ee2014-06-25 11:29:12 +01001978 (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001979 writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
1980 }
1981
1982 return 0;
1983}
1984
1985static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001986 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001987{
Will Deacon43b412b2014-07-15 11:22:24 +01001988 int i;
Will Deacon44680ee2014-06-25 11:29:12 +01001989 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon43b412b2014-07-15 11:22:24 +01001990 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001991
Will Deacon8f68f8e2014-07-15 11:27:08 +01001992 /* An IOMMU group is torn down by the first device to be removed */
1993 if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
1994 return;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001995
1996 /*
1997 * We *must* clear the S2CR first, because freeing the SMR means
1998 * that it can be re-allocated immediately.
1999 */
Will Deacon43b412b2014-07-15 11:22:24 +01002000 for (i = 0; i < cfg->num_streamids; ++i) {
2001 u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Robin Murphy25a1c962016-02-10 14:25:33 +00002002 u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
Will Deacon43b412b2014-07-15 11:22:24 +01002003
Robin Murphy25a1c962016-02-10 14:25:33 +00002004 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
Will Deacon43b412b2014-07-15 11:22:24 +01002005 }
2006
Will Deacona9a1b0b2014-05-01 18:05:08 +01002007 arm_smmu_master_free_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002008}
2009
Patrick Daly09801312016-08-29 17:02:52 -07002010static void arm_smmu_detach_dev(struct iommu_domain *domain,
2011 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002012{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002013 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07002014 struct arm_smmu_device *smmu = smmu_domain->smmu;
2015 struct arm_smmu_master_cfg *cfg;
2016 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07002017 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07002018
2019 if (dynamic)
2020 return;
2021
2022 cfg = find_smmu_master_cfg(dev);
2023 if (!cfg)
2024 return;
2025
2026 if (!smmu) {
2027 dev_err(dev, "Domain not attached; cannot detach!\n");
2028 return;
2029 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002030
2031 dev->archdata.iommu = NULL;
2032 arm_smmu_domain_remove_master(smmu_domain, cfg);
Patrick Daly8befb662016-08-17 20:03:28 -07002033
2034 /* Remove additional vote for atomic power */
2035 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002036 WARN_ON(arm_smmu_power_on_atomic(smmu->pwr));
2037 arm_smmu_power_off(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07002038 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002039}
2040
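/*
 * Secure-domain page table handling (summary of the helpers below): every
 * page allocated for a secure domain's page tables is hyp-assigned so that
 * both HLOS (read/write) and the domain's secure_vmid (read-only) can see
 * it, and each page is handed back to HLOS with full permissions before it
 * is finally freed. The domain's pte_info_list and unassign_list queue
 * pages for these two operations.
 */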
Patrick Dalye271f212016-10-04 13:24:49 -07002041static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07002042{
Patrick Dalye271f212016-10-04 13:24:49 -07002043 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002044 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2045 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
2046 int source_vmid = VMID_HLOS;
2047 struct arm_smmu_pte_info *pte_info, *temp;
2048
Patrick Dalye271f212016-10-04 13:24:49 -07002049 if (!arm_smmu_is_domain_secure(smmu_domain))
2050 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002051
Patrick Dalye271f212016-10-04 13:24:49 -07002052 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002053 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2054 PAGE_SIZE, &source_vmid, 1,
2055 dest_vmids, dest_perms, 2);
2056 if (WARN_ON(ret))
2057 break;
2058 }
2059
2060 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2061 entry) {
2062 list_del(&pte_info->entry);
2063 kfree(pte_info);
2064 }
Patrick Dalye271f212016-10-04 13:24:49 -07002065 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002066}
2067
2068static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2069{
2070 int ret;
2071 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002072 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002073 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2074 struct arm_smmu_pte_info *pte_info, *temp;
2075
Patrick Dalye271f212016-10-04 13:24:49 -07002076 if (!arm_smmu_is_domain_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002077 return;
2078
2079 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2080 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2081 PAGE_SIZE, source_vmlist, 2,
2082 &dest_vmids, &dest_perms, 1);
2083 if (WARN_ON(ret))
2084 break;
2085 free_pages_exact(pte_info->virt_addr, pte_info->size);
2086 }
2087
2088 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2089 entry) {
2090 list_del(&pte_info->entry);
2091 kfree(pte_info);
2092 }
2093}
2094
2095static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2096{
2097 struct arm_smmu_domain *smmu_domain = cookie;
2098 struct arm_smmu_pte_info *pte_info;
2099
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002100 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002101
2102 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2103 if (!pte_info)
2104 return;
2105
2106 pte_info->virt_addr = addr;
2107 pte_info->size = size;
2108 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2109}
2110
2111static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2112{
2113 struct arm_smmu_domain *smmu_domain = cookie;
2114 struct arm_smmu_pte_info *pte_info;
2115
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002116 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002117
2118 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2119 if (!pte_info)
2120 return -ENOMEM;
2121 pte_info->virt_addr = addr;
2122 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2123 return 0;
2124}
2125
Will Deacon45ae7cf2013-06-24 18:31:25 +01002126static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2127{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002128 int ret;
Joerg Roedel1d672632015-03-26 13:43:10 +01002129 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002130 struct arm_smmu_device *smmu;
Will Deacona9a1b0b2014-05-01 18:05:08 +01002131 struct arm_smmu_master_cfg *cfg;
Patrick Daly8befb662016-08-17 20:03:28 -07002132 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002133
Will Deacon8f68f8e2014-07-15 11:27:08 +01002134 smmu = find_smmu_for_device(dev);
Will Deacon44680ee2014-06-25 11:29:12 +01002135 if (!smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002136 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2137 return -ENXIO;
2138 }
2139
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002140 /* Enable Clocks and Power */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002141 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002142 if (ret)
2143 return ret;
2144
Will Deacon518f7132014-11-14 17:17:54 +00002145 /* Ensure that the domain is finalised */
2146 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002147 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002148 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002149
Patrick Dalyc190d932016-08-30 17:23:28 -07002150 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002151 if (is_dynamic_domain(domain)) {
2152 ret = 0;
2153 goto out_power_off;
2154 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002155
Will Deacon45ae7cf2013-06-24 18:31:25 +01002156 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002157 * Sanity check the domain. We don't support domains across
2158 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002159 */
Will Deacon518f7132014-11-14 17:17:54 +00002160 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002161 dev_err(dev,
2162 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002163 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002164 ret = -EINVAL;
2165 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002166 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002167
2168 /* Looks ok, so add the device to the domain */
Will Deacon8f68f8e2014-07-15 11:27:08 +01002169 cfg = find_smmu_master_cfg(dev);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002170 if (!cfg) {
2171 ret = -ENODEV;
2172 goto out_power_off;
2173 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002174
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002175 /* Detach the dev from its current domain */
2176 if (dev->archdata.iommu)
Patrick Daly09801312016-08-29 17:02:52 -07002177 arm_smmu_detach_dev(dev->archdata.iommu, dev);
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002178
Will Deacon844e35b2014-07-17 11:23:51 +01002179 ret = arm_smmu_domain_add_master(smmu_domain, cfg);
2180 if (!ret)
2181 dev->archdata.iommu = domain;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002182
2183out_power_off:
Patrick Daly466237d2016-10-14 15:59:14 -07002184 /*
2185 * Keep an additional vote for non-atomic power until domain is
2186 * detached
2187 */
2188 if (!ret && atomic_domain) {
2189 WARN_ON(arm_smmu_power_on(smmu->pwr));
2190 arm_smmu_power_off_atomic(smmu->pwr);
2191 }
2192
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002193 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002194
Will Deacon45ae7cf2013-06-24 18:31:25 +01002195 return ret;
2196}
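
/*
 * Usage sketch (assumption, not part of this file): a client that must
 * map/unmap from atomic context marks the domain before attaching so the
 * extra non-atomic power vote above is held on its behalf, assuming
 * DOMAIN_ATTR_ATOMIC is accepted by the set-attribute path elsewhere in
 * this driver:
 *
 *	int one = 1;
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_ATOMIC, &one);
 *	ret = iommu_attach_device(domain, dev);
 */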
2197
Will Deacon45ae7cf2013-06-24 18:31:25 +01002198static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002199 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002200{
Will Deacon518f7132014-11-14 17:17:54 +00002201 int ret;
2202 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002203 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002204	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002205
Will Deacon518f7132014-11-14 17:17:54 +00002206 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002207 return -ENODEV;
2208
Patrick Dalye271f212016-10-04 13:24:49 -07002209 arm_smmu_secure_domain_lock(smmu_domain);
2210
Will Deacon518f7132014-11-14 17:17:54 +00002211 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2212 ret = ops->map(ops, iova, paddr, size, prot);
2213 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002214
2215 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002216 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002217
Will Deacon518f7132014-11-14 17:17:54 +00002218 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002219}
2220
2221static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2222 size_t size)
2223{
Will Deacon518f7132014-11-14 17:17:54 +00002224 size_t ret;
2225 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002226 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002227	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002228
Will Deacon518f7132014-11-14 17:17:54 +00002229 if (!ops)
2230 return 0;
2231
Patrick Daly8befb662016-08-17 20:03:28 -07002232 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002233 if (ret)
2234 return ret;
2235
Patrick Dalye271f212016-10-04 13:24:49 -07002236 arm_smmu_secure_domain_lock(smmu_domain);
2237
Will Deacon518f7132014-11-14 17:17:54 +00002238 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2239 ret = ops->unmap(ops, iova, size);
2240 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002241
Patrick Daly8befb662016-08-17 20:03:28 -07002242 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002243 /*
2244 * While splitting up block mappings, we might allocate page table
 2245	 * memory during unmap, so the VMIDs need to be assigned to the
2246 * memory here as well.
2247 */
2248 arm_smmu_assign_table(smmu_domain);
 2249	/* Also unassign any pages that were freed during unmap */
2250 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002251 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002252 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002253}
2254
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002255static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2256 struct scatterlist *sg, unsigned int nents, int prot)
2257{
2258 int ret;
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002259 size_t size;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002260 unsigned long flags;
2261 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2262 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2263
2264 if (!ops)
2265 return -ENODEV;
2266
Patrick Daly8befb662016-08-17 20:03:28 -07002267 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002268 if (ret)
2269 return ret;
2270
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002271 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002272 ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002273 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002274
2275 if (!ret)
2276 arm_smmu_unmap(domain, iova, size);
2277
Patrick Daly8befb662016-08-17 20:03:28 -07002278 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002279 arm_smmu_assign_table(smmu_domain);
2280
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002281 return ret;
2282}
2283
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002284static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyad441dd2016-09-15 15:50:46 -07002285 dma_addr_t iova)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002286{
Joerg Roedel1d672632015-03-26 13:43:10 +01002287 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002288 struct arm_smmu_device *smmu = smmu_domain->smmu;
2289 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 2290	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2291 struct device *dev = smmu->dev;
2292 void __iomem *cb_base;
2293 u32 tmp;
2294 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002295 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002296
2297 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2298
Robin Murphy661d9622015-05-27 17:09:34 +01002299 /* ATS1 registers can only be written atomically */
2300 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002301 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002302 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2303 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002304 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002305
2306 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2307 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002308 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002309 dev_err(dev,
2310 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2311 &iova, &phys);
2312 phys = 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002313 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002314 }
2315
Robin Murphyf9a05f02016-04-13 18:13:01 +01002316 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002317 if (phys & CB_PAR_F) {
2318 dev_err(dev, "translation fault!\n");
2319 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002320 phys = 0;
2321 } else {
2322 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002323 }
Patrick Dalyad441dd2016-09-15 15:50:46 -07002324
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002325 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002326}
2327
Will Deacon45ae7cf2013-06-24 18:31:25 +01002328static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002329 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002330{
Will Deacon518f7132014-11-14 17:17:54 +00002331 phys_addr_t ret;
2332 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002333 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002334	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002335
Will Deacon518f7132014-11-14 17:17:54 +00002336 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002337 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002338
Will Deacon518f7132014-11-14 17:17:54 +00002339 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002340 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002341 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002342
Will Deacon518f7132014-11-14 17:17:54 +00002343 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002344}
2345
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002346/*
2347 * This function can sleep, and cannot be called from atomic context. Will
2348 * power on register block if required. This restriction does not apply to the
2349 * original iova_to_phys() op.
2350 */
2351static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2352 dma_addr_t iova)
2353{
2354 phys_addr_t ret = 0;
2355 unsigned long flags;
2356 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002357
Patrick Dalyad441dd2016-09-15 15:50:46 -07002358 if (smmu_domain->smmu->arch_ops &&
2359 smmu_domain->smmu->arch_ops->iova_to_phys_hard)
2360 return smmu_domain->smmu->arch_ops->iova_to_phys_hard(
2361 domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002362
2363 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2364 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2365 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyad441dd2016-09-15 15:50:46 -07002366 ret = __arm_smmu_iova_to_phys_hard(domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002367
2368 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2369
2370 return ret;
2371}
2372
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002373static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002374{
Will Deacond0948942014-06-24 17:30:10 +01002375 switch (cap) {
2376 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002377 /*
2378 * Return true here as the SMMU can always send out coherent
2379 * requests.
2380 */
2381 return true;
Will Deacond0948942014-06-24 17:30:10 +01002382 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002383 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002384 case IOMMU_CAP_NOEXEC:
2385 return true;
Will Deacond0948942014-06-24 17:30:10 +01002386 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002387 return false;
Will Deacond0948942014-06-24 17:30:10 +01002388 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002389}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002390
Will Deacona9a1b0b2014-05-01 18:05:08 +01002391static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
2392{
2393 *((u16 *)data) = alias;
2394 return 0; /* Continue walking */
Will Deacon45ae7cf2013-06-24 18:31:25 +01002395}
2396
Will Deacon8f68f8e2014-07-15 11:27:08 +01002397static void __arm_smmu_release_pci_iommudata(void *data)
2398{
2399 kfree(data);
2400}
2401
Joerg Roedelaf659932015-10-21 23:51:41 +02002402static int arm_smmu_init_pci_device(struct pci_dev *pdev,
2403 struct iommu_group *group)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002404{
Will Deacon03edb222015-01-19 14:27:33 +00002405 struct arm_smmu_master_cfg *cfg;
Joerg Roedelaf659932015-10-21 23:51:41 +02002406 u16 sid;
2407 int i;
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002408
Will Deacon03edb222015-01-19 14:27:33 +00002409 cfg = iommu_group_get_iommudata(group);
2410 if (!cfg) {
Will Deacona9a1b0b2014-05-01 18:05:08 +01002411 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
Joerg Roedelaf659932015-10-21 23:51:41 +02002412 if (!cfg)
2413 return -ENOMEM;
Will Deacona9a1b0b2014-05-01 18:05:08 +01002414
Will Deacon03edb222015-01-19 14:27:33 +00002415 iommu_group_set_iommudata(group, cfg,
2416 __arm_smmu_release_pci_iommudata);
Will Deacona9a1b0b2014-05-01 18:05:08 +01002417 }
2418
Joerg Roedelaf659932015-10-21 23:51:41 +02002419 if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
2420 return -ENOSPC;
Will Deacona9a1b0b2014-05-01 18:05:08 +01002421
Will Deacon03edb222015-01-19 14:27:33 +00002422 /*
2423 * Assume Stream ID == Requester ID for now.
2424 * We need a way to describe the ID mappings in FDT.
2425 */
2426 pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
2427 for (i = 0; i < cfg->num_streamids; ++i)
2428 if (cfg->streamids[i] == sid)
2429 break;
2430
2431 /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
2432 if (i == cfg->num_streamids)
2433 cfg->streamids[cfg->num_streamids++] = sid;
2434
2435 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002436}
2437
Joerg Roedelaf659932015-10-21 23:51:41 +02002438static int arm_smmu_init_platform_device(struct device *dev,
2439 struct iommu_group *group)
Will Deacon03edb222015-01-19 14:27:33 +00002440{
Will Deacon03edb222015-01-19 14:27:33 +00002441 struct arm_smmu_device *smmu = find_smmu_for_device(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02002442 struct arm_smmu_master *master;
Will Deacon03edb222015-01-19 14:27:33 +00002443
2444 if (!smmu)
2445 return -ENODEV;
2446
2447 master = find_smmu_master(smmu, dev->of_node);
2448 if (!master)
2449 return -ENODEV;
2450
Will Deacon03edb222015-01-19 14:27:33 +00002451 iommu_group_set_iommudata(group, &master->cfg, NULL);
Joerg Roedelaf659932015-10-21 23:51:41 +02002452
2453 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00002454}
2455
2456static int arm_smmu_add_device(struct device *dev)
2457{
Joerg Roedelaf659932015-10-21 23:51:41 +02002458 struct iommu_group *group;
Will Deacon03edb222015-01-19 14:27:33 +00002459
Joerg Roedelaf659932015-10-21 23:51:41 +02002460 group = iommu_group_get_for_dev(dev);
2461 if (IS_ERR(group))
2462 return PTR_ERR(group);
2463
Peng Fan9a4a9d82015-11-20 16:56:18 +08002464 iommu_group_put(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02002465 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00002466}
2467
Will Deacon45ae7cf2013-06-24 18:31:25 +01002468static void arm_smmu_remove_device(struct device *dev)
2469{
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002470 iommu_group_remove_device(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002471}
2472
Joerg Roedelaf659932015-10-21 23:51:41 +02002473static struct iommu_group *arm_smmu_device_group(struct device *dev)
2474{
2475 struct iommu_group *group;
2476 int ret;
2477
2478 if (dev_is_pci(dev))
2479 group = pci_device_group(dev);
2480 else
2481 group = generic_device_group(dev);
2482
2483 if (IS_ERR(group))
2484 return group;
2485
2486 if (dev_is_pci(dev))
2487 ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
2488 else
2489 ret = arm_smmu_init_platform_device(dev, group);
2490
2491 if (ret) {
2492 iommu_group_put(group);
2493 group = ERR_PTR(ret);
2494 }
2495
2496 return group;
2497}
2498
Will Deaconc752ce42014-06-25 22:46:31 +01002499static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
2500 enum iommu_attr attr, void *data)
2501{
Joerg Roedel1d672632015-03-26 13:43:10 +01002502 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002503 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01002504
2505 switch (attr) {
2506 case DOMAIN_ATTR_NESTING:
2507 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
2508 return 0;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08002509 case DOMAIN_ATTR_PT_BASE_ADDR:
2510 *((phys_addr_t *)data) =
2511 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2512 return 0;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002513 case DOMAIN_ATTR_CONTEXT_BANK:
2514 /* context bank index isn't valid until we are attached */
2515 if (smmu_domain->smmu == NULL)
2516 return -ENODEV;
2517
2518 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
2519 ret = 0;
2520 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002521 case DOMAIN_ATTR_TTBR0: {
2522 u64 val;
2523 struct arm_smmu_device *smmu = smmu_domain->smmu;
2524 /* not valid until we are attached */
2525 if (smmu == NULL)
2526 return -ENODEV;
2527
2528 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2529 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
2530 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
2531 << (TTBRn_ASID_SHIFT);
2532 *((u64 *)data) = val;
2533 ret = 0;
2534 break;
2535 }
2536 case DOMAIN_ATTR_CONTEXTIDR:
2537 /* not valid until attached */
2538 if (smmu_domain->smmu == NULL)
2539 return -ENODEV;
2540 *((u32 *)data) = smmu_domain->cfg.procid;
2541 ret = 0;
2542 break;
2543 case DOMAIN_ATTR_PROCID:
2544 *((u32 *)data) = smmu_domain->cfg.procid;
2545 ret = 0;
2546 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002547 case DOMAIN_ATTR_DYNAMIC:
2548 *((int *)data) = !!(smmu_domain->attributes
2549 & (1 << DOMAIN_ATTR_DYNAMIC));
2550 ret = 0;
2551 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002552 case DOMAIN_ATTR_NON_FATAL_FAULTS:
2553 *((int *)data) = !!(smmu_domain->attributes
2554 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
2555 ret = 0;
2556 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07002557 case DOMAIN_ATTR_S1_BYPASS:
2558 *((int *)data) = !!(smmu_domain->attributes
2559 & (1 << DOMAIN_ATTR_S1_BYPASS));
2560 ret = 0;
2561 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002562 case DOMAIN_ATTR_SECURE_VMID:
2563 *((int *)data) = smmu_domain->secure_vmid;
2564 ret = 0;
2565 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08002566 case DOMAIN_ATTR_PGTBL_INFO: {
2567 struct iommu_pgtbl_info *info = data;
2568
2569 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
2570 ret = -ENODEV;
2571 break;
2572 }
2573 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
2574 ret = 0;
2575 break;
2576 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002577 case DOMAIN_ATTR_FAST:
2578 *((int *)data) = !!(smmu_domain->attributes
2579 & (1 << DOMAIN_ATTR_FAST));
2580 ret = 0;
2581 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002582 default:
2583 return -ENODEV;
2584 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002585 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002586}
2587
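/*
 * Illustrative only: once a domain has been attached, a client could read
 * back the context bank index and TTBR0 through the generic attribute API.
 * A minimal sketch (error handling trimmed):
 *
 *	unsigned int cb;
 *	u64 ttbr0;
 *
 *	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_CONTEXT_BANK, &cb) &&
 *	    !iommu_domain_get_attr(domain, DOMAIN_ATTR_TTBR0, &ttbr0))
 *		pr_debug("cb %u ttbr0 %llx\n", cb, (unsigned long long)ttbr0);
 *
 * Both attributes return -ENODEV until the domain is attached, as handled
 * above.
 */
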
2588static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
2589 enum iommu_attr attr, void *data)
2590{
Will Deacon518f7132014-11-14 17:17:54 +00002591 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01002592 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01002593
Will Deacon518f7132014-11-14 17:17:54 +00002594 mutex_lock(&smmu_domain->init_mutex);
2595
Will Deaconc752ce42014-06-25 22:46:31 +01002596 switch (attr) {
2597 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00002598 if (smmu_domain->smmu) {
2599 ret = -EPERM;
2600 goto out_unlock;
2601 }
2602
Will Deaconc752ce42014-06-25 22:46:31 +01002603 if (*(int *)data)
2604 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2605 else
2606 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2607
Will Deacon518f7132014-11-14 17:17:54 +00002608 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002609 case DOMAIN_ATTR_PROCID:
2610 if (smmu_domain->smmu != NULL) {
2611 dev_err(smmu_domain->smmu->dev,
2612 "cannot change procid attribute while attached\n");
2613 ret = -EBUSY;
2614 break;
2615 }
2616 smmu_domain->cfg.procid = *((u32 *)data);
2617 ret = 0;
2618 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002619 case DOMAIN_ATTR_DYNAMIC: {
2620 int dynamic = *((int *)data);
2621
2622 if (smmu_domain->smmu != NULL) {
2623 dev_err(smmu_domain->smmu->dev,
2624 "cannot change dynamic attribute while attached\n");
2625 ret = -EBUSY;
2626 break;
2627 }
2628
2629 if (dynamic)
2630 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
2631 else
2632 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
2633 ret = 0;
2634 break;
2635 }
2636 case DOMAIN_ATTR_CONTEXT_BANK:
2637 /* context bank can't be set while attached */
2638 if (smmu_domain->smmu != NULL) {
2639 ret = -EBUSY;
2640 break;
2641 }
2642 /* ... and it can only be set for dynamic contexts. */
2643 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
2644 ret = -EINVAL;
2645 break;
2646 }
2647
2648 /* this will be validated during attach */
2649 smmu_domain->cfg.cbndx = *((unsigned int *)data);
2650 ret = 0;
2651 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002652 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
2653 u32 non_fatal_faults = *((int *)data);
2654
2655 if (non_fatal_faults)
2656 smmu_domain->attributes |=
2657 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
2658 else
2659 smmu_domain->attributes &=
2660 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
2661 ret = 0;
2662 break;
2663 }
Patrick Dalye62d3362016-03-15 18:58:28 -07002664 case DOMAIN_ATTR_S1_BYPASS: {
2665 int bypass = *((int *)data);
2666
2667 /* bypass can't be changed while attached */
2668 if (smmu_domain->smmu != NULL) {
2669 ret = -EBUSY;
2670 break;
2671 }
2672 if (bypass)
2673 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
2674 else
2675 smmu_domain->attributes &=
2676 ~(1 << DOMAIN_ATTR_S1_BYPASS);
2677
2678 ret = 0;
2679 break;
2680 }
Patrick Daly8befb662016-08-17 20:03:28 -07002681 case DOMAIN_ATTR_ATOMIC:
2682 {
2683 int atomic_ctx = *((int *)data);
2684
2685 /* can't be changed while attached */
2686 if (smmu_domain->smmu != NULL) {
2687 ret = -EBUSY;
2688 break;
2689 }
2690 if (atomic_ctx)
2691 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
2692 else
2693 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
2694 break;
2695 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002696 case DOMAIN_ATTR_SECURE_VMID:
2697 if (smmu_domain->secure_vmid != VMID_INVAL) {
2698 ret = -ENODEV;
2699 WARN(1, "secure vmid already set!");
2700 break;
2701 }
2702 smmu_domain->secure_vmid = *((int *)data);
2703 break;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002704 case DOMAIN_ATTR_FAST:
2705 if (*((int *)data))
2706 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
2707 ret = 0;
2708 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002709 default:
Will Deacon518f7132014-11-14 17:17:54 +00002710 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01002711 }
Will Deacon518f7132014-11-14 17:17:54 +00002712
2713out_unlock:
2714 mutex_unlock(&smmu_domain->init_mutex);
2715 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002716}
2717
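/*
 * Illustrative only: per the checks above, DOMAIN_ATTR_DYNAMIC and
 * DOMAIN_ATTR_CONTEXT_BANK must be configured before the domain is
 * attached, and the context bank may only be chosen for dynamic domains.
 * A minimal sketch (bus type, context bank number and the master device
 * are placeholders):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *	int dynamic = 1;
 *	unsigned int cb = 4;
 *
 *	iommu_domain_set_attr(dom, DOMAIN_ATTR_DYNAMIC, &dynamic);
 *	iommu_domain_set_attr(dom, DOMAIN_ATTR_CONTEXT_BANK, &cb);
 *	iommu_attach_device(dom, dev);	 (cbndx is validated here)
 */
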
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002718static void arm_smmu_trigger_fault(struct iommu_domain *domain,
2719 unsigned long flags)
2720{
2721 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2722 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2723 struct arm_smmu_device *smmu;
2724 void __iomem *cb_base;
2725
2726 if (!smmu_domain->smmu) {
2727 pr_err("Can't trigger faults on non-attached domains\n");
2728 return;
2729 }
2730
2731 smmu = smmu_domain->smmu;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002732 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002733 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002734
2735 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2736 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
2737 flags, cfg->cbndx);
2738 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07002739 /* give the interrupt time to fire... */
2740 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002741
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002742 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002743}
2744
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002745static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
2746 unsigned long offset)
2747{
2748 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2749 struct arm_smmu_device *smmu;
2750 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2751 void __iomem *cb_base;
2752 unsigned long val;
2753
2754 if (offset >= SZ_4K) {
2755 pr_err("Invalid offset: 0x%lx\n", offset);
2756 return 0;
2757 }
2758
2759 smmu = smmu_domain->smmu;
2760 if (!smmu) {
2761 WARN(1, "Can't read registers of a detached domain\n");
2762 val = 0;
2763 return val;
2764 }
2765
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002766 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002767 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002768
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002769 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2770 val = readl_relaxed(cb_base + offset);
2771
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002772 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002773 return val;
2774}
2775
2776static void arm_smmu_reg_write(struct iommu_domain *domain,
2777 unsigned long offset, unsigned long val)
2778{
2779 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2780 struct arm_smmu_device *smmu;
2781 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2782 void __iomem *cb_base;
2783
2784 if (offset >= SZ_4K) {
2785 pr_err("Invalid offset: 0x%lx\n", offset);
2786 return;
2787 }
2788
2789 smmu = smmu_domain->smmu;
2790 if (!smmu) {
2791		WARN(1, "Can't write registers of a detached domain\n");
2792 return;
2793 }
2794
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002795 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002796 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002797
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002798 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2799 writel_relaxed(val, cb_base + offset);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002800
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002801 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002802}
2803
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08002804static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
2805{
2806 arm_smmu_tlb_inv_context(to_smmu_domain(domain));
2807}
2808
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002809static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
2810{
2811 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2812
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002813 return arm_smmu_power_on(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002814}
2815
2816static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
2817{
2818 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2819
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002820 arm_smmu_power_off(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002821}
2822
Will Deacon518f7132014-11-14 17:17:54 +00002823static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01002824 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01002825 .domain_alloc = arm_smmu_domain_alloc,
2826 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01002827 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07002828 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01002829 .map = arm_smmu_map,
2830 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002831 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01002832 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002833 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01002834 .add_device = arm_smmu_add_device,
2835 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02002836 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01002837 .domain_get_attr = arm_smmu_domain_get_attr,
2838 .domain_set_attr = arm_smmu_domain_set_attr,
Will Deacon518f7132014-11-14 17:17:54 +00002839 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002840 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002841 .reg_read = arm_smmu_reg_read,
2842 .reg_write = arm_smmu_reg_write,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08002843 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002844 .enable_config_clocks = arm_smmu_enable_config_clocks,
2845 .disable_config_clocks = arm_smmu_disable_config_clocks,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002846};
2847
Patrick Dalyad441dd2016-09-15 15:50:46 -07002848#define IMPL_DEF1_MICRO_MMU_CTRL 0
2849#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
2850#define MICRO_MMU_CTRL_IDLE (1 << 3)
2851
2852/* Definitions for implementation-defined registers */
2853#define ACTLR_QCOM_OSH_SHIFT 28
2854#define ACTLR_QCOM_OSH 1
2855
2856#define ACTLR_QCOM_ISH_SHIFT 29
2857#define ACTLR_QCOM_ISH 1
2858
2859#define ACTLR_QCOM_NSH_SHIFT 30
2860#define ACTLR_QCOM_NSH 1
2861
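/*
 * With all three fields set to 1, the ACTLR value composed below is
 * (1 << 28) | (1 << 29) | (1 << 30) = 0x70000000.
 */
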
2862static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002863{
2864 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002865 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002866
2867 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
2868 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
2869 0, 30000)) {
2870 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
2871 return -EBUSY;
2872 }
2873
2874 return 0;
2875}
2876
Patrick Dalyad441dd2016-09-15 15:50:46 -07002877static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002878{
2879 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
2880 u32 reg;
2881
2882 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2883 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
2884 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2885
Patrick Dalyad441dd2016-09-15 15:50:46 -07002886 return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002887}
2888
Patrick Dalyad441dd2016-09-15 15:50:46 -07002889static int qsmmuv2_halt(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002890{
Patrick Dalyad441dd2016-09-15 15:50:46 -07002891 return __qsmmuv2_halt(smmu, true);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002892}
2893
Patrick Dalyad441dd2016-09-15 15:50:46 -07002894static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002895{
Patrick Dalyad441dd2016-09-15 15:50:46 -07002896 return __qsmmuv2_halt(smmu, false);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002897}
2898
Patrick Dalyad441dd2016-09-15 15:50:46 -07002899static void qsmmuv2_resume(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002900{
2901 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
2902 u32 reg;
2903
2904 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2905 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
2906 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2907}
2908
Patrick Dalyad441dd2016-09-15 15:50:46 -07002909static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002910{
2911 int i;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002912 u32 val;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002913 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002914 void __iomem *cb_base;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002915
Patrick Dalyad441dd2016-09-15 15:50:46 -07002916 /*
2917 * SCTLR.M must be disabled here per ARM SMMUv2 spec
2918 * to prevent table walks with an inconsistent state.
2919 */
2920 for (i = 0; i < smmu->num_context_banks; ++i) {
2921 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
2922 val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
2923 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
2924 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
2925 writel_relaxed(val, cb_base + ARM_SMMU_CB_ACTLR);
2926 }
2927
2928 /* Program implementation defined registers */
2929 qsmmuv2_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002930 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
2931 writel_relaxed(regs[i].value,
2932 ARM_SMMU_GR0(smmu) + regs[i].offset);
Patrick Dalyad441dd2016-09-15 15:50:46 -07002933 qsmmuv2_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002934}
2935
Patrick Dalyad441dd2016-09-15 15:50:46 -07002936static phys_addr_t __qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
2937 dma_addr_t iova, bool halt)
2938{
2939 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2940 struct arm_smmu_device *smmu = smmu_domain->smmu;
2941 int ret;
2942 phys_addr_t phys = 0;
2943 unsigned long flags;
2944
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002945 ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07002946 if (ret)
2947 return 0;
2948
2949 if (halt) {
2950 ret = qsmmuv2_halt(smmu);
2951 if (ret)
2952 goto out_power_off;
2953 }
2954
2955 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2956 spin_lock(&smmu->atos_lock);
2957 phys = __arm_smmu_iova_to_phys_hard(domain, iova);
2958 spin_unlock(&smmu->atos_lock);
2959 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2960
2961 if (halt)
2962 qsmmuv2_resume(smmu);
2963
2964out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002965 arm_smmu_power_off(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07002966 return phys;
2967}
2968
2969static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
2970 dma_addr_t iova)
2971{
2972 return __qsmmuv2_iova_to_phys_hard(domain, iova, true);
2973}
2974
2975static void qsmmuv2_iova_to_phys_fault(
2976 struct iommu_domain *domain,
2977 dma_addr_t iova, phys_addr_t *phys,
2978 phys_addr_t *phys_post_tlbiall)
2979{
2980 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2981 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2982 struct arm_smmu_device *smmu;
2983 void __iomem *cb_base;
2984 u64 sctlr, sctlr_orig;
2985 u32 fsr;
2986
2987 smmu = smmu_domain->smmu;
2988 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2989
2990 qsmmuv2_halt_nowait(smmu);
2991
2992 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
2993
2994 qsmmuv2_wait_for_halt(smmu);
2995
2996 /* clear FSR to allow ATOS to log any faults */
2997 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
2998 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
2999
3000 /* disable stall mode momentarily */
3001 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
3002 sctlr = sctlr_orig & ~SCTLR_CFCFG;
3003 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
3004
3005 *phys = __qsmmuv2_iova_to_phys_hard(domain, iova, false);
3006 arm_smmu_tlb_inv_context(smmu_domain);
3007 *phys_post_tlbiall = __qsmmuv2_iova_to_phys_hard(domain, iova, false);
3008
3009 /* restore SCTLR */
3010 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
3011
3012 qsmmuv2_resume(smmu);
3013}
3014
3015struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
3016 .device_reset = qsmmuv2_device_reset,
3017 .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
3018 .iova_to_phys_fault = qsmmuv2_iova_to_phys_fault,
3019};
3020
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003021static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003022{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003023 int i;
3024 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003025 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003026 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003027
Peng Fan3ca37122016-05-03 21:50:30 +08003028 /*
3029 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
3030 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
3031 * bit is only present in MMU-500r2 onwards.
3032 */
3033 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
3034 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
3035 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
3036 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
3037 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
3038 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
3039 }
3040
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003041 /* Make sure all context banks are disabled and clear CB_FSR */
3042 for (i = 0; i < smmu->num_context_banks; ++i) {
3043 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3044 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
3045 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003046 /*
3047 * Disable MMU-500's not-particularly-beneficial next-page
3048 * prefetcher for the sake of errata #841119 and #826419.
3049 */
3050 if (smmu->model == ARM_MMU500) {
3051 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
3052 reg &= ~ARM_MMU500_ACTLR_CPRE;
3053 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
3054 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003055 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003056}
3057
3058static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
3059{
3060 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
3061 int i = 0;
3062 u32 reg;
3063
3064 /* clear global FSR */
3065 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3066 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3067
3068 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
3069 /*
3070 * Mark all SMRn as invalid and all S2CRn as bypass unless
3071 * overridden
3072 */
3073 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
3074 for (i = 0; i < smmu->num_mapping_groups; ++i) {
3075 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
3076 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
3077 }
3078
3079 arm_smmu_context_bank_reset(smmu);
3080 }
Will Deacon1463fe42013-07-31 19:21:27 +01003081
Will Deacon45ae7cf2013-06-24 18:31:25 +01003082 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003083 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
3084 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
3085
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003086 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003087
Will Deacon45ae7cf2013-06-24 18:31:25 +01003088 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003089 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003090
3091 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003092 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003093
Robin Murphy25a1c962016-02-10 14:25:33 +00003094 /* Enable client access, handling unmatched streams as appropriate */
3095 reg &= ~sCR0_CLIENTPD;
3096 if (disable_bypass)
3097 reg |= sCR0_USFCFG;
3098 else
3099 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003100
3101 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003102 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003103
3104 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003105 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003106
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003107 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3108 reg |= sCR0_VMID16EN;
3109
Will Deacon45ae7cf2013-06-24 18:31:25 +01003110 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003111 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003112 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Dalyd7476202016-09-08 18:23:28 -07003113
3114 /* Manage any implementation defined features */
3115 arm_smmu_arch_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003116}
3117
3118static int arm_smmu_id_size_to_bits(int size)
3119{
3120 switch (size) {
3121 case 0:
3122 return 32;
3123 case 1:
3124 return 36;
3125 case 2:
3126 return 40;
3127 case 3:
3128 return 42;
3129 case 4:
3130 return 44;
3131 case 5:
3132 default:
3133 return 48;
3134 }
3135}
3136
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003137static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
3138{
3139 struct device *dev = smmu->dev;
3140 int i, ntuples, ret;
3141 u32 *tuples;
3142 struct arm_smmu_impl_def_reg *regs, *regit;
3143
3144 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
3145 return 0;
3146
3147 ntuples /= sizeof(u32);
3148 if (ntuples % 2) {
3149 dev_err(dev,
3150 "Invalid number of attach-impl-defs registers: %d\n",
3151 ntuples);
3152 return -EINVAL;
3153 }
3154
3155 regs = devm_kmalloc(
3156 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
3157 GFP_KERNEL);
3158 if (!regs)
3159 return -ENOMEM;
3160
3161 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
3162 if (!tuples)
3163 return -ENOMEM;
3164
3165 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
3166 tuples, ntuples);
3167 if (ret)
3168 return ret;
3169
3170 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
3171 regit->offset = tuples[i];
3172 regit->value = tuples[i + 1];
3173 }
3174
3175 devm_kfree(dev, tuples);
3176
3177 smmu->impl_def_attach_registers = regs;
3178 smmu->num_impl_def_attach_registers = ntuples / 2;
3179
3180 return 0;
3181}
3182
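/*
 * Illustrative only: the property parsed above is a flat list of
 * <offset value> u32 pairs, written to the SMMU global register space at
 * reset time (see qsmmuv2_device_reset). A hypothetical devicetree
 * fragment, with made-up offsets and values:
 *
 *	attach-impl-defs = <0x6000 0x270>,
 *			   <0x6060 0x1055>;
 */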
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003183
3184static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003185{
3186 const char *cname;
3187 struct property *prop;
3188 int i;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003189 struct device *dev = pwr->dev;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003190
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003191 pwr->num_clocks =
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003192 of_property_count_strings(dev->of_node, "clock-names");
3193
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003194 if (pwr->num_clocks < 1) {
3195 pwr->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003196 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07003197 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003198
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003199 pwr->clocks = devm_kzalloc(
3200 dev, sizeof(*pwr->clocks) * pwr->num_clocks,
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003201 GFP_KERNEL);
3202
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003203 if (!pwr->clocks)
3204 return -ENOMEM;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003205
3206 i = 0;
3207 of_property_for_each_string(dev->of_node, "clock-names",
3208 prop, cname) {
3209 struct clk *c = devm_clk_get(dev, cname);
3210
3211 if (IS_ERR(c)) {
3212			dev_err(dev, "Couldn't get clock: %s\n",
3213				cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07003214 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003215 }
3216
3217 if (clk_get_rate(c) == 0) {
3218 long rate = clk_round_rate(c, 1000);
3219
3220 clk_set_rate(c, rate);
3221 }
3222
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003223 pwr->clocks[i] = c;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003224
3225 ++i;
3226 }
3227 return 0;
3228}
3229
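/*
 * Illustrative only: the clocks are described with the standard bindings
 * read above. A hypothetical fragment (clock names and phandles are
 * made up):
 *
 *	clocks = <&gcc GCC_SMMU_CFG_AHB_CLK>, <&gcc GCC_SMMU_AXI_CLK>;
 *	clock-names = "iface_clk", "bus_clk";
 */
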
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003230static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003231{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003232 const char *cname;
3233 struct property *prop;
3234 int i, ret = 0;
3235 struct device *dev = pwr->dev;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003236
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003237 pwr->num_gdscs =
3238 of_property_count_strings(dev->of_node, "qcom,regulator-names");
3239
3240 if (pwr->num_gdscs < 1) {
3241 pwr->num_gdscs = 0;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003242 return 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003243 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003244
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003245 pwr->gdscs = devm_kzalloc(
3246 dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
3247
3248 if (!pwr->gdscs)
3249 return -ENOMEM;
3250
3251 i = 0;
3252 of_property_for_each_string(dev->of_node, "qcom,regulator-names",
3253 prop, cname)
3254 pwr->gdscs[i].supply = cname;
3255
3256 ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
3257 return ret;
3258}
3259
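/*
 * Illustrative only: each name listed in "qcom,regulator-names" is looked
 * up as a "<name>-supply" property by the regulator framework. A
 * hypothetical fragment (names and phandles are made up):
 *
 *	vdd-supply = <&gdsc_smmu>;
 *	qcom,regulator-names = "vdd";
 */
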
3260static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
3261{
3262 struct device *dev = pwr->dev;
3263
3264 /* We don't want the bus APIs to print an error message */
3265 if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
3266 dev_dbg(dev, "No bus scaling info\n");
3267 return 0;
3268 }
3269
3270 pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
3271 if (!pwr->bus_dt_data) {
3272 dev_err(dev, "Unable to read bus-scaling from devicetree\n");
3273 return -EINVAL;
3274 }
3275
3276 pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
3277 if (!pwr->bus_client) {
3278 dev_err(dev, "Bus client registration failed\n");
3279 msm_bus_cl_clear_pdata(pwr->bus_dt_data);
3280 return -EINVAL;
3281 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003282
3283 return 0;
3284}
3285
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003286/*
3287 * Cleanup is handled by devm. Any non-devm resources must be released explicitly.
3288 */
3289static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
3290 struct platform_device *pdev)
Patrick Daly2764f952016-09-06 19:22:44 -07003291{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003292 struct arm_smmu_power_resources *pwr;
3293 int ret;
Patrick Daly2764f952016-09-06 19:22:44 -07003294
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003295 pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
3296 if (!pwr)
3297 return ERR_PTR(-ENOMEM);
Patrick Daly2764f952016-09-06 19:22:44 -07003298
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003299 pwr->dev = &pdev->dev;
3300 pwr->pdev = pdev;
3301 mutex_init(&pwr->power_lock);
3302 spin_lock_init(&pwr->clock_refs_lock);
Patrick Daly2764f952016-09-06 19:22:44 -07003303
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003304 ret = arm_smmu_init_clocks(pwr);
3305 if (ret)
3306 return ERR_PTR(ret);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003307
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003308 ret = arm_smmu_init_regulators(pwr);
3309 if (ret)
3310 return ERR_PTR(ret);
3311
3312 ret = arm_smmu_init_bus_scaling(pwr);
3313 if (ret)
3314 return ERR_PTR(ret);
3315
3316 return pwr;
Patrick Daly2764f952016-09-06 19:22:44 -07003317}
3318
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003319/*
3320 * Bus APIs are not devm-safe.
3321 */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003322static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003323{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003324 msm_bus_scale_unregister_client(pwr->bus_client);
3325 msm_bus_cl_clear_pdata(pwr->bus_dt_data);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003326}
3327
Will Deacon45ae7cf2013-06-24 18:31:25 +01003328static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
3329{
3330 unsigned long size;
3331 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
3332 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003333 bool cttw_dt, cttw_reg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003334
Mitchel Humpherysba822582015-10-20 11:37:41 -07003335 dev_dbg(smmu->dev, "probing hardware configuration...\n");
3336 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01003337 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003338
3339 /* ID0 */
3340 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01003341
3342 /* Restrict available stages based on module parameter */
3343 if (force_stage == 1)
3344 id &= ~(ID0_S2TS | ID0_NTS);
3345 else if (force_stage == 2)
3346 id &= ~(ID0_S1TS | ID0_NTS);
3347
Will Deacon45ae7cf2013-06-24 18:31:25 +01003348 if (id & ID0_S1TS) {
3349 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003350 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003351 }
3352
3353 if (id & ID0_S2TS) {
3354 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003355 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003356 }
3357
3358 if (id & ID0_NTS) {
3359 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003360 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003361 }
3362
3363 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01003364 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003365 dev_err(smmu->dev, "\tno translation support!\n");
3366 return -ENODEV;
3367 }
3368
Robin Murphyb7862e32016-04-13 18:13:03 +01003369 if ((id & ID0_S1TS) &&
3370 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003371 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003372 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003373 }
3374
Robin Murphybae2c2d2015-07-29 19:46:05 +01003375 /*
3376 * In order for DMA API calls to work properly, we must defer to what
3377 * the DT says about coherency, regardless of what the hardware claims.
3378 * Fortunately, this also opens up a workaround for systems where the
3379 * ID register value has ended up configured incorrectly.
3380 */
3381 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
3382 cttw_reg = !!(id & ID0_CTTW);
3383 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003384 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003385 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003386 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01003387 cttw_dt ? "" : "non-");
3388 if (cttw_dt != cttw_reg)
3389 dev_notice(smmu->dev,
3390 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003391
3392 if (id & ID0_SMS) {
3393 u32 smr, sid, mask;
3394
3395 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
3396 smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
3397 ID0_NUMSMRG_MASK;
3398 if (smmu->num_mapping_groups == 0) {
3399 dev_err(smmu->dev,
3400 "stream-matching supported, but no SMRs present!\n");
3401 return -ENODEV;
3402 }
3403
Dhaval Patel031d7462015-05-09 14:47:29 -07003404 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
3405 smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
3406 smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
3407 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
3408 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
Will Deacon45ae7cf2013-06-24 18:31:25 +01003409
Dhaval Patel031d7462015-05-09 14:47:29 -07003410 mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
3411 sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
3412 if ((mask & sid) != sid) {
3413 dev_err(smmu->dev,
3414 "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
3415 mask, sid);
3416 return -ENODEV;
3417 }
3418
Mitchel Humpherysba822582015-10-20 11:37:41 -07003419 dev_dbg(smmu->dev,
Dhaval Patel031d7462015-05-09 14:47:29 -07003420 "\tstream matching with %u register groups, mask 0x%x",
3421 smmu->num_mapping_groups, mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003422 }
Olav Haugan3c8766d2014-08-22 17:12:32 -07003423 } else {
3424 smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
3425 ID0_NUMSIDB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003426 }
3427
Robin Murphy7602b872016-04-28 17:12:09 +01003428 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
3429 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
3430 if (!(id & ID0_PTFS_NO_AARCH32S))
3431 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
3432 }
3433
Will Deacon45ae7cf2013-06-24 18:31:25 +01003434 /* ID1 */
3435 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01003436 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003437
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003438 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00003439 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01003440 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003441 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07003442 dev_warn(smmu->dev,
3443 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
3444 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003445
Will Deacon518f7132014-11-14 17:17:54 +00003446 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003447 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
3448 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
3449 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
3450 return -ENODEV;
3451 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07003452 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01003453 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01003454 /*
3455 * Cavium CN88xx erratum #27704.
3456 * Ensure ASID and VMID allocation is unique across all SMMUs in
3457 * the system.
3458 */
3459 if (smmu->model == CAVIUM_SMMUV2) {
3460 smmu->cavium_id_base =
3461 atomic_add_return(smmu->num_context_banks,
3462 &cavium_smmu_context_count);
3463 smmu->cavium_id_base -= smmu->num_context_banks;
3464 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003465
3466 /* ID2 */
3467 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
3468 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003469 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003470
Will Deacon518f7132014-11-14 17:17:54 +00003471 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003472 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003473 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003474
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003475 if (id & ID2_VMID16)
3476 smmu->features |= ARM_SMMU_FEAT_VMID16;
3477
Robin Murphyf1d84542015-03-04 16:41:05 +00003478 /*
3479 * What the page table walker can address actually depends on which
3480 * descriptor format is in use, but since a) we don't know that yet,
3481 * and b) it can vary per context bank, this will have to do...
3482 */
3483 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
3484 dev_warn(smmu->dev,
3485 "failed to set DMA mask for table walker\n");
3486
Robin Murphyb7862e32016-04-13 18:13:03 +01003487 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00003488 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01003489 if (smmu->version == ARM_SMMU_V1_64K)
3490 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003491 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003492 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00003493 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00003494 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01003495 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00003496 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01003497 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00003498 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01003499 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003500 }
3501
Robin Murphy7602b872016-04-28 17:12:09 +01003502 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01003503 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01003504 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01003505 if (smmu->features &
3506 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01003507 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01003508 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01003509 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01003510 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01003511 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01003512
Robin Murphyd5466352016-05-09 17:20:09 +01003513 if (arm_smmu_ops.pgsize_bitmap == -1UL)
3514 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
3515 else
3516 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003517 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01003518 smmu->pgsize_bitmap);
3519
Will Deacon518f7132014-11-14 17:17:54 +00003520
Will Deacon28d60072014-09-01 16:24:48 +01003521 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003522 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
3523 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003524
3525 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003526 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
3527 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003528
Will Deacon45ae7cf2013-06-24 18:31:25 +01003529 return 0;
3530}
3531
Patrick Dalyd7476202016-09-08 18:23:28 -07003532static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
3533{
3534 if (!smmu->arch_ops)
3535 return 0;
3536 if (!smmu->arch_ops->init)
3537 return 0;
3538 return smmu->arch_ops->init(smmu);
3539}
3540
3541static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
3542{
3543 if (!smmu->arch_ops)
3544 return;
3545 if (!smmu->arch_ops->device_reset)
3546 return;
3547 return smmu->arch_ops->device_reset(smmu);
3548}
3549
Robin Murphy67b65a32016-04-13 18:12:57 +01003550struct arm_smmu_match_data {
3551 enum arm_smmu_arch_version version;
3552 enum arm_smmu_implementation model;
Patrick Dalyd7476202016-09-08 18:23:28 -07003553 struct arm_smmu_arch_ops *arch_ops;
Robin Murphy67b65a32016-04-13 18:12:57 +01003554};
3555
Patrick Dalyd7476202016-09-08 18:23:28 -07003556#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops) \
3557static struct arm_smmu_match_data name = { \
3558.version = ver, \
3559.model = imp, \
3560.arch_ops = ops, \
3561} \
Robin Murphy67b65a32016-04-13 18:12:57 +01003562
Patrick Daly1f8a2882016-09-12 17:32:05 -07003563struct arm_smmu_arch_ops qsmmuv500_arch_ops;
3564
Patrick Dalyd7476202016-09-08 18:23:28 -07003565ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
3566ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
3567ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
3568ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
3569ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003570ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
Patrick Daly1f8a2882016-09-12 17:32:05 -07003571ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
3572 &qsmmuv500_arch_ops);
Robin Murphy67b65a32016-04-13 18:12:57 +01003573
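/*
 * For reference, ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2,
 * &qsmmuv2_arch_ops); above expands to:
 *
 *	static struct arm_smmu_match_data qcom_smmuv2 = {
 *		.version = ARM_SMMU_V2,
 *		.model = QCOM_SMMUV2,
 *		.arch_ops = &qsmmuv2_arch_ops,
 *	};
 */
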
Joerg Roedel09b52692014-10-02 12:24:45 +02003574static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01003575 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
3576 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
3577 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01003578 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003579 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01003580 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07003581 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Patrick Daly1f8a2882016-09-12 17:32:05 -07003582 { .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
Robin Murphy09360402014-08-28 17:51:59 +01003583 { },
3584};
3585MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
3586
Patrick Daly1f8a2882016-09-12 17:32:05 -07003587static int qsmmuv500_tbu_register(struct device *dev, void *data);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003588static int arm_smmu_device_dt_probe(struct platform_device *pdev)
3589{
Robin Murphy09360402014-08-28 17:51:59 +01003590 const struct of_device_id *of_id;
Robin Murphy67b65a32016-04-13 18:12:57 +01003591 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003592 struct resource *res;
3593 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003594 struct device *dev = &pdev->dev;
3595 struct rb_node *node;
Mitchel Humpherysc6dd1ed2014-08-04 16:45:53 -07003596 int num_irqs, i, err, num_masters;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003597
3598 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
3599 if (!smmu) {
3600 dev_err(dev, "failed to allocate arm_smmu_device\n");
3601 return -ENOMEM;
3602 }
3603 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08003604 spin_lock_init(&smmu->atos_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07003605 idr_init(&smmu->asid_idr);
3606 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003607
Robin Murphy09360402014-08-28 17:51:59 +01003608 of_id = of_match_node(arm_smmu_of_match, dev->of_node);
Robin Murphy67b65a32016-04-13 18:12:57 +01003609 data = of_id->data;
3610 smmu->version = data->version;
3611 smmu->model = data->model;
Patrick Dalyd7476202016-09-08 18:23:28 -07003612 smmu->arch_ops = data->arch_ops;
Robin Murphy09360402014-08-28 17:51:59 +01003613
Will Deacon45ae7cf2013-06-24 18:31:25 +01003614 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01003615 smmu->base = devm_ioremap_resource(dev, res);
3616 if (IS_ERR(smmu->base))
3617 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003618 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003619
3620 if (of_property_read_u32(dev->of_node, "#global-interrupts",
3621 &smmu->num_global_irqs)) {
3622 dev_err(dev, "missing #global-interrupts property\n");
3623 return -ENODEV;
3624 }
3625
3626 num_irqs = 0;
3627 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
3628 num_irqs++;
3629 if (num_irqs > smmu->num_global_irqs)
3630 smmu->num_context_irqs++;
3631 }
3632
Andreas Herrmann44a08de2013-10-01 13:39:07 +01003633 if (!smmu->num_context_irqs) {
3634 dev_err(dev, "found %d interrupts but expected at least %d\n",
3635 num_irqs, smmu->num_global_irqs + 1);
3636 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003637 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003638
3639 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
3640 GFP_KERNEL);
3641 if (!smmu->irqs) {
3642 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
3643 return -ENOMEM;
3644 }
3645
3646 for (i = 0; i < num_irqs; ++i) {
3647 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07003648
Will Deacon45ae7cf2013-06-24 18:31:25 +01003649 if (irq < 0) {
3650 dev_err(dev, "failed to get irq index %d\n", i);
3651 return -ENODEV;
3652 }
3653 smmu->irqs[i] = irq;
3654 }
3655
Dhaval Patel031d7462015-05-09 14:47:29 -07003656 parse_driver_options(smmu);
3657
Olav Haugan3c8766d2014-08-22 17:12:32 -07003658
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003659 smmu->pwr = arm_smmu_init_power_resources(pdev);
3660 if (IS_ERR(smmu->pwr))
3661 return PTR_ERR(smmu->pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003662
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003663 err = arm_smmu_power_on(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07003664 if (err)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003665 goto out_exit_power_resources;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003666
3667 err = arm_smmu_device_cfg_probe(smmu);
3668 if (err)
3669 goto out_power_off;
3670
Will Deacon45ae7cf2013-06-24 18:31:25 +01003671 i = 0;
3672 smmu->masters = RB_ROOT;
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02003673
Mitchel Humpherysc6dd1ed2014-08-04 16:45:53 -07003674 err = arm_smmu_parse_iommus_properties(smmu, &num_masters);
3675 if (err)
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02003676 goto out_put_masters;
3677
Mitchel Humpherysba822582015-10-20 11:37:41 -07003678 dev_dbg(dev, "registered %d master devices\n", num_masters);
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02003679
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003680 err = arm_smmu_parse_impl_def_registers(smmu);
3681 if (err)
3682 goto out_put_masters;
3683
Robin Murphyb7862e32016-04-13 18:13:03 +01003684 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01003685 smmu->num_context_banks != smmu->num_context_irqs) {
3686 dev_err(dev,
Mitchel Humpherys73230ce2014-12-11 17:18:02 -08003687 "found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
3688 smmu->num_context_irqs, smmu->num_context_banks,
3689 smmu->num_context_banks);
3690 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003691 }
3692
Will Deacon45ae7cf2013-06-24 18:31:25 +01003693 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08003694 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
3695 NULL, arm_smmu_global_fault,
3696 IRQF_ONESHOT | IRQF_SHARED,
3697 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003698 if (err) {
3699 dev_err(dev, "failed to request global IRQ %d (%u)\n",
3700 i, smmu->irqs[i]);
Peng Fanbee14002016-07-04 17:38:22 +08003701 goto out_put_masters;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003702 }
3703 }
3704
3705 INIT_LIST_HEAD(&smmu->list);
3706 spin_lock(&arm_smmu_devices_lock);
3707 list_add(&smmu->list, &arm_smmu_devices);
3708 spin_unlock(&arm_smmu_devices_lock);
Will Deaconfd90cec2013-08-21 13:56:34 +01003709
Patrick Dalyd7476202016-09-08 18:23:28 -07003710 err = arm_smmu_arch_init(smmu);
3711 if (err)
3712 goto out_put_masters;
3713
Will Deaconfd90cec2013-08-21 13:56:34 +01003714 arm_smmu_device_reset(smmu);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003715 arm_smmu_power_off(smmu->pwr);
Patrick Dalyd7476202016-09-08 18:23:28 -07003716
Will Deacon45ae7cf2013-06-24 18:31:25 +01003717 return 0;
3718
Will Deacon45ae7cf2013-06-24 18:31:25 +01003719out_put_masters:
3720 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07003721 struct arm_smmu_master *master
3722 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003723 of_node_put(master->of_node);
3724 }
3725
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003726out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003727 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003728
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003729out_exit_power_resources:
3730 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003731
Will Deacon45ae7cf2013-06-24 18:31:25 +01003732 return err;
3733}
3734
3735static int arm_smmu_device_remove(struct platform_device *pdev)
3736{
3737 int i;
3738 struct device *dev = &pdev->dev;
3739 struct arm_smmu_device *curr, *smmu = NULL;
3740 struct rb_node *node;
3741
3742 spin_lock(&arm_smmu_devices_lock);
3743 list_for_each_entry(curr, &arm_smmu_devices, list) {
3744 if (curr->dev == dev) {
3745 smmu = curr;
3746 list_del(&smmu->list);
3747 break;
3748 }
3749 }
3750 spin_unlock(&arm_smmu_devices_lock);
3751
3752 if (!smmu)
3753 return -ENODEV;
3754
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003755 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003756 return -EINVAL;
3757
Will Deacon45ae7cf2013-06-24 18:31:25 +01003758 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07003759 struct arm_smmu_master *master
3760 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003761 of_node_put(master->of_node);
3762 }
3763
Will Deaconecfadb62013-07-31 19:21:28 +01003764 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Will Deacon45ae7cf2013-06-24 18:31:25 +01003765 dev_err(dev, "removing device with active domains!\n");
3766
3767 for (i = 0; i < smmu->num_global_irqs; ++i)
Peng Fanbee14002016-07-04 17:38:22 +08003768 devm_free_irq(smmu->dev, smmu->irqs[i], smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003769
Patrick Dalyc190d932016-08-30 17:23:28 -07003770 idr_destroy(&smmu->asid_idr);
3771
Will Deacon45ae7cf2013-06-24 18:31:25 +01003772 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07003773 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003774 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003775
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003776 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07003777
Will Deacon45ae7cf2013-06-24 18:31:25 +01003778 return 0;
3779}
3780
Will Deacon45ae7cf2013-06-24 18:31:25 +01003781static struct platform_driver arm_smmu_driver = {
3782 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003783 .name = "arm-smmu",
3784 .of_match_table = of_match_ptr(arm_smmu_of_match),
3785 },
3786 .probe = arm_smmu_device_dt_probe,
3787 .remove = arm_smmu_device_remove,
3788};
3789
3790static int __init arm_smmu_init(void)
3791{
Thierry Reding0e7d37a2014-11-07 15:26:18 +00003792 struct device_node *np;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003793 int ret;
3794
Thierry Reding0e7d37a2014-11-07 15:26:18 +00003795 /*
3796 * Play nice with systems that don't have an ARM SMMU by checking that
3797 * an ARM SMMU exists in the system before proceeding with the driver
3798 * and IOMMU bus operation registration.
3799 */
3800 np = of_find_matching_node(NULL, arm_smmu_of_match);
3801 if (!np)
3802 return 0;
3803
3804 of_node_put(np);
3805
Will Deacon45ae7cf2013-06-24 18:31:25 +01003806 ret = platform_driver_register(&arm_smmu_driver);
3807 if (ret)
3808 return ret;
3809
3810 /* Oh, for a proper bus abstraction */
Dan Carpenter6614ee72013-08-21 09:34:20 +01003811 if (!iommu_present(&platform_bus_type))
Will Deacon45ae7cf2013-06-24 18:31:25 +01003812 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
3813
Will Deacond123cf82014-02-04 22:17:53 +00003814#ifdef CONFIG_ARM_AMBA
Dan Carpenter6614ee72013-08-21 09:34:20 +01003815 if (!iommu_present(&amba_bustype))
Will Deacon45ae7cf2013-06-24 18:31:25 +01003816 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
Will Deacond123cf82014-02-04 22:17:53 +00003817#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01003818
Will Deacona9a1b0b2014-05-01 18:05:08 +01003819#ifdef CONFIG_PCI
Wei Chen112c8982016-06-13 17:20:17 +08003820 if (!iommu_present(&pci_bus_type)) {
3821 pci_request_acs();
Will Deacona9a1b0b2014-05-01 18:05:08 +01003822 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
Wei Chen112c8982016-06-13 17:20:17 +08003823 }
Will Deacona9a1b0b2014-05-01 18:05:08 +01003824#endif
3825
Will Deacon45ae7cf2013-06-24 18:31:25 +01003826 return 0;
3827}
3828
3829static void __exit arm_smmu_exit(void)
3830{
3831 return platform_driver_unregister(&arm_smmu_driver);
3832}
3833
Andreas Herrmannb1950b22013-10-01 13:39:05 +01003834subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003835module_exit(arm_smmu_exit);
3836
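/*
 * Qualcomm QSMMUV500 support: each TBU (translation buffer unit) is a
 * child platform device of the SMMU with its own register space and power
 * resources.  Registered TBUs are kept on a list hung off smmu->archdata
 * so that the arch_ops below can power them up and halt them as a group.
 */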
Patrick Daly1f8a2882016-09-12 17:32:05 -07003837#define DEBUG_SID_HALT_REG 0x0
3838#define DEBUG_SID_HALT_VAL (0x1 << 16)
3839
3840#define DEBUG_SR_HALT_ACK_REG 0x20
3841#define DEBUG_SR_HALT_ACK_VAL (0x1 << 1)
3842
3843#define TBU_DBG_TIMEOUT_US 30000
3844
3845struct qsmmuv500_tbu_device {
3846 struct list_head list;
3847 struct device *dev;
3848 struct arm_smmu_device *smmu;
3849 void __iomem *base;
3850 void __iomem *status_reg;
3851
3852 struct arm_smmu_power_resources *pwr;
3853
3854 /* Protects halt count */
3855 spinlock_t halt_lock;
3856 u32 halt_count;
3857};
3858
3859static int qsmmuv500_tbu_power_on_all(struct arm_smmu_device *smmu)
3860{
3861 struct qsmmuv500_tbu_device *tbu;
3862 struct list_head *list = smmu->archdata;
3863 int ret = 0;
3864
3865 list_for_each_entry(tbu, list, list) {
3866 ret = arm_smmu_power_on(tbu->pwr);
3867 if (ret)
3868 break;
3869 }
3870 if (!ret)
3871 return 0;
3872
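	/* Unwind: power off the TBUs that were successfully powered on */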
3873 list_for_each_entry_continue_reverse(tbu, list, list) {
3874 arm_smmu_power_off(tbu->pwr);
3875 }
3876 return ret;
3877}
3878
3879static void qsmmuv500_tbu_power_off_all(struct arm_smmu_device *smmu)
3880{
3881 struct qsmmuv500_tbu_device *tbu;
3882 struct list_head *list = smmu->archdata;
3883
3884 list_for_each_entry_reverse(tbu, list, list) {
3885 arm_smmu_power_off(tbu->pwr);
3886 }
3887}
3888
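/*
 * Halting a TBU is reference counted.  The first caller sets the halt
 * request bit in DEBUG_SID_HALT_REG and polls DEBUG_SR_HALT_ACK_REG for up
 * to TBU_DBG_TIMEOUT_US; later callers only bump halt_count.  Resume drops
 * the count and clears the request bit once it reaches zero.
 */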
3889static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu)
3890{
3891 unsigned long flags;
3892 u32 val;
3893 void __iomem *base;
3894
3895 spin_lock_irqsave(&tbu->halt_lock, flags);
3896 if (tbu->halt_count) {
3897 tbu->halt_count++;
3898 spin_unlock_irqrestore(&tbu->halt_lock, flags);
3899 return 0;
3900 }
3901
3902 base = tbu->base;
3903 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
3904 val |= DEBUG_SID_HALT_VAL;
3905 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
3906
3907 if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG,
3908 val, (val & DEBUG_SR_HALT_ACK_VAL),
3909 0, TBU_DBG_TIMEOUT_US)) {
3910 dev_err(tbu->dev, "Couldn't halt TBU!\n");
3911 spin_unlock_irqrestore(&tbu->halt_lock, flags);
3912 return -ETIMEDOUT;
3913 }
3914
3915 tbu->halt_count = 1;
3916 spin_unlock_irqrestore(&tbu->halt_lock, flags);
3917 return 0;
3918}
3919
3920static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
3921{
3922 unsigned long flags;
3923 u32 val;
3924 void __iomem *base;
3925
3926 spin_lock_irqsave(&tbu->halt_lock, flags);
3927 if (!tbu->halt_count) {
3928 		WARN(1, "%s: bad tbu->halt_count\n", dev_name(tbu->dev));
3929 spin_unlock_irqrestore(&tbu->halt_lock, flags);
3930 return;
3931
3932 } else if (tbu->halt_count > 1) {
3933 tbu->halt_count--;
3934 spin_unlock_irqrestore(&tbu->halt_lock, flags);
3935 return;
3936 }
3937
3938 base = tbu->base;
3939 val = readl_relaxed(base + DEBUG_SID_HALT_REG);
3940 val &= ~DEBUG_SID_HALT_VAL;
3941 writel_relaxed(val, base + DEBUG_SID_HALT_REG);
3942
3943 tbu->halt_count = 0;
3944 spin_unlock_irqrestore(&tbu->halt_lock, flags);
3945}
3946
3947static int qsmmuv500_halt_all(struct arm_smmu_device *smmu)
3948{
3949 struct qsmmuv500_tbu_device *tbu;
3950 struct list_head *list = smmu->archdata;
3951 int ret = 0;
3952
3953 list_for_each_entry(tbu, list, list) {
3954 ret = qsmmuv500_tbu_halt(tbu);
3955 if (ret)
3956 break;
3957 }
3958
3959 if (!ret)
3960 return 0;
3961
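	/* Unwind: resume any TBUs that were already halted */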
3962 list_for_each_entry_continue_reverse(tbu, list, list) {
3963 qsmmuv500_tbu_resume(tbu);
3964 }
3965 return ret;
3966}
3967
3968static void qsmmuv500_resume_all(struct arm_smmu_device *smmu)
3969{
3970 struct qsmmuv500_tbu_device *tbu;
3971 struct list_head *list = smmu->archdata;
3972
3973 list_for_each_entry(tbu, list, list) {
3974 qsmmuv500_tbu_resume(tbu);
3975 }
3976}
3977
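/*
 * Program the implementation-defined attach registers with every TBU
 * powered on and halted, so the writes land while client traffic through
 * the TBUs is quiescent.
 */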
3978static void qsmmuv500_device_reset(struct arm_smmu_device *smmu)
3979{
3980 int i, ret;
3981 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
3982
3983 ret = qsmmuv500_tbu_power_on_all(smmu);
3984 if (ret)
3985 return;
3986
3987 /* Program implementation defined registers */
3988 qsmmuv500_halt_all(smmu);
3989 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
3990 writel_relaxed(regs[i].value,
3991 ARM_SMMU_GR0(smmu) + regs[i].offset);
3992 qsmmuv500_resume_all(smmu);
3993 qsmmuv500_tbu_power_off_all(smmu);
3994}
3995
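/*
 * Called for each child device of the SMMU node; fails the SMMU probe if
 * any child has not yet been bound to a driver.
 */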
3996static int qsmmuv500_tbu_register(struct device *dev, void *data)
3997{
3998 struct arm_smmu_device *smmu = data;
3999 struct qsmmuv500_tbu_device *tbu;
4000 struct list_head *list = smmu->archdata;
4001
4002 if (!dev->driver) {
4003 dev_err(dev, "TBU failed probe, QSMMUV500 cannot continue!\n");
4004 return -EINVAL;
4005 }
4006
4007 tbu = dev_get_drvdata(dev);
4008
4009 INIT_LIST_HEAD(&tbu->list);
4010 tbu->smmu = smmu;
4011 list_add(&tbu->list, list);
4012 return 0;
4013}
4014
4015static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
4016{
4017 struct device *dev = smmu->dev;
4018 struct list_head *list;
4019 int ret;
4020
4021 list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
4022 if (!list)
4023 return -ENOMEM;
4024
4025 INIT_LIST_HEAD(list);
4026 smmu->archdata = list;
4027
4028 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
4029 if (ret)
4030 return ret;
4031
4032 /* Attempt to register child devices */
4033 ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
4034 if (ret)
4035 return -EINVAL;
4036
4037 return 0;
4038}
4039
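/* QSMMUV500-specific hooks invoked from the generic probe and reset paths */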
4040struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
4041 .init = qsmmuv500_arch_init,
4042 .device_reset = qsmmuv500_device_reset,
4043};
4044
4045static const struct of_device_id qsmmuv500_tbu_of_match[] = {
4046 {.compatible = "qcom,qsmmuv500-tbu"},
4047 {}
4048};
4049
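/*
 * A TBU is expected to appear as a child node of the SMMU.  A minimal,
 * illustrative binding sketch (node name, addresses and sizes are made up;
 * any clock/regulator properties consumed by
 * arm_smmu_init_power_resources() are omitted):
 *
 *	tbu@15185000 {
 *		compatible = "qcom,qsmmuv500-tbu";
 *		reg = <0x15185000 0x1000>,
 *		      <0x15180200 0x8>;
 *		reg-names = "base", "status-reg";
 *	};
 */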
4050static int qsmmuv500_tbu_probe(struct platform_device *pdev)
4051{
4052 struct resource *res;
4053 struct device *dev = &pdev->dev;
4054 struct qsmmuv500_tbu_device *tbu;
4055
4056 tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
4057 if (!tbu)
4058 return -ENOMEM;
4059
4060 INIT_LIST_HEAD(&tbu->list);
4061 tbu->dev = dev;
4062 spin_lock_init(&tbu->halt_lock);
4063
4064 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
4065 tbu->base = devm_ioremap_resource(dev, res);
4066 if (IS_ERR(tbu->base))
4067 return PTR_ERR(tbu->base);
4068
4069 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "status-reg");
4070 tbu->status_reg = devm_ioremap_resource(dev, res);
4071 if (IS_ERR(tbu->status_reg))
4072 return PTR_ERR(tbu->status_reg);
4073
4074 tbu->pwr = arm_smmu_init_power_resources(pdev);
4075 if (IS_ERR(tbu->pwr))
4076 return PTR_ERR(tbu->pwr);
4077
4078 dev_set_drvdata(dev, tbu);
4079 return 0;
4080}
4081
4082static struct platform_driver qsmmuv500_tbu_driver = {
4083 .driver = {
4084 .name = "qsmmuv500-tbu",
4085 .of_match_table = of_match_ptr(qsmmuv500_tbu_of_match),
4086 },
4087 .probe = qsmmuv500_tbu_probe,
4088};
4089
4090static int __init qsmmuv500_tbu_init(void)
4091{
4092 return platform_driver_register(&qsmmuv500_tbu_driver);
4093}
4094subsys_initcall(qsmmuv500_tbu_init);
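/*
 * The TBU driver must be registered before the SMMU's arch init runs,
 * since qsmmuv500_tbu_register() treats an unbound TBU child as a fatal
 * error.
 */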
4095
Will Deacon45ae7cf2013-06-24 18:31:25 +01004096MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
4097MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
4098MODULE_LICENSE("GPL v2");