/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/msm-bus.h>
#include <dt-bindings/msm/msm-bus-ids.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

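/*
 * Illustrative usage (a sketch, not taken from the original source): with
 * the "calxeda,smmu-secure-config-access" option set, global configuration
 * state is read through the secure alias, e.g.
 *
 *	gfsr = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
 */
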
/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

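/*
 * Sketch (illustrative only): writing a 64-bit TTBR through the helper
 * above. On 64-bit builds this is a single writeq_relaxed(); on 32-bit
 * builds the low word carries all AArch32-relevant state, so
 * writel_relaxed() suffices:
 *
 *	smmu_write_atomic_lq(ttbr, cb_base + ARM_SMMU_CB_TTBR0);
 */
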
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		500000	/* 500ms */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

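/*
 * Illustrative composition of a stream-match register value (a sketch;
 * 'idx', 'mask' and 'id' are hypothetical locals):
 *
 *	u32 smr = SMR_VALID | (mask << SMR_MASK_SHIFT) | (id << SMR_ID_SHIFT);
 *	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(idx));
 */
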
/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
#define CBFRSYNRA_SID_MASK		(0xffff)

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

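/*
 * Sketch (assumes 'cfg' is an attached context's arm_smmu_cfg, as in the
 * fault handler below): a context bank's registers sit in the upper half of
 * the SMMU address space, one page per bank:
 *
 *	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 */
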
#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FSRRESTORE		0x5c
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_TLBSYNC		0x7f0
#define ARM_SMMU_CB_TLBSTATUS		0x7f4
#define TLBSTATUS_SACTIVE		(1 << 0)
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define ARM_SMMU_IMPL_DEF0(smmu) \
	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
#define ARM_SMMU_IMPL_DEF1(smmu) \
	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass = true;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
	QCOM_SMMUV2,
};

struct arm_smmu_device;
struct arm_smmu_arch_ops {
	int (*init)(struct arm_smmu_device *smmu);
	void (*device_reset)(struct arm_smmu_device *smmu);
	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
					 dma_addr_t iova);
	void (*iova_to_phys_fault)(struct iommu_domain *domain,
				   dma_addr_t iova, phys_addr_t *phys1,
				   phys_addr_t *phys_post_tlbiall);
};

struct arm_smmu_impl_def_reg {
	u32 offset;
	u32 value;
};

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

/*
 * Describes resources required for on/off power operation.
 * Separate reference count is provided for atomic/nonatomic
 * operations.
 */
struct arm_smmu_power_resources {
	struct platform_device		*pdev;
	struct device			*dev;

	struct clk			**clocks;
	int				num_clocks;

	struct regulator_bulk_data	*gdscs;
	int				num_gdscs;

	uint32_t			bus_client;
	struct msm_bus_scale_pdata	*bus_dt_data;

	/* Protects power_count */
	struct mutex			power_lock;
	int				power_count;

	/* Protects clock_refs_count */
	spinlock_t			clock_refs_lock;
	int				clock_refs_count;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
#define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;

	u32				cavium_id_base; /* Specific to Cavium */
	/* Specific to QCOM */
	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
	unsigned int			num_impl_def_attach_registers;

	struct arm_smmu_power_resources *pwr;

	spinlock_t			atos_lock;

	/* protects idr */
	struct mutex			idr_mutex;
	struct idr			asid_idr;

	struct arm_smmu_arch_ops	*arch_ops;
	void				*archdata;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	u32				procid;
	u16				asid;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff
#define INVALID_CBNDX			0xff
#define INVALID_ASID			0xffff
/*
 * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
 * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
 */
#define MAX_ASID			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg)	((cfg)->asid)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_pte_info {
	void *virt_addr;
	size_t size;
	struct list_head entry;
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	struct io_pgtable_cfg		pgtbl_cfg;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	u32				attributes;
	u32				secure_vmid;
	struct list_head		pte_info_list;
	struct list_head		unassign_list;
	struct mutex			assign_lock;
	struct list_head		secure_pool_list;
	struct iommu_domain		domain;
};

struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
	{ 0, NULL},
};
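
/*
 * Illustrative device-tree fragment exercising the options above (the node
 * name and the particular property mix are hypothetical):
 *
 *	smmu@d00000 {
 *		compatible = "qcom,smmu-v2";
 *		qcom,skip-init;
 *		qcom,dynamic;
 *	};
 */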

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova);
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova);
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);

static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);

static int arm_smmu_arch_init(struct arm_smmu_device *smmu);
static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu);

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_dbg(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static bool is_dynamic_domain(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}

static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
{
	return (smmu_domain->secure_vmid != VMID_INVAL);
}

static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_lock(&smmu_domain->assign_lock);
}

static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
{
	if (arm_smmu_is_domain_secure(smmu_domain))
		mutex_unlock(&smmu_domain->assign_lock);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

struct iommus_entry {
	struct list_head list;
	struct device_node *node;
	u16 streamids[MAX_MASTER_STREAMIDS];
	int num_sids;
};

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct iommus_entry *entry)
{
	int i;
	struct arm_smmu_master *master;
	struct device *dev = smmu->dev;

	master = find_smmu_master(smmu, entry->node);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			entry->node->name);
		return -EBUSY;
	}

	if (entry->num_sids > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, entry->node->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node = entry->node;
	master->cfg.num_streamids = entry->num_sids;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = entry->streamids[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				entry->node->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static int arm_smmu_parse_iommus_properties(struct arm_smmu_device *smmu,
					    int *num_masters)
{
	struct of_phandle_args iommuspec;
	struct device_node *master;

	*num_masters = 0;

	for_each_node_with_property(master, "iommus") {
		int arg_ind = 0;
		struct iommus_entry *entry, *n;
		LIST_HEAD(iommus);

		while (!of_parse_phandle_with_args(
				master, "iommus", "#iommu-cells",
				arg_ind, &iommuspec)) {
			if (iommuspec.np != smmu->dev->of_node) {
				arg_ind++;
				continue;
			}

			list_for_each_entry(entry, &iommus, list)
				if (entry->node == master)
					break;
			if (&entry->list == &iommus) {
				entry = devm_kzalloc(smmu->dev, sizeof(*entry),
						     GFP_KERNEL);
				if (!entry)
					return -ENOMEM;
				entry->node = master;
				list_add(&entry->list, &iommus);
			}
			switch (iommuspec.args_count) {
			case 0:
				/*
				 * For pci-e devices the SIDs are provided
				 * at device attach time.
				 */
				break;
			case 1:
				entry->num_sids++;
				entry->streamids[entry->num_sids - 1]
					= iommuspec.args[0];
				break;
			default:
				dev_err(smmu->dev,
					"iommus property has wrong #iommu-cells\n");
				return -EINVAL;
			}
			arg_ind++;
		}

		list_for_each_entry_safe(entry, n, &iommus, list) {
			int rc = register_smmu_master(smmu, entry);

			if (rc) {
				dev_err(smmu->dev, "Couldn't register %s\n",
					entry->node->name);
			} else {
				(*num_masters)++;
			}
			list_del(&entry->list);
			devm_kfree(smmu->dev, entry);
		}
	}

	return 0;
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

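/*
 * Example (a sketch of how the helpers above are used; 'cfg' is a
 * hypothetical context configuration here): allocating a context bank
 * index for a domain:
 *
 *	int ret = __arm_smmu_alloc_bitmap(smmu->context_map, 0,
 *					  smmu->num_context_banks);
 *	if (ret < 0)
 *		return ret;	(-ENOSPC when every bank is in use)
 *	cfg->cbndx = ret;
 */
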
static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_prepare(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
			while (i--)
				clk_unprepare(pwr->clocks[i]);
			break;
		}
	}
	return ret;
}

static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_unprepare(pwr->clocks[i - 1]);
}

static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i, ret = 0;

	for (i = 0; i < pwr->num_clocks; ++i) {
		ret = clk_enable(pwr->clocks[i]);
		if (ret) {
			dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
			while (i--)
				clk_disable(pwr->clocks[i]);
			break;
		}
	}

	return ret;
}

static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
{
	int i;

	for (i = pwr->num_clocks; i; --i)
		clk_disable(pwr->clocks[i - 1]);
}

static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return 0;
	return msm_bus_scale_client_update_request(pwr->bus_client, 1);
}

static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
{
	if (!pwr->bus_client)
		return;
	WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
}

/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count > 0) {
		pwr->clock_refs_count++;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return 0;
	}

	ret = arm_smmu_enable_clocks(pwr);
	if (!ret)
		pwr->clock_refs_count = 1;

	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
	return ret;
}

/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
{
	unsigned long flags;

	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
	if (pwr->clock_refs_count == 0) {
		WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;

	} else if (pwr->clock_refs_count > 1) {
		pwr->clock_refs_count--;
		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
		return;
	}

	arm_smmu_disable_clocks(pwr);

	pwr->clock_refs_count = 0;
	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
}

static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
{
	int ret;

	mutex_lock(&pwr->power_lock);
	if (pwr->power_count > 0) {
		pwr->power_count += 1;
		mutex_unlock(&pwr->power_lock);
		return 0;
	}

	ret = regulator_bulk_enable(pwr->num_gdscs, pwr->gdscs);
	if (ret)
		goto out_unlock;

	ret = arm_smmu_request_bus(pwr);
	if (ret)
		goto out_disable_regulators;

	ret = arm_smmu_prepare_clocks(pwr);
	if (ret)
		goto out_disable_bus;

	pwr->power_count = 1;
	mutex_unlock(&pwr->power_lock);
	return 0;

out_disable_bus:
	arm_smmu_unrequest_bus(pwr);
out_disable_regulators:
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
out_unlock:
	mutex_unlock(&pwr->power_lock);
	return ret;
}

static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
{
	mutex_lock(&pwr->power_lock);
	if (pwr->power_count == 0) {
		WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
		mutex_unlock(&pwr->power_lock);
		return;

	} else if (pwr->power_count > 1) {
		pwr->power_count--;
		mutex_unlock(&pwr->power_lock);
		return;
	}

	arm_smmu_unprepare_clocks(pwr);
	arm_smmu_unrequest_bus(pwr);
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);

	mutex_unlock(&pwr->power_lock);
}

static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
{
	int ret;

	ret = arm_smmu_power_on_slow(pwr);
	if (ret)
		return ret;

	ret = arm_smmu_power_on_atomic(pwr);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	arm_smmu_power_off_slow(pwr);
	return ret;
}

static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
{
	arm_smmu_power_off_atomic(pwr);
	arm_smmu_power_off_slow(pwr);
}

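/*
 * Usage sketch (illustrative): sleepable callers take the full sequence
 * (regulators -> bus vote -> clock prepare -> clock enable); atomic-context
 * callers may only bump the clock reference via the *_atomic variants:
 *
 *	if (arm_smmu_power_on(smmu->pwr))
 *		return -EINVAL;
 *	... program SMMU registers ...
 *	arm_smmu_power_off(smmu->pwr);
 */
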
/*
 * Must be used instead of arm_smmu_power_on if it may be called from
 * atomic context
 */
static int arm_smmu_domain_power_on(struct iommu_domain *domain,
				    struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain)
		return arm_smmu_power_on_atomic(smmu->pwr);

	return arm_smmu_power_on(smmu->pwr);
}

/*
 * Must be used instead of arm_smmu_power_off if it may be called from
 * atomic context
 */
static void arm_smmu_domain_power_off(struct iommu_domain *domain,
				      struct arm_smmu_device *smmu)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);

	if (atomic_domain) {
		arm_smmu_power_off_atomic(smmu->pwr);
		return;
	}

	arm_smmu_power_off(smmu->pwr);
}

/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
				 int cbndx)
{
	void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
	u32 val;

	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
				      !(val & TLBSTATUS_SACTIVE),
				      0, TLB_LOOP_TIMEOUT))
		dev_err(smmu->dev, "TLBSYNC timeout!\n");
}

static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
}

/* Must be called with clocks/regulators enabled */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
		__arm_smmu_tlb_sync(smmu);
	}
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

struct arm_smmu_secure_pool_chunk {
	void *addr;
	size_t size;
	struct list_head list;
};

static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
					 size_t size)
{
	struct arm_smmu_secure_pool_chunk *it;

	list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
		if (it->size == size) {
			void *addr = it->addr;

			list_del(&it->list);
			kfree(it);
			return addr;
		}
	}

	return NULL;
}

static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
				    void *addr, size_t size)
{
	struct arm_smmu_secure_pool_chunk *chunk;

	chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
	if (!chunk)
		return -ENOMEM;

	chunk->addr = addr;
	chunk->size = size;
	memset(addr, 0, size);
	list_add(&chunk->list, &smmu_domain->secure_pool_list);

	return 0;
}

static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_secure_pool_chunk *it, *i;

	list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
		arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
		/* pages will be freed later (after being unassigned) */
		kfree(it);
	}
}

static void *arm_smmu_alloc_pages_exact(void *cookie,
					size_t size, gfp_t gfp_mask)
{
	int ret;
	void *page;
	struct arm_smmu_domain *smmu_domain = cookie;

	if (!arm_smmu_is_domain_secure(smmu_domain))
		return alloc_pages_exact(size, gfp_mask);

	page = arm_smmu_secure_pool_remove(smmu_domain, size);
	if (page)
		return page;

	page = alloc_pages_exact(size, gfp_mask);
	if (page) {
		ret = arm_smmu_prepare_pgtable(page, cookie);
		if (ret) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return page;
}

static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	if (!arm_smmu_is_domain_secure(smmu_domain)) {
		free_pages_exact(virt, size);
		return;
	}

	if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
		arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
}

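/*
 * Recycling sketch (illustrative): for secure domains, freed table memory is
 * parked on secure_pool_list rather than returned to the allocator, so a
 * later allocation of the same size reuses an already-assigned page:
 *
 *	page = arm_smmu_secure_pool_remove(smmu_domain, size);
 *	if (!page)
 *		page = alloc_pages_exact(size, gfp_mask);
 */
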
static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
	.alloc_pages_exact = arm_smmu_alloc_pages_exact,
	.free_pages_exact = arm_smmu_free_pages_exact,
};

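/*
 * Illustrative wiring (assuming the standard io-pgtable flow used by this
 * driver): the callbacks above are handed to the page-table code through
 * io_pgtable_cfg when a context is initialised, e.g.
 *
 *	pgtbl_cfg.tlb = &arm_smmu_gather_ops;
 *	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
 */
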
static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
					 dma_addr_t iova, u32 fsr)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	phys_addr_t phys;
	phys_addr_t phys_post_tlbiall;

	smmu = smmu_domain->smmu;

	if (smmu->arch_ops && smmu->arch_ops->iova_to_phys_fault) {
		smmu->arch_ops->iova_to_phys_fault(domain, iova, &phys,
						   &phys_post_tlbiall);
	} else {
		phys = arm_smmu_iova_to_phys_hard(domain, iova);
		arm_smmu_tlb_inv_context(smmu_domain);
		phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
	}

	if (phys != phys_post_tlbiall) {
		dev_err(smmu->dev,
			"ATOS results differed across TLBIALL...\n"
			"Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
	}
	if (!phys_post_tlbiall) {
		dev_err(smmu->dev,
			"ATOS still failed. If the page tables look good (check the software table walk) then hardware might be misbehaving.\n");
	}

	return phys_post_tlbiall;
}

Will Deacon45ae7cf2013-06-24 18:31:25 +01001260static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
1261{
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001262 int flags, ret, tmp;
Patrick Daly5ba28112016-08-30 19:18:52 -07001263 u32 fsr, fsynr, resume;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001264 unsigned long iova;
1265 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +01001266 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001267 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1268 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001269 void __iomem *cb_base;
Shalaj Jain04059c52015-03-03 13:34:59 -08001270 void __iomem *gr1_base;
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001271 bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001272 phys_addr_t phys_soft;
Shalaj Jain04059c52015-03-03 13:34:59 -08001273 u32 frsynra;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001274 bool non_fatal_fault = !!(smmu_domain->attributes &
1275 DOMAIN_ATTR_NON_FATAL_FAULTS);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001276
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001277 static DEFINE_RATELIMIT_STATE(_rs,
1278 DEFAULT_RATELIMIT_INTERVAL,
1279 DEFAULT_RATELIMIT_BURST);
1280
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001281 ret = arm_smmu_power_on(smmu->pwr);
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001282 if (ret)
1283 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001284
Shalaj Jain04059c52015-03-03 13:34:59 -08001285 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001286 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001287 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
1288
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001289 if (!(fsr & FSR_FAULT)) {
1290 ret = IRQ_NONE;
1291 goto out_power_off;
1292 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001293
Mitchel Humpherys07ba44b2015-01-30 14:58:52 -08001294 if (fatal_asf && (fsr & FSR_ASF)) {
1295 dev_err(smmu->dev,
1296 "Took an address size fault. Refusing to recover.\n");
1297 BUG();
1298 }
1299
Will Deacon45ae7cf2013-06-24 18:31:25 +01001300 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
Patrick Daly5ba28112016-08-30 19:18:52 -07001301 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001302 if (fsr & FSR_TF)
1303 flags |= IOMMU_FAULT_TRANSLATION;
1304 if (fsr & FSR_PF)
1305 flags |= IOMMU_FAULT_PERMISSION;
Mitchel Humpherysdd75e332015-08-19 11:02:33 -07001306 if (fsr & FSR_EF)
1307 flags |= IOMMU_FAULT_EXTERNAL;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001308 if (fsr & FSR_SS)
1309 flags |= IOMMU_FAULT_TRANSACTION_STALLED;
Patrick Daly5ba28112016-08-30 19:18:52 -07001310
Robin Murphyf9a05f02016-04-13 18:13:01 +01001311 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001312 phys_soft = arm_smmu_iova_to_phys(domain, iova);
Shalaj Jain04059c52015-03-03 13:34:59 -08001313 frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
1314 frsynra &= CBFRSYNRA_SID_MASK;
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001315 tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
1316 if (!tmp || (tmp == -EBUSY)) {
Mitchel Humpherysb8be4132015-02-06 14:25:10 -08001317 dev_dbg(smmu->dev,
1318 "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1319 iova, fsr, fsynr, cfg->cbndx);
1320 dev_dbg(smmu->dev,
1321 "soft iova-to-phys=%pa\n", &phys_soft);
Patrick Daly5ba28112016-08-30 19:18:52 -07001322 ret = IRQ_HANDLED;
Shrenuj Bansald5083c02015-09-18 14:59:09 -07001323 resume = RESUME_TERMINATE;
Patrick Daly5ba28112016-08-30 19:18:52 -07001324 } else {
Patrick Dalyd54eafd2016-08-23 17:01:43 -07001325 phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
1326 fsr);
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001327 if (__ratelimit(&_rs)) {
1328 dev_err(smmu->dev,
1329 "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
1330 iova, fsr, fsynr, cfg->cbndx);
1331 dev_err(smmu->dev, "FAR = %016lx\n",
1332 (unsigned long)iova);
1333 dev_err(smmu->dev,
1334 "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
1335 fsr,
1336 (fsr & 0x02) ? "TF " : "",
1337 (fsr & 0x04) ? "AFF " : "",
1338 (fsr & 0x08) ? "PF " : "",
1339 (fsr & 0x10) ? "EF " : "",
1340 (fsr & 0x20) ? "TLBMCF " : "",
1341 (fsr & 0x40) ? "TLBLKF " : "",
1342 (fsr & 0x80) ? "MHF " : "",
1343 (fsr & 0x40000000) ? "SS " : "",
1344 (fsr & 0x80000000) ? "MULTI " : "");
1345 dev_err(smmu->dev,
1346 "soft iova-to-phys=%pa\n", &phys_soft);
Mitchel Humpherysd03b65d2015-11-05 11:50:29 -08001347 if (!phys_soft)
1348 dev_err(smmu->dev,
1349 "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
1350 dev_name(smmu->dev));
Mitchel Humpherysa8dabc92015-09-14 12:08:09 -07001351 dev_err(smmu->dev,
1352 "hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
1353 dev_err(smmu->dev, "SID=0x%x\n", frsynra);
1354 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001355 ret = IRQ_NONE;
1356 resume = RESUME_TERMINATE;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07001357 if (!non_fatal_fault) {
1358 dev_err(smmu->dev,
1359 "Unhandled arm-smmu context fault!\n");
1360 BUG();
1361 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001362 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001363
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001364 /*
1365 * If the client returns -EBUSY, do not clear FSR and do not RESUME
1366 * if stalled. This is required to keep the IOMMU client stalled on
1367 * the outstanding fault. This gives the client a chance to take any
1368 * debug action and then terminate the stalled transaction.
 1369 * So, the sequence in case of stall on fault (see the sketch after this function) should be:
1370 * 1) Do not clear FSR or write to RESUME here
1371 * 2) Client takes any debug action
1372 * 3) Client terminates the stalled transaction and resumes the IOMMU
1373 * 4) Client clears FSR. The FSR should only be cleared after 3) and
1374 * not before so that the fault remains outstanding. This ensures
1375 * SCTLR.HUPCF has the desired effect if subsequent transactions also
1376 * need to be terminated.
1377 */
1378 if (tmp != -EBUSY) {
1379 /* Clear the faulting FSR */
1380 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
Patrick Daly5ba28112016-08-30 19:18:52 -07001381
Sushmita Susheelendraa474ae12015-06-02 15:46:24 -06001382 /*
1383 * Barrier required to ensure that the FSR is cleared
1384 * before resuming SMMU operation
1385 */
1386 wmb();
1387
1388 /* Retry or terminate any stalled transactions */
1389 if (fsr & FSR_SS)
1390 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
1391 }
Patrick Daly5ba28112016-08-30 19:18:52 -07001392
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001393out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001394 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001395
Patrick Daly5ba28112016-08-30 19:18:52 -07001396 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001397}
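
/*
 * A minimal client-side sketch (not driver code) of the stall-handling
 * sequence described in the comment above, registered with
 * iommu_set_fault_handler(domain, my_fault_handler, ctx). Illustrative
 * only: struct my_resume_ctx and the my_* names are hypothetical,
 * ctx->debug_work is assumed initialised with INIT_WORK(), and how the
 * client maps cb_base for its context bank is outside the sketch.
 */
struct my_resume_ctx {
	struct work_struct debug_work;
	void __iomem *cb_base;
};

static int my_fault_handler(struct iommu_domain *domain, struct device *dev,
			    unsigned long iova, int flags, void *token)
{
	struct my_resume_ctx *ctx = token;

	if (!(flags & IOMMU_FAULT_TRANSACTION_STALLED))
		return -ENOSYS;	/* not stalled: let the driver terminate */

	dev_err(dev, "stalled fault at iova 0x%08lx\n", iova);
	/* 1) Return -EBUSY so the driver leaves FSR and RESUME untouched */
	schedule_work(&ctx->debug_work);
	return -EBUSY;
}

static void my_debug_work_fn(struct work_struct *work)
{
	struct my_resume_ctx *ctx = container_of(work, struct my_resume_ctx,
						 debug_work);

	pr_info("client: dumping debug state for stalled fault\n"); /* 2) */
	/* 3) Terminate the stalled transaction and resume the SMMU */
	writel_relaxed(RESUME_TERMINATE, ctx->cb_base + ARM_SMMU_CB_RESUME);
	/* 4) Clear the sticky fault status only after resuming */
	writel_relaxed(readl_relaxed(ctx->cb_base + ARM_SMMU_CB_FSR),
		       ctx->cb_base + ARM_SMMU_CB_FSR);
}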
1398
1399static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
1400{
1401 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
1402 struct arm_smmu_device *smmu = dev;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001403 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001404
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001405 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07001406 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001407
Will Deacon45ae7cf2013-06-24 18:31:25 +01001408 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
1409 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
1410 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
1411 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
1412
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001413 if (!gfsr) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001414 arm_smmu_power_off(smmu->pwr);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001415 return IRQ_NONE;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001416 }
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001417
Will Deacon45ae7cf2013-06-24 18:31:25 +01001418 dev_err_ratelimited(smmu->dev,
1419 "Unexpected global fault, this could be serious\n");
1420 dev_err_ratelimited(smmu->dev,
1421 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
1422 gfsr, gfsynr0, gfsynr1, gfsynr2);
1423
1424 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001425 arm_smmu_power_off(smmu->pwr);
Will Deaconadaba322013-07-31 19:21:26 +01001426 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001427}
1428
Will Deacon518f7132014-11-14 17:17:54 +00001429static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
1430 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001431{
1432 u32 reg;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001433 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001434 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +01001435 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1436 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +01001437 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001438
Will Deacon45ae7cf2013-06-24 18:31:25 +01001439 gr1_base = ARM_SMMU_GR1(smmu);
Will Deacon44680ee2014-06-25 11:29:12 +01001440 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
1441 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001442
Will Deacon4a1c93c2015-03-04 12:21:03 +00001443 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +01001444 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1445 reg = CBA2R_RW64_64BIT;
1446 else
1447 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001448 /* 16-bit VMIDs live in CBA2R */
1449 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001450 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001451
Will Deacon4a1c93c2015-03-04 12:21:03 +00001452 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
1453 }
1454
Will Deacon45ae7cf2013-06-24 18:31:25 +01001455 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +01001456 reg = cfg->cbar;
Robin Murphyb7862e32016-04-13 18:13:03 +01001457 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001458 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001459
Will Deacon57ca90f2014-02-06 14:59:05 +00001460 /*
1461 * Use the weakest shareability/memory types, so they are
1462 * overridden by the ttbcr/pte.
1463 */
1464 if (stage1) {
1465 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1466 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001467 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
1468 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001469 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +00001470 }
Will Deacon44680ee2014-06-25 11:29:12 +01001471 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001472
Will Deacon518f7132014-11-14 17:17:54 +00001473 /* TTBRs */
1474 if (stage1) {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001475 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
Will Deacon45ae7cf2013-06-24 18:31:25 +01001476
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001477 reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001478 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001479
1480 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -08001481 reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001482 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
Will Deacon518f7132014-11-14 17:17:54 +00001483 } else {
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001484 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +01001485 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +00001486 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001487
Will Deacon518f7132014-11-14 17:17:54 +00001488 /* TTBCR */
1489 if (stage1) {
1490 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1491 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
1492 if (smmu->version > ARM_SMMU_V1) {
1493 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
Will Deacon5dc56162015-05-08 17:44:22 +01001494 reg |= TTBCR2_SEP_UPSTREAM;
Will Deacon518f7132014-11-14 17:17:54 +00001495 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001496 }
1497 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001498 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
1499 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001500 }
1501
Will Deacon518f7132014-11-14 17:17:54 +00001502 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001503 if (stage1) {
Will Deacon518f7132014-11-14 17:17:54 +00001504 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
Will Deacon45ae7cf2013-06-24 18:31:25 +01001505 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Will Deacon518f7132014-11-14 17:17:54 +00001506 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
1507 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001508 }
1509
Will Deacon45ae7cf2013-06-24 18:31:25 +01001510 /* SCTLR */
Patrick Dalye62d3362016-03-15 18:58:28 -07001511 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_EAE_SBOP;
1512
1513 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) ||
1514 !stage1)
1515 reg |= SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001516 if (stage1)
1517 reg |= SCTLR_S1_ASIDPNE;
1518#ifdef __BIG_ENDIAN
1519 reg |= SCTLR_E;
1520#endif
Will Deacon25724842013-08-21 13:49:53 +01001521 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001522}
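
/*
 * Worked example (illustrative values, assuming TTBRn_ASID_SHIFT is 48 in
 * this tree): a stage-1 context using ASID 3 with its page-table root at
 * physical address 0x8_1234_0000 gets
 *
 *	reg64 = 0x812340000 | (3ULL << 48) = 0x0003000812340000
 *
 * written to TTBR0 above. Note that SCTLR is programmed last: the context
 * bank only starts translating once SCTLR.M is set, so the earlier
 * relaxed writes cannot race with live translations.
 */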
1523
Patrick Dalyc190d932016-08-30 17:23:28 -07001524static int arm_smmu_init_asid(struct iommu_domain *domain,
1525 struct arm_smmu_device *smmu)
1526{
1527 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1528 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1529 bool dynamic = is_dynamic_domain(domain);
1530 int ret;
1531
1532 if (!dynamic) {
1533 cfg->asid = cfg->cbndx + 1;
1534 } else {
1535 mutex_lock(&smmu->idr_mutex);
1536 ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
1537 smmu->num_context_banks + 2,
1538 MAX_ASID + 1, GFP_KERNEL);
1539
1540 mutex_unlock(&smmu->idr_mutex);
1541 if (ret < 0) {
1542 dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
1543 ret);
1544 return ret;
1545 }
1546 cfg->asid = ret;
1547 }
1548 return 0;
1549}
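
/*
 * Numeric illustration of the split above: on an SMMU with 8 context
 * banks, a static (non-dynamic) domain attached to context bank 4 simply
 * uses ASID 5 (cbndx + 1), while dynamic domains draw cyclically from
 * [10, MAX_ASID] via the IDR (idr_alloc_cyclic() treats its end argument
 * as exclusive, hence MAX_ASID + 1). This keeps the low ASIDs reserved
 * for the per-context-bank static assignment.
 */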
1550
1551static void arm_smmu_free_asid(struct iommu_domain *domain)
1552{
1553 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1554 struct arm_smmu_device *smmu = smmu_domain->smmu;
1555 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1556 bool dynamic = is_dynamic_domain(domain);
1557
1558 if (cfg->asid == INVALID_ASID || !dynamic)
1559 return;
1560
1561 mutex_lock(&smmu->idr_mutex);
1562 idr_remove(&smmu->asid_idr, cfg->asid);
1563 mutex_unlock(&smmu->idr_mutex);
1564}
1565
Will Deacon45ae7cf2013-06-24 18:31:25 +01001566static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Will Deacon44680ee2014-06-25 11:29:12 +01001567 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001568{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001569 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001570 unsigned long ias, oas;
1571 struct io_pgtable_ops *pgtbl_ops;
Will Deacon518f7132014-11-14 17:17:54 +00001572 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +01001573 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001574 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001575 bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
Patrick Dalyc190d932016-08-30 17:23:28 -07001576 bool dynamic;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001577
Will Deacon518f7132014-11-14 17:17:54 +00001578 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001579 if (smmu_domain->smmu)
1580 goto out_unlock;
1581
Patrick Dalyc190d932016-08-30 17:23:28 -07001582 smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
1583 smmu_domain->cfg.asid = INVALID_ASID;
1584
Robin Murphy98006992016-04-20 14:53:33 +01001585 /* We're bypassing these SIDs, so don't allocate an actual context */
1586 if (domain->type == IOMMU_DOMAIN_DMA) {
1587 smmu_domain->smmu = smmu;
1588 goto out_unlock;
1589 }
1590
Patrick Dalyc190d932016-08-30 17:23:28 -07001591 dynamic = is_dynamic_domain(domain);
1592 if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
1593 dev_err(smmu->dev, "dynamic domains not supported\n");
1594 ret = -EPERM;
1595 goto out_unlock;
1596 }
1597
Will Deaconc752ce42014-06-25 22:46:31 +01001598 /*
1599 * Mapping the requested stage onto what we support is surprisingly
1600 * complicated, mainly because the spec allows S1+S2 SMMUs without
1601 * support for nested translation. That means we end up with the
1602 * following table:
1603 *
1604 * Requested Supported Actual
1605 * S1 N S1
1606 * S1 S1+S2 S1
1607 * S1 S2 S2
1608 * S1 S1 S1
1609 * N N N
1610 * N S1+S2 S2
1611 * N S2 S2
1612 * N S1 S1
1613 *
1614 * Note that you can't actually request stage-2 mappings.
1615 */
1616 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1617 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1618 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1619 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1620
Robin Murphy7602b872016-04-28 17:12:09 +01001621 /*
1622 * Choosing a suitable context format is even more fiddly. Until we
1623 * grow some way for the caller to express a preference, and/or move
1624 * the decision into the io-pgtable code where it arguably belongs,
1625 * just aim for the closest thing to the rest of the system, and hope
1626 * that the hardware isn't esoteric enough that we can't assume AArch64
1627 * support to be a superset of AArch32 support...
1628 */
1629 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
1630 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
1631 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
1632 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
1633 ARM_SMMU_FEAT_FMT_AARCH64_16K |
1634 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
1635 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
1636
1637 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
1638 ret = -EINVAL;
1639 goto out_unlock;
1640 }
1641
Will Deaconc752ce42014-06-25 22:46:31 +01001642 switch (smmu_domain->stage) {
1643 case ARM_SMMU_DOMAIN_S1:
1644 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
1645 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +00001646 ias = smmu->va_size;
1647 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001648 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001649 fmt = ARM_64_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001650 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001651 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +01001652 ias = min(ias, 32UL);
1653 oas = min(oas, 40UL);
1654 }
Will Deaconc752ce42014-06-25 22:46:31 +01001655 break;
1656 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +01001657 /*
1658 * We will likely want to change this if/when KVM gets
1659 * involved.
1660 */
Will Deaconc752ce42014-06-25 22:46:31 +01001661 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +01001662 cfg->cbar = CBAR_TYPE_S2_TRANS;
1663 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +00001664 ias = smmu->ipa_size;
1665 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +01001666 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +00001667 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001668 } else {
Will Deacon518f7132014-11-14 17:17:54 +00001669 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +01001670 ias = min(ias, 40UL);
1671 oas = min(oas, 40UL);
1672 }
Will Deaconc752ce42014-06-25 22:46:31 +01001673 break;
1674 default:
1675 ret = -EINVAL;
1676 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001677 }
1678
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07001679 if (is_fast)
1680 fmt = ARM_V8L_FAST;
1681
1682
Patrick Dalyc190d932016-08-30 17:23:28 -07001683 /* Dynamic domains must set cbndx through domain attribute */
1684 if (!dynamic) {
1685 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
Will Deacon45ae7cf2013-06-24 18:31:25 +01001686 smmu->num_context_banks);
Patrick Dalyc190d932016-08-30 17:23:28 -07001687 if (ret < 0)
1688 goto out_unlock;
1689 cfg->cbndx = ret;
1690 }
Robin Murphyb7862e32016-04-13 18:13:03 +01001691 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +01001692 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1693 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001694 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001695 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001696 }
1697
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001698 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
Robin Murphyd5466352016-05-09 17:20:09 +01001699 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +00001700 .ias = ias,
1701 .oas = oas,
1702 .tlb = &arm_smmu_gather_ops,
Robin Murphy2df7a252015-07-29 19:46:06 +01001703 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +00001704 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001705
Will Deacon518f7132014-11-14 17:17:54 +00001706 smmu_domain->smmu = smmu;
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001707 pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
1708 smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00001709 if (!pgtbl_ops) {
1710 ret = -ENOMEM;
1711 goto out_clear_smmu;
1712 }
1713
Patrick Dalyc11d1082016-09-01 15:52:44 -07001714 /*
1715 * assign any page table memory that might have been allocated
1716 * during alloc_io_pgtable_ops
1717 */
Patrick Dalye271f212016-10-04 13:24:49 -07001718 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001719 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001720 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001721
Robin Murphyd5466352016-05-09 17:20:09 +01001722 /* Update the domain's page sizes to reflect the page table format */
Mitchel Humpherys39e9c912015-04-15 15:14:15 -07001723 domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
Will Deacon518f7132014-11-14 17:17:54 +00001724
Patrick Dalyc190d932016-08-30 17:23:28 -07001725 /* Assign an asid */
1726 ret = arm_smmu_init_asid(domain, smmu);
1727 if (ret)
1728 goto out_clear_smmu;
Will Deacon518f7132014-11-14 17:17:54 +00001729
Patrick Dalyc190d932016-08-30 17:23:28 -07001730 if (!dynamic) {
1731 /* Initialise the context bank with our page table cfg */
1732 arm_smmu_init_context_bank(smmu_domain,
1733 &smmu_domain->pgtbl_cfg);
1734
1735 /*
1736 * Request context fault interrupt. Do this last to avoid the
1737 * handler seeing a half-initialised domain state.
1738 */
1739 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1740 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
Mitchel Humpheryscca60112015-01-13 13:38:12 -08001741 arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
1742 "arm-smmu-context-fault", domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001743 if (ret < 0) {
1744 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1745 cfg->irptndx, irq);
1746 cfg->irptndx = INVALID_IRPTNDX;
1747 goto out_clear_smmu;
1748 }
1749 } else {
Will Deacon44680ee2014-06-25 11:29:12 +01001750 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001751 }
Will Deacon518f7132014-11-14 17:17:54 +00001752 mutex_unlock(&smmu_domain->init_mutex);
1753
1754 /* Publish page table ops for map/unmap */
1755 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001756 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001757
Will Deacon518f7132014-11-14 17:17:54 +00001758out_clear_smmu:
Jeremy Gebbenfa24b0c2015-06-16 12:45:31 -06001759 arm_smmu_destroy_domain_context(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001760 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001761out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +00001762 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001763 return ret;
1764}
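
/*
 * A usage sketch, not driver code: allocating a dynamic domain from a
 * client. Assumptions: "dev" is already probed behind this SMMU, the SMMU
 * was probed with the ARM_SMMU_OPT_DYNAMIC option, and DOMAIN_ATTR_DYNAMIC
 * and DOMAIN_ATTR_CONTEXT_BANK are settable before the first attach
 * (attach is what finalises the domain above). Error handling is trimmed.
 */
static struct iommu_domain *example_alloc_dynamic_domain(struct device *dev,
							 unsigned int cbndx)
{
	struct iommu_domain *domain;
	int dynamic = 1;

	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return NULL;

	if (iommu_domain_set_attr(domain, DOMAIN_ATTR_DYNAMIC, &dynamic) ||
	    iommu_domain_set_attr(domain, DOMAIN_ATTR_CONTEXT_BANK, &cbndx) ||
	    iommu_attach_device(domain, dev)) {
		iommu_domain_free(domain);
		return NULL;
	}

	return domain;
}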
1765
1766static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1767{
Joerg Roedel1d672632015-03-26 13:43:10 +01001768 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001769 struct arm_smmu_device *smmu = smmu_domain->smmu;
1770 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +01001771 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001772 int irq;
Patrick Dalyc190d932016-08-30 17:23:28 -07001773 bool dynamic;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001774 int ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001775
Robin Murphy98006992016-04-20 14:53:33 +01001776 if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001777 return;
1778
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001779 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001780 if (ret) {
 1781 WARN_ONCE(ret, "Whoops, powering on smmu %p failed. Leaking context bank\n",
1782 smmu);
1783 return;
1784 }
1785
Patrick Dalyc190d932016-08-30 17:23:28 -07001786 dynamic = is_dynamic_domain(domain);
1787 if (dynamic) {
1788 arm_smmu_free_asid(domain);
1789 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001790 arm_smmu_power_off(smmu->pwr);
Patrick Dalye271f212016-10-04 13:24:49 -07001791 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001792 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001793 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001794 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc190d932016-08-30 17:23:28 -07001795 return;
1796 }
1797
Will Deacon518f7132014-11-14 17:17:54 +00001798 /*
1799 * Disable the context bank and free the page tables before freeing
1800 * it.
1801 */
Will Deacon44680ee2014-06-25 11:29:12 +01001802 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +01001803 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +01001804
Will Deacon44680ee2014-06-25 11:29:12 +01001805 if (cfg->irptndx != INVALID_IRPTNDX) {
1806 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08001807 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001808 }
1809
Markus Elfring44830b02015-11-06 18:32:41 +01001810 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Patrick Dalye271f212016-10-04 13:24:49 -07001811 arm_smmu_secure_domain_lock(smmu_domain);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001812 arm_smmu_secure_pool_destroy(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07001813 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07001814 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon44680ee2014-06-25 11:29:12 +01001815 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07001816
Patrick Daly5b3d8c62016-11-01 15:34:11 -07001817 arm_smmu_power_off(smmu->pwr);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001818}
1819
Joerg Roedel1d672632015-03-26 13:43:10 +01001820static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001821{
1822 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001823
Patrick Daly09801312016-08-29 17:02:52 -07001824 /* Do not support IOMMU_DOMAIN_DMA for now */
1825 if (type != IOMMU_DOMAIN_UNMANAGED)
Joerg Roedel1d672632015-03-26 13:43:10 +01001826 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001827 /*
1828 * Allocate the domain and initialise some of its data structures.
1829 * We can't really do anything meaningful until we've added a
1830 * master.
1831 */
1832 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1833 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001834 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001835
Robin Murphy9adb9592016-01-26 18:06:36 +00001836 if (type == IOMMU_DOMAIN_DMA &&
1837 iommu_get_dma_cookie(&smmu_domain->domain)) {
1838 kfree(smmu_domain);
1839 return NULL;
1840 }
1841
Will Deacon518f7132014-11-14 17:17:54 +00001842 mutex_init(&smmu_domain->init_mutex);
1843 spin_lock_init(&smmu_domain->pgtbl_lock);
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06001844 smmu_domain->cfg.cbndx = INVALID_CBNDX;
Patrick Dalyc11d1082016-09-01 15:52:44 -07001845 smmu_domain->secure_vmid = VMID_INVAL;
1846 INIT_LIST_HEAD(&smmu_domain->pte_info_list);
1847 INIT_LIST_HEAD(&smmu_domain->unassign_list);
Patrick Dalye271f212016-10-04 13:24:49 -07001848 mutex_init(&smmu_domain->assign_lock);
Patrick Dalyb7dfda72016-10-04 14:42:58 -07001849 INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
Joerg Roedel1d672632015-03-26 13:43:10 +01001850
1851 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001852}
1853
Joerg Roedel1d672632015-03-26 13:43:10 +01001854static void arm_smmu_domain_free(struct iommu_domain *domain)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001855{
Joerg Roedel1d672632015-03-26 13:43:10 +01001856 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon1463fe42013-07-31 19:21:27 +01001857
1858 /*
1859 * Free the domain resources. We assume that all devices have
1860 * already been detached.
1861 */
Robin Murphy9adb9592016-01-26 18:06:36 +00001862 iommu_put_dma_cookie(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001863 arm_smmu_destroy_domain_context(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001864 kfree(smmu_domain);
1865}
1866
1867static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001868 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001869{
1870 int i;
1871 struct arm_smmu_smr *smrs;
1872 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1873
1874 if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
1875 return 0;
1876
Will Deacona9a1b0b2014-05-01 18:05:08 +01001877 if (cfg->smrs)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001878 return -EEXIST;
1879
Mitchel Humpherys29073202014-07-08 09:52:18 -07001880 smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001881 if (!smrs) {
Will Deacona9a1b0b2014-05-01 18:05:08 +01001882 dev_err(smmu->dev, "failed to allocate %d SMRs\n",
1883 cfg->num_streamids);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001884 return -ENOMEM;
1885 }
1886
Will Deacon44680ee2014-06-25 11:29:12 +01001887 /* Allocate the SMRs on the SMMU */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001888 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001889 int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
1890 smmu->num_mapping_groups);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001891 if (idx < 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001892 dev_err(smmu->dev, "failed to allocate free SMR\n");
1893 goto err_free_smrs;
1894 }
1895
1896 smrs[i] = (struct arm_smmu_smr) {
1897 .idx = idx,
1898 .mask = 0, /* We don't currently share SMRs */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001899 .id = cfg->streamids[i],
Will Deacon45ae7cf2013-06-24 18:31:25 +01001900 };
1901 }
1902
1903 /* It worked! Now, poke the actual hardware */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001904 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001905 u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
1906 smrs[i].mask << SMR_MASK_SHIFT;
1907 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
1908 }
1909
Will Deacona9a1b0b2014-05-01 18:05:08 +01001910 cfg->smrs = smrs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001911 return 0;
1912
1913err_free_smrs:
1914 while (--i >= 0)
1915 __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
1916 kfree(smrs);
1917 return -ENOSPC;
1918}
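
/*
 * Illustrative SMR encoding (hypothetical values): a master with stream
 * ID 0x42 that lands in SMR index 5 results in
 *
 *	writel_relaxed(SMR_VALID | (0x42 << SMR_ID_SHIFT) |
 *		       (0 << SMR_MASK_SHIFT),
 *		       gr0_base + ARM_SMMU_GR0_SMR(5));
 *
 * The zero mask requests an exact match, reflecting the "we don't
 * currently share SMRs" policy above.
 */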
1919
1920static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001921 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001922{
1923 int i;
1924 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001925 struct arm_smmu_smr *smrs = cfg->smrs;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001926
Will Deacon43b412b2014-07-15 11:22:24 +01001927 if (!smrs)
1928 return;
1929
Will Deacon45ae7cf2013-06-24 18:31:25 +01001930 /* Invalidate the SMRs before freeing back to the allocator */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001931 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001932 u8 idx = smrs[i].idx;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001933
Will Deacon45ae7cf2013-06-24 18:31:25 +01001934 writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
1935 __arm_smmu_free_bitmap(smmu->smr_map, idx);
1936 }
1937
Will Deacona9a1b0b2014-05-01 18:05:08 +01001938 cfg->smrs = NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001939 kfree(smrs);
1940}
1941
Will Deacon45ae7cf2013-06-24 18:31:25 +01001942static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001943 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001944{
1945 int i, ret;
Will Deacon44680ee2014-06-25 11:29:12 +01001946 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001947 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1948
Will Deacon5f634952016-04-20 14:53:32 +01001949 /*
1950 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
1951 * for all devices behind the SMMU. Note that we need to take
 1952 * care configuring SMRs for devices that are both a platform_device
 1953 * and a PCI device (i.e. a PCI host controller)
1954 */
1955 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
1956 return 0;
1957
Will Deacon8f68f8e2014-07-15 11:27:08 +01001958 /* Devices in an IOMMU group may already be configured */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001959 ret = arm_smmu_master_configure_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001960 if (ret)
Will Deacon8f68f8e2014-07-15 11:27:08 +01001961 return ret == -EEXIST ? 0 : ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001962
Will Deacona9a1b0b2014-05-01 18:05:08 +01001963 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001964 u32 idx, s2cr;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001965
Will Deacona9a1b0b2014-05-01 18:05:08 +01001966 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Patrick Dalyf4930442016-06-27 20:50:14 -07001967 s2cr = S2CR_TYPE_TRANS |
Will Deacon44680ee2014-06-25 11:29:12 +01001968 (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001969 writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
1970 }
1971
1972 return 0;
1973}
1974
1975static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001976 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001977{
Will Deacon43b412b2014-07-15 11:22:24 +01001978 int i;
Will Deacon44680ee2014-06-25 11:29:12 +01001979 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon43b412b2014-07-15 11:22:24 +01001980 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001981
Will Deacon8f68f8e2014-07-15 11:27:08 +01001982 /* An IOMMU group is torn down by the first device to be removed */
1983 if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
1984 return;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001985
1986 /*
1987 * We *must* clear the S2CR first, because freeing the SMR means
1988 * that it can be re-allocated immediately.
1989 */
Will Deacon43b412b2014-07-15 11:22:24 +01001990 for (i = 0; i < cfg->num_streamids; ++i) {
1991 u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Robin Murphy25a1c962016-02-10 14:25:33 +00001992 u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
Will Deacon43b412b2014-07-15 11:22:24 +01001993
Robin Murphy25a1c962016-02-10 14:25:33 +00001994 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
Will Deacon43b412b2014-07-15 11:22:24 +01001995 }
1996
Will Deacona9a1b0b2014-05-01 18:05:08 +01001997 arm_smmu_master_free_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001998}
1999
Patrick Daly09801312016-08-29 17:02:52 -07002000static void arm_smmu_detach_dev(struct iommu_domain *domain,
2001 struct device *dev)
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002002{
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002003 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly09801312016-08-29 17:02:52 -07002004 struct arm_smmu_device *smmu = smmu_domain->smmu;
2005 struct arm_smmu_master_cfg *cfg;
2006 int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
Patrick Daly8befb662016-08-17 20:03:28 -07002007 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Patrick Daly09801312016-08-29 17:02:52 -07002008
2009 if (dynamic)
2010 return;
2011
2012 cfg = find_smmu_master_cfg(dev);
2013 if (!cfg)
2014 return;
2015
2016 if (!smmu) {
2017 dev_err(dev, "Domain not attached; cannot detach!\n");
2018 return;
2019 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002020
2021 dev->archdata.iommu = NULL;
2022 arm_smmu_domain_remove_master(smmu_domain, cfg);
Patrick Daly8befb662016-08-17 20:03:28 -07002023
 2024 /* Drop the extra non-atomic power vote taken at attach time */
2025 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002026 WARN_ON(arm_smmu_power_on_atomic(smmu->pwr));
2027 arm_smmu_power_off(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07002028 }
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002029}
2030
Patrick Dalye271f212016-10-04 13:24:49 -07002031static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
Patrick Dalyc11d1082016-09-01 15:52:44 -07002032{
Patrick Dalye271f212016-10-04 13:24:49 -07002033 int ret = 0;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002034 int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2035 int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
2036 int source_vmid = VMID_HLOS;
2037 struct arm_smmu_pte_info *pte_info, *temp;
2038
Patrick Dalye271f212016-10-04 13:24:49 -07002039 if (!arm_smmu_is_domain_secure(smmu_domain))
2040 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002041
Patrick Dalye271f212016-10-04 13:24:49 -07002042 list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
Patrick Dalyc11d1082016-09-01 15:52:44 -07002043 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2044 PAGE_SIZE, &source_vmid, 1,
2045 dest_vmids, dest_perms, 2);
2046 if (WARN_ON(ret))
2047 break;
2048 }
2049
2050 list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
2051 entry) {
2052 list_del(&pte_info->entry);
2053 kfree(pte_info);
2054 }
Patrick Dalye271f212016-10-04 13:24:49 -07002055 return ret;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002056}
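
/*
 * After a successful hyp_assign_phys() above, each queued page-table page
 * is shared read-write with VMID_HLOS and read-only with the domain's
 * secure VMID (see dest_perms), so the secure side can walk the tables
 * but never modify them.
 */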
2057
2058static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
2059{
2060 int ret;
2061 int dest_vmids = VMID_HLOS;
Neeti Desai61007372015-07-28 11:02:02 -07002062 int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002063 int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
2064 struct arm_smmu_pte_info *pte_info, *temp;
2065
Patrick Dalye271f212016-10-04 13:24:49 -07002066 if (!arm_smmu_is_domain_secure(smmu_domain))
Patrick Dalyc11d1082016-09-01 15:52:44 -07002067 return;
2068
2069 list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
2070 ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
2071 PAGE_SIZE, source_vmlist, 2,
2072 &dest_vmids, &dest_perms, 1);
2073 if (WARN_ON(ret))
2074 break;
2075 free_pages_exact(pte_info->virt_addr, pte_info->size);
2076 }
2077
2078 list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
2079 entry) {
2080 list_del(&pte_info->entry);
2081 kfree(pte_info);
2082 }
2083}
2084
2085static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
2086{
2087 struct arm_smmu_domain *smmu_domain = cookie;
2088 struct arm_smmu_pte_info *pte_info;
2089
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002090 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002091
2092 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2093 if (!pte_info)
2094 return;
2095
2096 pte_info->virt_addr = addr;
2097 pte_info->size = size;
2098 list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
2099}
2100
2101static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
2102{
2103 struct arm_smmu_domain *smmu_domain = cookie;
2104 struct arm_smmu_pte_info *pte_info;
2105
Patrick Dalyb7dfda72016-10-04 14:42:58 -07002106 BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
Patrick Dalyc11d1082016-09-01 15:52:44 -07002107
2108 pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
2109 if (!pte_info)
2110 return -ENOMEM;
2111 pte_info->virt_addr = addr;
2112 list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
2113 return 0;
2114}
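
/*
 * The two helpers above implement a deferred hand-off: they are called
 * under the pgtbl_lock spinlock (atomic context), so they only queue the
 * page on pte_info_list/unassign_list with GFP_ATOMIC. The sleeping
 * hyp_assign_phys() calls happen later, from arm_smmu_assign_table() and
 * arm_smmu_unassign_table(), with the secure domain mutex held.
 */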
2115
Will Deacon45ae7cf2013-06-24 18:31:25 +01002116static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2117{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002118 int ret;
Joerg Roedel1d672632015-03-26 13:43:10 +01002119 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002120 struct arm_smmu_device *smmu;
Will Deacona9a1b0b2014-05-01 18:05:08 +01002121 struct arm_smmu_master_cfg *cfg;
Patrick Daly8befb662016-08-17 20:03:28 -07002122 int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002123
Will Deacon8f68f8e2014-07-15 11:27:08 +01002124 smmu = find_smmu_for_device(dev);
Will Deacon44680ee2014-06-25 11:29:12 +01002125 if (!smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002126 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
2127 return -ENXIO;
2128 }
2129
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002130 /* Enable Clocks and Power */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002131 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002132 if (ret)
2133 return ret;
2134
Patrick Daly8befb662016-08-17 20:03:28 -07002135 /*
 2136 * Keep an additional vote for non-atomic power until the domain
 2137 * is detached.
2138 */
2139 if (atomic_domain) {
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002140 ret = arm_smmu_power_on(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07002141 if (ret)
2142 goto out_power_off;
2143
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002144 arm_smmu_power_off_atomic(smmu->pwr);
Patrick Daly8befb662016-08-17 20:03:28 -07002145 }
2146
Will Deacon518f7132014-11-14 17:17:54 +00002147 /* Ensure that the domain is finalised */
2148 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02002149 if (ret < 0)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002150 goto out_power_off;
Will Deacon518f7132014-11-14 17:17:54 +00002151
Patrick Dalyc190d932016-08-30 17:23:28 -07002152 /* Do not modify the SIDs, HW is still running */
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002153 if (is_dynamic_domain(domain)) {
2154 ret = 0;
2155 goto out_power_off;
2156 }
Patrick Dalyc190d932016-08-30 17:23:28 -07002157
Will Deacon45ae7cf2013-06-24 18:31:25 +01002158 /*
Will Deacon44680ee2014-06-25 11:29:12 +01002159 * Sanity check the domain. We don't support domains across
2160 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01002161 */
Will Deacon518f7132014-11-14 17:17:54 +00002162 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002163 dev_err(dev,
2164 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01002165 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002166 ret = -EINVAL;
2167 goto out_power_off;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002168 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002169
2170 /* Looks ok, so add the device to the domain */
Will Deacon8f68f8e2014-07-15 11:27:08 +01002171 cfg = find_smmu_master_cfg(dev);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002172 if (!cfg) {
2173 ret = -ENODEV;
2174 goto out_power_off;
2175 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002176
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002177 /* Detach the dev from its current domain */
2178 if (dev->archdata.iommu)
Patrick Daly09801312016-08-29 17:02:52 -07002179 arm_smmu_detach_dev(dev->archdata.iommu, dev);
Will Deaconbc7f2ce2016-02-17 17:41:57 +00002180
Will Deacon844e35b2014-07-17 11:23:51 +01002181 ret = arm_smmu_domain_add_master(smmu_domain, cfg);
2182 if (!ret)
2183 dev->archdata.iommu = domain;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002184
2185out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002186 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002187
Will Deacon45ae7cf2013-06-24 18:31:25 +01002188 return ret;
2189}
2190
Will Deacon45ae7cf2013-06-24 18:31:25 +01002191static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00002192 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002193{
Will Deacon518f7132014-11-14 17:17:54 +00002194 int ret;
2195 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002196 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002197 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002198
Will Deacon518f7132014-11-14 17:17:54 +00002199 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002200 return -ENODEV;
2201
Patrick Dalye271f212016-10-04 13:24:49 -07002202 arm_smmu_secure_domain_lock(smmu_domain);
2203
Will Deacon518f7132014-11-14 17:17:54 +00002204 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2205 ret = ops->map(ops, iova, paddr, size, prot);
2206 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002207
2208 arm_smmu_assign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002209 arm_smmu_secure_domain_unlock(smmu_domain);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002210
Will Deacon518f7132014-11-14 17:17:54 +00002211 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002212}
2213
2214static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
2215 size_t size)
2216{
Will Deacon518f7132014-11-14 17:17:54 +00002217 size_t ret;
2218 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002219 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002220 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002221
Will Deacon518f7132014-11-14 17:17:54 +00002222 if (!ops)
2223 return 0;
2224
Patrick Daly8befb662016-08-17 20:03:28 -07002225 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002226 if (ret)
 2227 return 0; /* size_t return: don't propagate a negative errno */
2228
Patrick Dalye271f212016-10-04 13:24:49 -07002229 arm_smmu_secure_domain_lock(smmu_domain);
2230
Will Deacon518f7132014-11-14 17:17:54 +00002231 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2232 ret = ops->unmap(ops, iova, size);
2233 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002234
Patrick Daly8befb662016-08-17 20:03:28 -07002235 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002236 /*
2237 * While splitting up block mappings, we might allocate page table
 2238 * memory during unmap, so the VMIDs need to be assigned to the
2239 * memory here as well.
2240 */
2241 arm_smmu_assign_table(smmu_domain);
 2242 /* Also unassign any pages that were freed during unmap */
2243 arm_smmu_unassign_table(smmu_domain);
Patrick Dalye271f212016-10-04 13:24:49 -07002244 arm_smmu_secure_domain_unlock(smmu_domain);
Will Deacon518f7132014-11-14 17:17:54 +00002245 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002246}
2247
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002248static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
2249 struct scatterlist *sg, unsigned int nents, int prot)
2250{
2251 int ret;
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002252 size_t size;
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002253 unsigned long flags;
2254 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2255 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2256
 2257 if (!ops)
 2258 return 0; /* size_t return: no bytes mapped */
2259
Patrick Daly8befb662016-08-17 20:03:28 -07002260 ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002261 if (ret)
 2262 return 0; /* size_t return: don't propagate a negative errno */
2263
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002264 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002265 ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002266 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Rohit Vaswani4d7cdd92015-08-18 17:57:44 -07002267
2268 if (!ret)
2269 arm_smmu_unmap(domain, iova, size);
2270
Patrick Daly8befb662016-08-17 20:03:28 -07002271 arm_smmu_domain_power_off(domain, smmu_domain->smmu);
Patrick Dalyc11d1082016-09-01 15:52:44 -07002272 arm_smmu_assign_table(smmu_domain);
2273
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002274 return ret;
2275}
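
/*
 * Note the all-or-nothing behaviour above: when ops->map_sg() fails part
 * way through, "size" holds the extent that was mapped before the
 * failure, and arm_smmu_unmap() rolls it back so callers never observe a
 * half-populated range.
 */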
2276
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002277static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
Patrick Dalyad441dd2016-09-15 15:50:46 -07002278 dma_addr_t iova)
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002279{
Joerg Roedel1d672632015-03-26 13:43:10 +01002280 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002281 struct arm_smmu_device *smmu = smmu_domain->smmu;
2282 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 2283 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2284 struct device *dev = smmu->dev;
2285 void __iomem *cb_base;
2286 u32 tmp;
2287 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01002288 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002289
2290 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2291
Robin Murphy661d9622015-05-27 17:09:34 +01002292 /* ATS1 registers can only be written atomically */
2293 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01002294 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01002295 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
2296 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01002297 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002298
2299 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
2300 !(tmp & ATSR_ACTIVE), 5, 50)) {
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002301 phys = ops->iova_to_phys(ops, iova);
Mitchel Humpherysd7e09712015-02-04 21:30:58 -08002302 dev_err(dev,
2303 "iova to phys timed out on %pad. software table walk result=%pa.\n",
2304 &iova, &phys);
2305 phys = 0;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002306 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002307 }
2308
Robin Murphyf9a05f02016-04-13 18:13:01 +01002309 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002310 if (phys & CB_PAR_F) {
2311 dev_err(dev, "translation fault!\n");
2312 dev_err(dev, "PAR = 0x%llx\n", phys);
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002313 phys = 0;
2314 } else {
2315 phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002316 }
Patrick Dalyad441dd2016-09-15 15:50:46 -07002317
Mitchel Humpherys0ed5da62014-12-04 11:47:49 -08002318 return phys;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002319}
2320
Will Deacon45ae7cf2013-06-24 18:31:25 +01002321static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002322 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002323{
Will Deacon518f7132014-11-14 17:17:54 +00002324 phys_addr_t ret;
2325 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01002326 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00002327 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002328
Will Deacon518f7132014-11-14 17:17:54 +00002329 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00002330 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002331
Will Deacon518f7132014-11-14 17:17:54 +00002332 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Patrick Dalya85a2fb2016-06-21 19:23:06 -07002333 ret = ops->iova_to_phys(ops, iova);
Will Deacon518f7132014-11-14 17:17:54 +00002334 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00002335
Will Deacon518f7132014-11-14 17:17:54 +00002336 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002337}
2338
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002339/*
 2340 * This function can sleep and must not be called from atomic context. It
 2341 * will power on the register block if required. This restriction does not
 2342 * apply to the original iova_to_phys() op.
2343 */
2344static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
2345 dma_addr_t iova)
2346{
2347 phys_addr_t ret = 0;
2348 unsigned long flags;
2349 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002350
Patrick Dalyad441dd2016-09-15 15:50:46 -07002351 if (smmu_domain->smmu->arch_ops &&
2352 smmu_domain->smmu->arch_ops->iova_to_phys_hard)
2353 return smmu_domain->smmu->arch_ops->iova_to_phys_hard(
2354 domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002355
2356 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2357 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
2358 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
Patrick Dalyad441dd2016-09-15 15:50:46 -07002359 ret = __arm_smmu_iova_to_phys_hard(domain, iova);
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002360
2361 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2362
2363 return ret;
2364}
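
/*
 * Usage sketch (hypothetical caller): cross-check the software table walk
 * against the hardware ATOS result. Per the comment above, this must run
 * from sleepable context, e.g. a debugfs handler, never from the
 * map/unmap fast path.
 */
static void example_check_translation(struct iommu_domain *domain,
				      dma_addr_t iova)
{
	phys_addr_t soft = arm_smmu_iova_to_phys(domain, iova);
	phys_addr_t hard = arm_smmu_iova_to_phys_hard(domain, iova);

	if (soft != hard)
		pr_warn("iova %pad: soft walk %pa != ATOS %pa\n",
			&iova, &soft, &hard);
}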
2365
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002366static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002367{
Will Deacond0948942014-06-24 17:30:10 +01002368 switch (cap) {
2369 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002370 /*
2371 * Return true here as the SMMU can always send out coherent
2372 * requests.
2373 */
2374 return true;
Will Deacond0948942014-06-24 17:30:10 +01002375 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002376 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01002377 case IOMMU_CAP_NOEXEC:
2378 return true;
Will Deacond0948942014-06-24 17:30:10 +01002379 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02002380 return false;
Will Deacond0948942014-06-24 17:30:10 +01002381 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002382}
Will Deacon45ae7cf2013-06-24 18:31:25 +01002383
Will Deacona9a1b0b2014-05-01 18:05:08 +01002384static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
2385{
2386 *((u16 *)data) = alias;
2387 return 0; /* Continue walking */
Will Deacon45ae7cf2013-06-24 18:31:25 +01002388}
2389
Will Deacon8f68f8e2014-07-15 11:27:08 +01002390static void __arm_smmu_release_pci_iommudata(void *data)
2391{
2392 kfree(data);
2393}
2394
Joerg Roedelaf659932015-10-21 23:51:41 +02002395static int arm_smmu_init_pci_device(struct pci_dev *pdev,
2396 struct iommu_group *group)
Will Deacon45ae7cf2013-06-24 18:31:25 +01002397{
Will Deacon03edb222015-01-19 14:27:33 +00002398 struct arm_smmu_master_cfg *cfg;
Joerg Roedelaf659932015-10-21 23:51:41 +02002399 u16 sid;
2400 int i;
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002401
Will Deacon03edb222015-01-19 14:27:33 +00002402 cfg = iommu_group_get_iommudata(group);
2403 if (!cfg) {
Will Deacona9a1b0b2014-05-01 18:05:08 +01002404 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
Joerg Roedelaf659932015-10-21 23:51:41 +02002405 if (!cfg)
2406 return -ENOMEM;
Will Deacona9a1b0b2014-05-01 18:05:08 +01002407
Will Deacon03edb222015-01-19 14:27:33 +00002408 iommu_group_set_iommudata(group, cfg,
2409 __arm_smmu_release_pci_iommudata);
Will Deacona9a1b0b2014-05-01 18:05:08 +01002410 }
2411
Joerg Roedelaf659932015-10-21 23:51:41 +02002412 if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
2413 return -ENOSPC;
Will Deacona9a1b0b2014-05-01 18:05:08 +01002414
Will Deacon03edb222015-01-19 14:27:33 +00002415 /*
2416 * Assume Stream ID == Requester ID for now.
2417 * We need a way to describe the ID mappings in FDT.
2418 */
2419 pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
2420 for (i = 0; i < cfg->num_streamids; ++i)
2421 if (cfg->streamids[i] == sid)
2422 break;
2423
2424 /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
2425 if (i == cfg->num_streamids)
2426 cfg->streamids[cfg->num_streamids++] = sid;
2427
2428 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002429}
2430
Joerg Roedelaf659932015-10-21 23:51:41 +02002431static int arm_smmu_init_platform_device(struct device *dev,
2432 struct iommu_group *group)
Will Deacon03edb222015-01-19 14:27:33 +00002433{
Will Deacon03edb222015-01-19 14:27:33 +00002434 struct arm_smmu_device *smmu = find_smmu_for_device(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02002435 struct arm_smmu_master *master;
Will Deacon03edb222015-01-19 14:27:33 +00002436
2437 if (!smmu)
2438 return -ENODEV;
2439
2440 master = find_smmu_master(smmu, dev->of_node);
2441 if (!master)
2442 return -ENODEV;
2443
Will Deacon03edb222015-01-19 14:27:33 +00002444 iommu_group_set_iommudata(group, &master->cfg, NULL);
Joerg Roedelaf659932015-10-21 23:51:41 +02002445
2446 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00002447}
2448
2449static int arm_smmu_add_device(struct device *dev)
2450{
Joerg Roedelaf659932015-10-21 23:51:41 +02002451 struct iommu_group *group;
Will Deacon03edb222015-01-19 14:27:33 +00002452
Joerg Roedelaf659932015-10-21 23:51:41 +02002453 group = iommu_group_get_for_dev(dev);
2454 if (IS_ERR(group))
2455 return PTR_ERR(group);
2456
Peng Fan9a4a9d82015-11-20 16:56:18 +08002457 iommu_group_put(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02002458 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00002459}
2460
Will Deacon45ae7cf2013-06-24 18:31:25 +01002461static void arm_smmu_remove_device(struct device *dev)
2462{
Antonios Motakis5fc63a72013-10-18 16:08:29 +01002463 iommu_group_remove_device(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002464}
2465
Joerg Roedelaf659932015-10-21 23:51:41 +02002466static struct iommu_group *arm_smmu_device_group(struct device *dev)
2467{
2468 struct iommu_group *group;
2469 int ret;
2470
2471 if (dev_is_pci(dev))
2472 group = pci_device_group(dev);
2473 else
2474 group = generic_device_group(dev);
2475
2476 if (IS_ERR(group))
2477 return group;
2478
2479 if (dev_is_pci(dev))
2480 ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
2481 else
2482 ret = arm_smmu_init_platform_device(dev, group);
2483
2484 if (ret) {
2485 iommu_group_put(group);
2486 group = ERR_PTR(ret);
2487 }
2488
2489 return group;
2490}
2491
Will Deaconc752ce42014-06-25 22:46:31 +01002492static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
2493 enum iommu_attr attr, void *data)
2494{
Joerg Roedel1d672632015-03-26 13:43:10 +01002495 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002496 int ret = 0;
Will Deaconc752ce42014-06-25 22:46:31 +01002497
2498 switch (attr) {
2499 case DOMAIN_ATTR_NESTING:
2500 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
2501 return 0;
Mitchel Humpheryscd9f07a2014-11-12 15:11:33 -08002502 case DOMAIN_ATTR_PT_BASE_ADDR:
2503 *((phys_addr_t *)data) =
2504 smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2505 return 0;
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002506 case DOMAIN_ATTR_CONTEXT_BANK:
2507 /* context bank index isn't valid until we are attached */
2508 if (smmu_domain->smmu == NULL)
2509 return -ENODEV;
2510
2511 *((unsigned int *) data) = smmu_domain->cfg.cbndx;
2512 ret = 0;
2513 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002514 case DOMAIN_ATTR_TTBR0: {
2515 u64 val;
2516 struct arm_smmu_device *smmu = smmu_domain->smmu;
2517 /* not valid until we are attached */
2518 if (smmu == NULL)
2519 return -ENODEV;
2520
2521 val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
2522 if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
2523 val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
2524 << (TTBRn_ASID_SHIFT);
2525 *((u64 *)data) = val;
2526 ret = 0;
2527 break;
2528 }
2529 case DOMAIN_ATTR_CONTEXTIDR:
2530 /* not valid until attached */
2531 if (smmu_domain->smmu == NULL)
2532 return -ENODEV;
2533 *((u32 *)data) = smmu_domain->cfg.procid;
2534 ret = 0;
2535 break;
2536 case DOMAIN_ATTR_PROCID:
2537 *((u32 *)data) = smmu_domain->cfg.procid;
2538 ret = 0;
2539 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002540 case DOMAIN_ATTR_DYNAMIC:
2541 *((int *)data) = !!(smmu_domain->attributes
2542 & (1 << DOMAIN_ATTR_DYNAMIC));
2543 ret = 0;
2544 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002545 case DOMAIN_ATTR_NON_FATAL_FAULTS:
2546 *((int *)data) = !!(smmu_domain->attributes
2547 & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
2548 ret = 0;
2549 break;
Patrick Dalye62d3362016-03-15 18:58:28 -07002550 case DOMAIN_ATTR_S1_BYPASS:
2551 *((int *)data) = !!(smmu_domain->attributes
2552 & (1 << DOMAIN_ATTR_S1_BYPASS));
2553 ret = 0;
2554 break;
Patrick Dalyc11d1082016-09-01 15:52:44 -07002555 case DOMAIN_ATTR_SECURE_VMID:
2556 *((int *)data) = smmu_domain->secure_vmid;
2557 ret = 0;
2558 break;
Mitchel Humpherysb9dda592016-02-12 14:18:02 -08002559 case DOMAIN_ATTR_PGTBL_INFO: {
2560 struct iommu_pgtbl_info *info = data;
2561
2562 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
2563 ret = -ENODEV;
2564 break;
2565 }
2566 info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
2567 ret = 0;
2568 break;
2569 }
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002570 case DOMAIN_ATTR_FAST:
2571 *((int *)data) = !!(smmu_domain->attributes
2572 & (1 << DOMAIN_ATTR_FAST));
2573 ret = 0;
2574 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002575 default:
2576 return -ENODEV;
2577 }
Jeremy Gebben7e47f9b2015-06-16 10:59:29 -06002578 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002579}
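
/*
 * Usage sketch (illustrative): once a domain is attached, a client can
 * read back hardware state through the generic attribute API, e.g.:
 *
 *	int cb;
 *	u64 ttbr0;
 *
 *	iommu_domain_get_attr(domain, DOMAIN_ATTR_CONTEXT_BANK, &cb);
 *	iommu_domain_get_attr(domain, DOMAIN_ATTR_TTBR0, &ttbr0);
 */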
2580
2581static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
2582 enum iommu_attr attr, void *data)
2583{
Will Deacon518f7132014-11-14 17:17:54 +00002584 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01002585 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01002586
Will Deacon518f7132014-11-14 17:17:54 +00002587 mutex_lock(&smmu_domain->init_mutex);
2588
Will Deaconc752ce42014-06-25 22:46:31 +01002589 switch (attr) {
2590 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00002591 if (smmu_domain->smmu) {
2592 ret = -EPERM;
2593 goto out_unlock;
2594 }
2595
Will Deaconc752ce42014-06-25 22:46:31 +01002596 if (*(int *)data)
2597 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2598 else
2599 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2600
Will Deacon518f7132014-11-14 17:17:54 +00002601 break;
Jeremy Gebben8ac927c2015-07-10 16:43:22 -06002602 case DOMAIN_ATTR_PROCID:
2603 if (smmu_domain->smmu != NULL) {
2604 dev_err(smmu_domain->smmu->dev,
2605 "cannot change procid attribute while attached\n");
2606 ret = -EBUSY;
2607 break;
2608 }
2609 smmu_domain->cfg.procid = *((u32 *)data);
2610 ret = 0;
2611 break;
Patrick Dalyc190d932016-08-30 17:23:28 -07002612 case DOMAIN_ATTR_DYNAMIC: {
2613 int dynamic = *((int *)data);
2614
2615 if (smmu_domain->smmu != NULL) {
2616 dev_err(smmu_domain->smmu->dev,
2617 "cannot change dynamic attribute while attached\n");
2618 ret = -EBUSY;
2619 break;
2620 }
2621
2622 if (dynamic)
2623 smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
2624 else
2625 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
2626 ret = 0;
2627 break;
2628 }
2629 case DOMAIN_ATTR_CONTEXT_BANK:
2630 /* context bank can't be set while attached */
2631 if (smmu_domain->smmu != NULL) {
2632 ret = -EBUSY;
2633 break;
2634 }
2635 /* ... and it can only be set for dynamic contexts. */
2636 if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
2637 ret = -EINVAL;
2638 break;
2639 }
2640
2641 /* this will be validated during attach */
2642 smmu_domain->cfg.cbndx = *((unsigned int *)data);
2643 ret = 0;
2644 break;
Mitchel Humpheryscc8d12f2015-09-25 17:29:27 -07002645 case DOMAIN_ATTR_NON_FATAL_FAULTS: {
2646 u32 non_fatal_faults = *((int *)data);
2647
2648 if (non_fatal_faults)
2649 smmu_domain->attributes |=
2650 1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
2651 else
2652 smmu_domain->attributes &=
2653 ~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
2654 ret = 0;
2655 break;
2656 }
Patrick Dalye62d3362016-03-15 18:58:28 -07002657 case DOMAIN_ATTR_S1_BYPASS: {
2658 int bypass = *((int *)data);
2659
2660 /* bypass can't be changed while attached */
2661 if (smmu_domain->smmu != NULL) {
2662 ret = -EBUSY;
2663 break;
2664 }
2665 if (bypass)
2666 smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
2667 else
2668 smmu_domain->attributes &=
2669 ~(1 << DOMAIN_ATTR_S1_BYPASS);
2670
2671 ret = 0;
2672 break;
2673 }
Patrick Daly8befb662016-08-17 20:03:28 -07002674 case DOMAIN_ATTR_ATOMIC:
2675 {
2676 int atomic_ctx = *((int *)data);
2677
2678 /* can't be changed while attached */
2679 if (smmu_domain->smmu != NULL) {
2680 ret = -EBUSY;
2681 break;
2682 }
2683 if (atomic_ctx)
2684 smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
2685 else
2686 smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
2687 break;
2688 }
Patrick Dalyc11d1082016-09-01 15:52:44 -07002689 case DOMAIN_ATTR_SECURE_VMID:
2690 if (smmu_domain->secure_vmid != VMID_INVAL) {
2691 ret = -ENODEV;
2692			WARN(1, "secure vmid already set!\n");
2693 break;
2694 }
2695 smmu_domain->secure_vmid = *((int *)data);
2696 break;
Mitchel Humpherysc625ce02015-10-07 14:03:50 -07002697 case DOMAIN_ATTR_FAST:
2698 if (*((int *)data))
2699 smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
2700 ret = 0;
2701 break;
Will Deaconc752ce42014-06-25 22:46:31 +01002702 default:
Will Deacon518f7132014-11-14 17:17:54 +00002703 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01002704 }
Will Deacon518f7132014-11-14 17:17:54 +00002705
2706out_unlock:
2707 mutex_unlock(&smmu_domain->init_mutex);
2708 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01002709}
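
/*
 * Usage sketch (illustrative): most of these attributes may only be set
 * while the domain is detached, so a typical sequence is:
 *
 *	int dynamic = 1;
 *	unsigned int cb = 2;	(example index; validated at attach time)
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_DYNAMIC, &dynamic);
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_CONTEXT_BANK, &cb);
 *	err = iommu_attach_device(domain, dev);
 */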
2710
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002711static void arm_smmu_trigger_fault(struct iommu_domain *domain,
2712 unsigned long flags)
2713{
2714 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2715 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2716 struct arm_smmu_device *smmu;
2717 void __iomem *cb_base;
2718
2719 if (!smmu_domain->smmu) {
2720 pr_err("Can't trigger faults on non-attached domains\n");
2721 return;
2722 }
2723
2724 smmu = smmu_domain->smmu;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002725 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002726 return;
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002727
2728 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2729 dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
2730 flags, cfg->cbndx);
2731 writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
Mitchel Humpherys017ee4b2015-10-21 13:59:50 -07002732 /* give the interrupt time to fire... */
2733 msleep(1000);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002734
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002735 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002736}
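
/*
 * Usage sketch (illustrative): a debug client holding an attached domain
 * could raise a synthetic translation fault using this driver's FSR_TF
 * bit, assuming it calls through the iommu_ops extension, e.g.:
 *
 *	domain->ops->trigger_fault(domain, FSR_TF);
 */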
2737
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002738static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
2739 unsigned long offset)
2740{
2741 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2742 struct arm_smmu_device *smmu;
2743 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2744 void __iomem *cb_base;
2745 unsigned long val;
2746
2747 if (offset >= SZ_4K) {
2748 pr_err("Invalid offset: 0x%lx\n", offset);
2749 return 0;
2750 }
2751
2752 smmu = smmu_domain->smmu;
2753 if (!smmu) {
2754 WARN(1, "Can't read registers of a detached domain\n");
2755 val = 0;
2756 return val;
2757 }
2758
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002759 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002760 return 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002761
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002762 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2763 val = readl_relaxed(cb_base + offset);
2764
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002765 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002766 return val;
2767}
2768
2769static void arm_smmu_reg_write(struct iommu_domain *domain,
2770 unsigned long offset, unsigned long val)
2771{
2772 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2773 struct arm_smmu_device *smmu;
2774 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2775 void __iomem *cb_base;
2776
2777 if (offset >= SZ_4K) {
2778 pr_err("Invalid offset: 0x%lx\n", offset);
2779 return;
2780 }
2781
2782 smmu = smmu_domain->smmu;
2783 if (!smmu) {
2784		WARN(1, "Can't write registers of a detached domain\n");
2785 return;
2786 }
2787
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002788 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07002789 return;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002790
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002791 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2792 writel_relaxed(val, cb_base + offset);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07002793
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002794 arm_smmu_power_off(smmu->pwr);
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002795}
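
/*
 * Usage sketch (illustrative): reg_read/reg_write take byte offsets
 * within the domain's context bank page, e.g. sampling the fault status
 * register with this driver's ARM_SMMU_CB_FSR offset:
 *
 *	unsigned long fsr = domain->ops->reg_read(domain, ARM_SMMU_CB_FSR);
 */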
2796
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08002797static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
2798{
2799 arm_smmu_tlb_inv_context(to_smmu_domain(domain));
2800}
2801
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002802static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
2803{
2804 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2805
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002806 return arm_smmu_power_on(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002807}
2808
2809static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
2810{
2811 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2812
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002813 arm_smmu_power_off(smmu_domain->smmu->pwr);
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002814}
2815
Will Deacon518f7132014-11-14 17:17:54 +00002816static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01002817 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01002818 .domain_alloc = arm_smmu_domain_alloc,
2819 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01002820 .attach_dev = arm_smmu_attach_dev,
Patrick Daly09801312016-08-29 17:02:52 -07002821 .detach_dev = arm_smmu_detach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01002822 .map = arm_smmu_map,
2823 .unmap = arm_smmu_unmap,
Mitchel Humpherys622bc042015-04-23 16:29:23 -07002824 .map_sg = arm_smmu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01002825 .iova_to_phys = arm_smmu_iova_to_phys,
Mitchel Humpherysfb11f702015-07-06 13:53:51 -07002826 .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
Will Deaconc752ce42014-06-25 22:46:31 +01002827 .add_device = arm_smmu_add_device,
2828 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02002829 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01002830 .domain_get_attr = arm_smmu_domain_get_attr,
2831 .domain_set_attr = arm_smmu_domain_set_attr,
Will Deacon518f7132014-11-14 17:17:54 +00002832 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Mitchel Humpherysa52d6cc2015-07-09 17:26:15 -07002833 .trigger_fault = arm_smmu_trigger_fault,
Mitchel Humpherysfd557002015-08-21 14:07:59 -07002834 .reg_read = arm_smmu_reg_read,
2835 .reg_write = arm_smmu_reg_write,
Mitchel Humpherys2dcc2342015-12-03 11:20:03 -08002836 .tlbi_domain = arm_smmu_tlbi_domain,
Mitchel Humpherys74299ca2015-12-14 16:12:00 -08002837 .enable_config_clocks = arm_smmu_enable_config_clocks,
2838 .disable_config_clocks = arm_smmu_disable_config_clocks,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002839};
2840
Patrick Dalyad441dd2016-09-15 15:50:46 -07002841#define IMPL_DEF1_MICRO_MMU_CTRL 0
2842#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2)
2843#define MICRO_MMU_CTRL_IDLE (1 << 3)
2844
2845/* Definitions for implementation-defined registers */
2846#define ACTLR_QCOM_OSH_SHIFT 28
2847#define ACTLR_QCOM_OSH 1
2848
2849#define ACTLR_QCOM_ISH_SHIFT 29
2850#define ACTLR_QCOM_ISH 1
2851
2852#define ACTLR_QCOM_NSH_SHIFT 30
2853#define ACTLR_QCOM_NSH 1
2854
2855static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002856{
2857 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002858 u32 tmp;
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002859
2860 if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
2861 tmp, (tmp & MICRO_MMU_CTRL_IDLE),
2862 0, 30000)) {
2863 dev_err(smmu->dev, "Couldn't halt SMMU!\n");
2864 return -EBUSY;
2865 }
2866
2867 return 0;
2868}
2869
Patrick Dalyad441dd2016-09-15 15:50:46 -07002870static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002871{
2872 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
2873 u32 reg;
2874
2875 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2876 reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
2877 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2878
Patrick Dalyad441dd2016-09-15 15:50:46 -07002879 return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002880}
2881
Patrick Dalyad441dd2016-09-15 15:50:46 -07002882static int qsmmuv2_halt(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002883{
Patrick Dalyad441dd2016-09-15 15:50:46 -07002884 return __qsmmuv2_halt(smmu, true);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002885}
2886
Patrick Dalyad441dd2016-09-15 15:50:46 -07002887static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002888{
Patrick Dalyad441dd2016-09-15 15:50:46 -07002889 return __qsmmuv2_halt(smmu, false);
Patrick Dalyd54eafd2016-08-23 17:01:43 -07002890}
2891
Patrick Dalyad441dd2016-09-15 15:50:46 -07002892static void qsmmuv2_resume(struct arm_smmu_device *smmu)
Mitchel Humpherys952f40a2015-08-19 12:13:28 -07002893{
2894 void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
2895 u32 reg;
2896
2897 reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2898 reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
2899 writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
2900}
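
/*
 * Note on the halt/resume handshake above: setting LOCAL_HALT_REQ in the
 * implementation-defined MICRO_MMU_CTRL register asks the SMMU to stop
 * accepting new transactions; callers that need the SMMU fully quiesced
 * then poll for MICRO_MMU_CTRL_IDLE, and clearing LOCAL_HALT_REQ lets
 * traffic flow again.
 */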
2901
Patrick Dalyad441dd2016-09-15 15:50:46 -07002902static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002903{
2904 int i;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002905 u32 val;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002906 struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
Patrick Dalyad441dd2016-09-15 15:50:46 -07002907 void __iomem *cb_base;
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002908
Patrick Dalyad441dd2016-09-15 15:50:46 -07002909 /*
2910 * SCTLR.M must be disabled here per ARM SMMUv2 spec
2911 * to prevent table walks with an inconsistent state.
2912 */
2913 for (i = 0; i < smmu->num_context_banks; ++i) {
2914 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
2915 val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
2916 ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
2917 ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
2918 writel_relaxed(val, cb_base + ARM_SMMU_CB_ACTLR);
2919 }
2920
2921 /* Program implementation defined registers */
2922 qsmmuv2_halt(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002923 for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
2924 writel_relaxed(regs[i].value,
2925 ARM_SMMU_GR0(smmu) + regs[i].offset);
Patrick Dalyad441dd2016-09-15 15:50:46 -07002926 qsmmuv2_resume(smmu);
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07002927}
2928
Patrick Dalyad441dd2016-09-15 15:50:46 -07002929static phys_addr_t __qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
2930 dma_addr_t iova, bool halt)
2931{
2932 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2933 struct arm_smmu_device *smmu = smmu_domain->smmu;
2934 int ret;
2935 phys_addr_t phys = 0;
2936 unsigned long flags;
2937
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002938 ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07002939 if (ret)
2940 return 0;
2941
2942 if (halt) {
2943 ret = qsmmuv2_halt(smmu);
2944 if (ret)
2945 goto out_power_off;
2946 }
2947
2948 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
2949 spin_lock(&smmu->atos_lock);
2950 phys = __arm_smmu_iova_to_phys_hard(domain, iova);
2951 spin_unlock(&smmu->atos_lock);
2952 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
2953
2954 if (halt)
2955 qsmmuv2_resume(smmu);
2956
2957out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07002958 arm_smmu_power_off(smmu_domain->smmu->pwr);
Patrick Dalyad441dd2016-09-15 15:50:46 -07002959 return phys;
2960}
2961
2962static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
2963 dma_addr_t iova)
2964{
2965 return __qsmmuv2_iova_to_phys_hard(domain, iova, true);
2966}
2967
2968static void qsmmuv2_iova_to_phys_fault(
2969 struct iommu_domain *domain,
2970 dma_addr_t iova, phys_addr_t *phys,
2971 phys_addr_t *phys_post_tlbiall)
2972{
2973 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2974 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
2975 struct arm_smmu_device *smmu;
2976 void __iomem *cb_base;
2977 u64 sctlr, sctlr_orig;
2978 u32 fsr;
2979
2980 smmu = smmu_domain->smmu;
2981 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
2982
2983 qsmmuv2_halt_nowait(smmu);
2984
2985 writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
2986
2987 qsmmuv2_wait_for_halt(smmu);
2988
2989 /* clear FSR to allow ATOS to log any faults */
2990 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
2991 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
2992
2993 /* disable stall mode momentarily */
2994 sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
2995 sctlr = sctlr_orig & ~SCTLR_CFCFG;
2996 writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
2997
2998 *phys = __qsmmuv2_iova_to_phys_hard(domain, iova, false);
2999 arm_smmu_tlb_inv_context(smmu_domain);
3000 *phys_post_tlbiall = __qsmmuv2_iova_to_phys_hard(domain, iova, false);
3001
3002 /* restore SCTLR */
3003 writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
3004
3005 qsmmuv2_resume(smmu);
3006}
3007
3008struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
3009 .device_reset = qsmmuv2_device_reset,
3010 .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
3011 .iova_to_phys_fault = qsmmuv2_iova_to_phys_fault,
3012};
3013
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003014static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003015{
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003016 int i;
3017 u32 reg, major;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003018 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003019 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003020
Peng Fan3ca37122016-05-03 21:50:30 +08003021 /*
3022	 * Before clearing ARM_MMU500_ACTLR_CPRE, the CACHE_LOCK bit
3023	 * of ACR must be cleared first; note that CACHE_LOCK is only
3024	 * present in MMU-500 r2 onwards.
3025 */
3026 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
3027 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
3028 if ((smmu->model == ARM_MMU500) && (major >= 2)) {
3029 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
3030 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
3031 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
3032 }
3033
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003034 /* Make sure all context banks are disabled and clear CB_FSR */
3035 for (i = 0; i < smmu->num_context_banks; ++i) {
3036 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
3037 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
3038 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003039 /*
3040 * Disable MMU-500's not-particularly-beneficial next-page
3041 * prefetcher for the sake of errata #841119 and #826419.
3042 */
3043 if (smmu->model == ARM_MMU500) {
3044 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
3045 reg &= ~ARM_MMU500_ACTLR_CPRE;
3046 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
3047 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003048 }
Mitchel Humpherys9c2f6482015-01-13 15:28:40 -08003049}
3050
3051static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
3052{
3053 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
3054 int i = 0;
3055 u32 reg;
3056
3057 /* clear global FSR */
3058 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3059 writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
3060
3061 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
3062 /*
3063 * Mark all SMRn as invalid and all S2CRn as bypass unless
3064 * overridden
3065 */
3066 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
3067 for (i = 0; i < smmu->num_mapping_groups; ++i) {
3068 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
3069 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
3070 }
3071
3072 arm_smmu_context_bank_reset(smmu);
3073 }
Will Deacon1463fe42013-07-31 19:21:27 +01003074
Will Deacon45ae7cf2013-06-24 18:31:25 +01003075 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003076 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
3077 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
3078
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003079 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003080
Will Deacon45ae7cf2013-06-24 18:31:25 +01003081 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003082 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003083
3084 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003085 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003086
Robin Murphy25a1c962016-02-10 14:25:33 +00003087 /* Enable client access, handling unmatched streams as appropriate */
3088 reg &= ~sCR0_CLIENTPD;
3089 if (disable_bypass)
3090 reg |= sCR0_USFCFG;
3091 else
3092 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003093
3094 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003095 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003096
3097 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01003098 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003099
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003100 if (smmu->features & ARM_SMMU_FEAT_VMID16)
3101 reg |= sCR0_VMID16EN;
3102
Will Deacon45ae7cf2013-06-24 18:31:25 +01003103 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00003104 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00003105 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Dalyd7476202016-09-08 18:23:28 -07003106
3107 /* Manage any implementation defined features */
3108 arm_smmu_arch_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003109}
3110
3111static int arm_smmu_id_size_to_bits(int size)
3112{
3113 switch (size) {
3114 case 0:
3115 return 32;
3116 case 1:
3117 return 36;
3118 case 2:
3119 return 40;
3120 case 3:
3121 return 42;
3122 case 4:
3123 return 44;
3124 case 5:
3125 default:
3126 return 48;
3127 }
3128}
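
/*
 * Example of the encoding above: an ID register size field of 2 decodes
 * to 40 bits; values of 5 and above saturate at 48 bits.
 */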
3129
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003130static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
3131{
3132 struct device *dev = smmu->dev;
3133 int i, ntuples, ret;
3134 u32 *tuples;
3135 struct arm_smmu_impl_def_reg *regs, *regit;
3136
3137 if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
3138 return 0;
3139
3140 ntuples /= sizeof(u32);
3141 if (ntuples % 2) {
3142 dev_err(dev,
3143 "Invalid number of attach-impl-defs registers: %d\n",
3144 ntuples);
3145 return -EINVAL;
3146 }
3147
3148 regs = devm_kmalloc(
3149 dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
3150 GFP_KERNEL);
3151 if (!regs)
3152 return -ENOMEM;
3153
3154 tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
3155 if (!tuples)
3156 return -ENOMEM;
3157
3158 ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
3159 tuples, ntuples);
3160 if (ret)
3161 return ret;
3162
3163 for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
3164 regit->offset = tuples[i];
3165 regit->value = tuples[i + 1];
3166 }
3167
3168 devm_kfree(dev, tuples);
3169
3170 smmu->impl_def_attach_registers = regs;
3171 smmu->num_impl_def_attach_registers = ntuples / 2;
3172
3173 return 0;
3174}
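
/*
 * Illustrative devicetree fragment (the offset/value pairs below are
 * made-up examples; real ones are implementation specific):
 *
 *	smmu@1e00000 {
 *		...
 *		attach-impl-defs = <0x6000 0x270>,
 *				   <0x6008 0x1055>;
 *	};
 */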
3175
3177static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003178{
3179 const char *cname;
3180 struct property *prop;
3181 int i;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003182 struct device *dev = pwr->dev;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003183
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003184 pwr->num_clocks =
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003185 of_property_count_strings(dev->of_node, "clock-names");
3186
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003187 if (pwr->num_clocks < 1) {
3188 pwr->num_clocks = 0;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003189 return 0;
Patrick Dalyf0c58e12016-10-12 22:15:36 -07003190 }
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003191
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003192 pwr->clocks = devm_kzalloc(
3193 dev, sizeof(*pwr->clocks) * pwr->num_clocks,
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003194 GFP_KERNEL);
3195
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003196 if (!pwr->clocks)
3197 return -ENOMEM;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003198
3199 i = 0;
3200 of_property_for_each_string(dev->of_node, "clock-names",
3201 prop, cname) {
3202 struct clk *c = devm_clk_get(dev, cname);
3203
3204 if (IS_ERR(c)) {
3205			dev_err(dev, "Couldn't get clock: %s\n",
3206 cname);
Mathew Joseph Karimpanal0c4fd1b2016-03-16 11:48:34 -07003207 return PTR_ERR(c);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003208 }
3209
3210 if (clk_get_rate(c) == 0) {
3211 long rate = clk_round_rate(c, 1000);
3212
3213 clk_set_rate(c, rate);
3214 }
3215
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003216 pwr->clocks[i] = c;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003217
3218 ++i;
3219 }
3220 return 0;
3221}
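
/*
 * Illustrative devicetree fragment (clock handles and names are examples
 * only; the driver takes whatever "clock-names" lists):
 *
 *	clocks = <&gcc SMMU_CFG_CLK>, <&gcc SMMU_TCU_CLK>;
 *	clock-names = "cfg_clk", "tcu_clk";
 */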
3222
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003223static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003224{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003225 const char *cname;
3226 struct property *prop;
3227 int i, ret = 0;
3228 struct device *dev = pwr->dev;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003229
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003230 pwr->num_gdscs =
3231 of_property_count_strings(dev->of_node, "qcom,regulator-names");
3232
3233 if (pwr->num_gdscs < 1) {
3234 pwr->num_gdscs = 0;
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003235 return 0;
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003236 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003237
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003238 pwr->gdscs = devm_kzalloc(
3239 dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
3240
3241 if (!pwr->gdscs)
3242 return -ENOMEM;
3243
3244 i = 0;
3245 of_property_for_each_string(dev->of_node, "qcom,regulator-names",
3246 prop, cname)
3247		pwr->gdscs[i++].supply = cname;
3248
3249 ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
3250 return ret;
3251}
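
/*
 * Illustrative devicetree fragment ("vdd" is an example name; each entry
 * in "qcom,regulator-names" must have a matching <name>-supply):
 *
 *	vdd-supply = <&gdsc_smmu>;
 *	qcom,regulator-names = "vdd";
 */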
3252
3253static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
3254{
3255 struct device *dev = pwr->dev;
3256
3257 /* We don't want the bus APIs to print an error message */
3258 if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
3259 dev_dbg(dev, "No bus scaling info\n");
3260 return 0;
3261 }
3262
3263 pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
3264 if (!pwr->bus_dt_data) {
3265 dev_err(dev, "Unable to read bus-scaling from devicetree\n");
3266 return -EINVAL;
3267 }
3268
3269 pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
3270 if (!pwr->bus_client) {
3271 dev_err(dev, "Bus client registration failed\n");
3272 msm_bus_cl_clear_pdata(pwr->bus_dt_data);
3273 return -EINVAL;
3274 }
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003275
3276 return 0;
3277}
3278
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003279/*
3280 * Cleanup done by devm. Any non-devm resources must clean up themselves.
3281 */
3282static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
3283 struct platform_device *pdev)
Patrick Daly2764f952016-09-06 19:22:44 -07003284{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003285 struct arm_smmu_power_resources *pwr;
3286 int ret;
Patrick Daly2764f952016-09-06 19:22:44 -07003287
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003288 pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
3289 if (!pwr)
3290 return ERR_PTR(-ENOMEM);
Patrick Daly2764f952016-09-06 19:22:44 -07003291
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003292 pwr->dev = &pdev->dev;
3293 pwr->pdev = pdev;
3294 mutex_init(&pwr->power_lock);
3295 spin_lock_init(&pwr->clock_refs_lock);
Patrick Daly2764f952016-09-06 19:22:44 -07003296
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003297 ret = arm_smmu_init_clocks(pwr);
3298 if (ret)
3299 return ERR_PTR(ret);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003300
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003301 ret = arm_smmu_init_regulators(pwr);
3302 if (ret)
3303 return ERR_PTR(ret);
3304
3305 ret = arm_smmu_init_bus_scaling(pwr);
3306 if (ret)
3307 return ERR_PTR(ret);
3308
3309 return pwr;
Patrick Daly2764f952016-09-06 19:22:44 -07003310}
3311
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003312/*
3313 * Bus APIs are not devm-safe.
3314 */
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003315static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003316{
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003317 msm_bus_scale_unregister_client(pwr->bus_client);
3318 msm_bus_cl_clear_pdata(pwr->bus_dt_data);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003319}
3320
Will Deacon45ae7cf2013-06-24 18:31:25 +01003321static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
3322{
3323 unsigned long size;
3324 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
3325 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003326 bool cttw_dt, cttw_reg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003327
Mitchel Humpherysba822582015-10-20 11:37:41 -07003328 dev_dbg(smmu->dev, "probing hardware configuration...\n");
3329 dev_dbg(smmu->dev, "SMMUv%d with:\n",
Robin Murphyb7862e32016-04-13 18:13:03 +01003330 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003331
3332 /* ID0 */
3333 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01003334
3335 /* Restrict available stages based on module parameter */
3336 if (force_stage == 1)
3337 id &= ~(ID0_S2TS | ID0_NTS);
3338 else if (force_stage == 2)
3339 id &= ~(ID0_S1TS | ID0_NTS);
3340
Will Deacon45ae7cf2013-06-24 18:31:25 +01003341 if (id & ID0_S1TS) {
3342 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003343 dev_dbg(smmu->dev, "\tstage 1 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003344 }
3345
3346 if (id & ID0_S2TS) {
3347 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003348 dev_dbg(smmu->dev, "\tstage 2 translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003349 }
3350
3351 if (id & ID0_NTS) {
3352 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003353 dev_dbg(smmu->dev, "\tnested translation\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003354 }
3355
3356 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01003357 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003358 dev_err(smmu->dev, "\tno translation support!\n");
3359 return -ENODEV;
3360 }
3361
Robin Murphyb7862e32016-04-13 18:13:03 +01003362 if ((id & ID0_S1TS) &&
3363 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003364 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003365 dev_dbg(smmu->dev, "\taddress translation ops\n");
Mitchel Humpherys859a7322014-10-29 21:13:40 +00003366 }
3367
Robin Murphybae2c2d2015-07-29 19:46:05 +01003368 /*
3369 * In order for DMA API calls to work properly, we must defer to what
3370 * the DT says about coherency, regardless of what the hardware claims.
3371 * Fortunately, this also opens up a workaround for systems where the
3372 * ID register value has ended up configured incorrectly.
3373 */
3374 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
3375 cttw_reg = !!(id & ID0_CTTW);
3376 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01003377 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01003378 if (cttw_dt || cttw_reg)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003379 dev_dbg(smmu->dev, "\t%scoherent table walk\n",
Robin Murphybae2c2d2015-07-29 19:46:05 +01003380 cttw_dt ? "" : "non-");
3381 if (cttw_dt != cttw_reg)
3382 dev_notice(smmu->dev,
3383 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01003384
3385 if (id & ID0_SMS) {
3386 u32 smr, sid, mask;
3387
3388 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
3389 smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
3390 ID0_NUMSMRG_MASK;
3391 if (smmu->num_mapping_groups == 0) {
3392 dev_err(smmu->dev,
3393 "stream-matching supported, but no SMRs present!\n");
3394 return -ENODEV;
3395 }
3396
Dhaval Patel031d7462015-05-09 14:47:29 -07003397 if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
3398 smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
3399 smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
3400 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
3401 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
Will Deacon45ae7cf2013-06-24 18:31:25 +01003402
Dhaval Patel031d7462015-05-09 14:47:29 -07003403 mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
3404 sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
3405 if ((mask & sid) != sid) {
3406 dev_err(smmu->dev,
3407 "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
3408 mask, sid);
3409 return -ENODEV;
3410 }
3411
Mitchel Humpherysba822582015-10-20 11:37:41 -07003412 dev_dbg(smmu->dev,
Dhaval Patel031d7462015-05-09 14:47:29 -07003413 "\tstream matching with %u register groups, mask 0x%x",
3414 smmu->num_mapping_groups, mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003415 }
Olav Haugan3c8766d2014-08-22 17:12:32 -07003416 } else {
3417 smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
3418 ID0_NUMSIDB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003419 }
3420
Robin Murphy7602b872016-04-28 17:12:09 +01003421 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
3422 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
3423 if (!(id & ID0_PTFS_NO_AARCH32S))
3424 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
3425 }
3426
Will Deacon45ae7cf2013-06-24 18:31:25 +01003427 /* ID1 */
3428 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01003429 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003430
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003431 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00003432 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01003433 size *= 2 << smmu->pgshift;
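	/*
	 * Worked example (illustrative numbers): with 4K pages
	 * (pgshift = 12) and a NUMPAGENDXB field of 3, this computes
	 * (1 << 4) * (2 << 12) = 128K of register space.
	 */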
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01003434 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07003435 dev_warn(smmu->dev,
3436 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
3437 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003438
Will Deacon518f7132014-11-14 17:17:54 +00003439 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003440 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
3441 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
3442 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
3443 return -ENODEV;
3444 }
Mitchel Humpherysba822582015-10-20 11:37:41 -07003445 dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
Will Deacon45ae7cf2013-06-24 18:31:25 +01003446 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01003447 /*
3448 * Cavium CN88xx erratum #27704.
3449 * Ensure ASID and VMID allocation is unique across all SMMUs in
3450 * the system.
3451 */
3452 if (smmu->model == CAVIUM_SMMUV2) {
3453 smmu->cavium_id_base =
3454 atomic_add_return(smmu->num_context_banks,
3455 &cavium_smmu_context_count);
3456 smmu->cavium_id_base -= smmu->num_context_banks;
3457 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003458
3459 /* ID2 */
3460 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
3461 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003462 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003463
Will Deacon518f7132014-11-14 17:17:54 +00003464 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01003465 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00003466 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003467
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08003468 if (id & ID2_VMID16)
3469 smmu->features |= ARM_SMMU_FEAT_VMID16;
3470
Robin Murphyf1d84542015-03-04 16:41:05 +00003471 /*
3472 * What the page table walker can address actually depends on which
3473 * descriptor format is in use, but since a) we don't know that yet,
3474 * and b) it can vary per context bank, this will have to do...
3475 */
3476 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
3477 dev_warn(smmu->dev,
3478 "failed to set DMA mask for table walker\n");
3479
Robin Murphyb7862e32016-04-13 18:13:03 +01003480 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00003481 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01003482 if (smmu->version == ARM_SMMU_V1_64K)
3483 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003484 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003485 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00003486 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00003487 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01003488 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00003489 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01003490 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00003491 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01003492 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003493 }
3494
Robin Murphy7602b872016-04-28 17:12:09 +01003495 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01003496 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01003497 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01003498 if (smmu->features &
3499 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01003500 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01003501 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01003502 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01003503 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01003504 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01003505
Robin Murphyd5466352016-05-09 17:20:09 +01003506 if (arm_smmu_ops.pgsize_bitmap == -1UL)
3507 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
3508 else
3509 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
Mitchel Humpherysba822582015-10-20 11:37:41 -07003510 dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
Robin Murphyd5466352016-05-09 17:20:09 +01003511 smmu->pgsize_bitmap);
3512
Will Deacon518f7132014-11-14 17:17:54 +00003513
Will Deacon28d60072014-09-01 16:24:48 +01003514 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003515 dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
3516 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003517
3518 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
Mitchel Humpherysba822582015-10-20 11:37:41 -07003519 dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
3520 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01003521
Will Deacon45ae7cf2013-06-24 18:31:25 +01003522 return 0;
3523}
3524
Patrick Dalyd7476202016-09-08 18:23:28 -07003525static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
3526{
3527 if (!smmu->arch_ops)
3528 return 0;
3529 if (!smmu->arch_ops->init)
3530 return 0;
3531 return smmu->arch_ops->init(smmu);
3532}
3533
3534static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
3535{
3536 if (!smmu->arch_ops)
3537 return;
3538 if (!smmu->arch_ops->device_reset)
3539 return;
3540 return smmu->arch_ops->device_reset(smmu);
3541}
3542
Robin Murphy67b65a32016-04-13 18:12:57 +01003543struct arm_smmu_match_data {
3544 enum arm_smmu_arch_version version;
3545 enum arm_smmu_implementation model;
Patrick Dalyd7476202016-09-08 18:23:28 -07003546 struct arm_smmu_arch_ops *arch_ops;
Robin Murphy67b65a32016-04-13 18:12:57 +01003547};
3548
Patrick Dalyd7476202016-09-08 18:23:28 -07003549#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops) \
3550static struct arm_smmu_match_data name = { \
3551.version = ver, \
3552.model = imp, \
3553.arch_ops = ops, \
3554}
Robin Murphy67b65a32016-04-13 18:12:57 +01003555
Patrick Dalyd7476202016-09-08 18:23:28 -07003556ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
3557ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
3558ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
3559ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
3560ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
Patrick Dalyad441dd2016-09-15 15:50:46 -07003561ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
Robin Murphy67b65a32016-04-13 18:12:57 +01003562
Joerg Roedel09b52692014-10-02 12:24:45 +02003563static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01003564 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
3565 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
3566 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01003567 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01003568 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01003569 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Patrick Dalyf0d4e212016-06-20 15:50:14 -07003570 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01003571 { },
3572};
3573MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
3574
Will Deacon45ae7cf2013-06-24 18:31:25 +01003575static int arm_smmu_device_dt_probe(struct platform_device *pdev)
3576{
Robin Murphy09360402014-08-28 17:51:59 +01003577 const struct of_device_id *of_id;
Robin Murphy67b65a32016-04-13 18:12:57 +01003578 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003579 struct resource *res;
3580 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003581 struct device *dev = &pdev->dev;
3582 struct rb_node *node;
Mitchel Humpherysc6dd1ed2014-08-04 16:45:53 -07003583 int num_irqs, i, err, num_masters;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003584
3585 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
3586 if (!smmu) {
3587 dev_err(dev, "failed to allocate arm_smmu_device\n");
3588 return -ENOMEM;
3589 }
3590 smmu->dev = dev;
Mitchel Humpherys2fbae2a2014-12-04 11:46:24 -08003591 spin_lock_init(&smmu->atos_lock);
Patrick Dalyc190d932016-08-30 17:23:28 -07003592 idr_init(&smmu->asid_idr);
3593 mutex_init(&smmu->idr_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003594
Robin Murphy09360402014-08-28 17:51:59 +01003595 of_id = of_match_node(arm_smmu_of_match, dev->of_node);
Robin Murphy67b65a32016-04-13 18:12:57 +01003596 data = of_id->data;
3597 smmu->version = data->version;
3598 smmu->model = data->model;
Patrick Dalyd7476202016-09-08 18:23:28 -07003599 smmu->arch_ops = data->arch_ops;
Robin Murphy09360402014-08-28 17:51:59 +01003600
Will Deacon45ae7cf2013-06-24 18:31:25 +01003601 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01003602 smmu->base = devm_ioremap_resource(dev, res);
3603 if (IS_ERR(smmu->base))
3604 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003605 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003606
3607 if (of_property_read_u32(dev->of_node, "#global-interrupts",
3608 &smmu->num_global_irqs)) {
3609 dev_err(dev, "missing #global-interrupts property\n");
3610 return -ENODEV;
3611 }
3612
3613 num_irqs = 0;
3614 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
3615 num_irqs++;
3616 if (num_irqs > smmu->num_global_irqs)
3617 smmu->num_context_irqs++;
3618 }
3619
Andreas Herrmann44a08de2013-10-01 13:39:07 +01003620 if (!smmu->num_context_irqs) {
3621 dev_err(dev, "found %d interrupts but expected at least %d\n",
3622 num_irqs, smmu->num_global_irqs + 1);
3623 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003624 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01003625
3626 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
3627 GFP_KERNEL);
3628 if (!smmu->irqs) {
3629 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
3630 return -ENOMEM;
3631 }
3632
3633 for (i = 0; i < num_irqs; ++i) {
3634 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07003635
Will Deacon45ae7cf2013-06-24 18:31:25 +01003636 if (irq < 0) {
3637 dev_err(dev, "failed to get irq index %d\n", i);
3638 return -ENODEV;
3639 }
3640 smmu->irqs[i] = irq;
3641 }
3642
Dhaval Patel031d7462015-05-09 14:47:29 -07003643 parse_driver_options(smmu);
3644
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003646 smmu->pwr = arm_smmu_init_power_resources(pdev);
3647 if (IS_ERR(smmu->pwr))
3648 return PTR_ERR(smmu->pwr);
Mitchel Humpherysf7666ae2014-07-23 17:35:07 -07003649
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003650 err = arm_smmu_power_on(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07003651 if (err)
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003652 goto out_exit_power_resources;
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003653
3654 err = arm_smmu_device_cfg_probe(smmu);
3655 if (err)
3656 goto out_power_off;
3657
Will Deacon45ae7cf2013-06-24 18:31:25 +01003658 i = 0;
3659 smmu->masters = RB_ROOT;
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02003660
Mitchel Humpherysc6dd1ed2014-08-04 16:45:53 -07003661 err = arm_smmu_parse_iommus_properties(smmu, &num_masters);
3662 if (err)
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02003663 goto out_put_masters;
3664
Mitchel Humpherysba822582015-10-20 11:37:41 -07003665 dev_dbg(dev, "registered %d master devices\n", num_masters);
Joerg Roedelcb6c27b2016-04-04 17:49:22 +02003666
Mitchel Humpherys5494a5e2014-08-14 17:44:49 -07003667 err = arm_smmu_parse_impl_def_registers(smmu);
3668 if (err)
3669 goto out_put_masters;
3670
Robin Murphyb7862e32016-04-13 18:13:03 +01003671 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01003672 smmu->num_context_banks != smmu->num_context_irqs) {
3673 dev_err(dev,
Mitchel Humpherys73230ce2014-12-11 17:18:02 -08003674 "found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
3675 smmu->num_context_irqs, smmu->num_context_banks,
3676 smmu->num_context_banks);
3677 smmu->num_context_irqs = smmu->num_context_banks;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003678 }
3679
Will Deacon45ae7cf2013-06-24 18:31:25 +01003680 for (i = 0; i < smmu->num_global_irqs; ++i) {
Mitchel Humpherysc4f2a802015-02-04 21:29:46 -08003681 err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
3682 NULL, arm_smmu_global_fault,
3683 IRQF_ONESHOT | IRQF_SHARED,
3684 "arm-smmu global fault", smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003685 if (err) {
3686 dev_err(dev, "failed to request global IRQ %d (%u)\n",
3687 i, smmu->irqs[i]);
Peng Fanbee14002016-07-04 17:38:22 +08003688 goto out_put_masters;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003689 }
3690 }
3691
3692 INIT_LIST_HEAD(&smmu->list);
3693 spin_lock(&arm_smmu_devices_lock);
3694 list_add(&smmu->list, &arm_smmu_devices);
3695 spin_unlock(&arm_smmu_devices_lock);
Will Deaconfd90cec2013-08-21 13:56:34 +01003696
Patrick Dalyd7476202016-09-08 18:23:28 -07003697 err = arm_smmu_arch_init(smmu);
3698 if (err)
3699 goto out_put_masters;
3700
Will Deaconfd90cec2013-08-21 13:56:34 +01003701 arm_smmu_device_reset(smmu);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003702 arm_smmu_power_off(smmu->pwr);
Patrick Dalyd7476202016-09-08 18:23:28 -07003703
Will Deacon45ae7cf2013-06-24 18:31:25 +01003704 return 0;
3705
Will Deacon45ae7cf2013-06-24 18:31:25 +01003706out_put_masters:
3707 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07003708 struct arm_smmu_master *master
3709 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003710 of_node_put(master->of_node);
3711 }
3712
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003713out_power_off:
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003714 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003715
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003716out_exit_power_resources:
3717 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Dalycf7b0de2016-10-06 17:04:49 -07003718
Will Deacon45ae7cf2013-06-24 18:31:25 +01003719 return err;
3720}
3721
3722static int arm_smmu_device_remove(struct platform_device *pdev)
3723{
3724 int i;
3725 struct device *dev = &pdev->dev;
3726 struct arm_smmu_device *curr, *smmu = NULL;
3727 struct rb_node *node;
3728
3729 spin_lock(&arm_smmu_devices_lock);
3730 list_for_each_entry(curr, &arm_smmu_devices, list) {
3731 if (curr->dev == dev) {
3732 smmu = curr;
3733 list_del(&smmu->list);
3734 break;
3735 }
3736 }
3737 spin_unlock(&arm_smmu_devices_lock);
3738
3739 if (!smmu)
3740 return -ENODEV;
3741
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003742 if (arm_smmu_power_on(smmu->pwr))
Mitchel Humpherys3e52a7e2015-10-19 17:13:47 -07003743 return -EINVAL;
3744
Will Deacon45ae7cf2013-06-24 18:31:25 +01003745 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07003746 struct arm_smmu_master *master
3747 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003748 of_node_put(master->of_node);
3749 }
3750
Will Deaconecfadb62013-07-31 19:21:28 +01003751 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Will Deacon45ae7cf2013-06-24 18:31:25 +01003752 dev_err(dev, "removing device with active domains!\n");
3753
3754 for (i = 0; i < smmu->num_global_irqs; ++i)
Peng Fanbee14002016-07-04 17:38:22 +08003755 devm_free_irq(smmu->dev, smmu->irqs[i], smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003756
Patrick Dalyc190d932016-08-30 17:23:28 -07003757 idr_destroy(&smmu->asid_idr);
3758
Will Deacon45ae7cf2013-06-24 18:31:25 +01003759 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07003760 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003761 arm_smmu_power_off(smmu->pwr);
Patrick Daly3a8a88a2016-07-22 12:24:05 -07003762
Patrick Daly5b3d8c62016-11-01 15:34:11 -07003763 arm_smmu_exit_power_resources(smmu->pwr);
Patrick Daly2764f952016-09-06 19:22:44 -07003764
Will Deacon45ae7cf2013-06-24 18:31:25 +01003765 return 0;
3766}
3767
Will Deacon45ae7cf2013-06-24 18:31:25 +01003768static struct platform_driver arm_smmu_driver = {
3769 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01003770 .name = "arm-smmu",
3771 .of_match_table = of_match_ptr(arm_smmu_of_match),
3772 },
3773 .probe = arm_smmu_device_dt_probe,
3774 .remove = arm_smmu_device_remove,
3775};
3776
3777static int __init arm_smmu_init(void)
3778{
Thierry Reding0e7d37a2014-11-07 15:26:18 +00003779 struct device_node *np;
Will Deacon45ae7cf2013-06-24 18:31:25 +01003780 int ret;
3781
Thierry Reding0e7d37a2014-11-07 15:26:18 +00003782 /*
3783 * Play nice with systems that don't have an ARM SMMU by checking that
3784 * an ARM SMMU exists in the system before proceeding with the driver
3785 * and IOMMU bus operation registration.
3786 */
3787 np = of_find_matching_node(NULL, arm_smmu_of_match);
3788 if (!np)
3789 return 0;
3790
3791 of_node_put(np);
3792
Will Deacon45ae7cf2013-06-24 18:31:25 +01003793 ret = platform_driver_register(&arm_smmu_driver);
3794 if (ret)
3795 return ret;
3796
3797 /* Oh, for a proper bus abstraction */
Dan Carpenter6614ee72013-08-21 09:34:20 +01003798 if (!iommu_present(&platform_bus_type))
Will Deacon45ae7cf2013-06-24 18:31:25 +01003799 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
3800
Will Deacond123cf82014-02-04 22:17:53 +00003801#ifdef CONFIG_ARM_AMBA
Dan Carpenter6614ee72013-08-21 09:34:20 +01003802 if (!iommu_present(&amba_bustype))
Will Deacon45ae7cf2013-06-24 18:31:25 +01003803 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
Will Deacond123cf82014-02-04 22:17:53 +00003804#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01003805
Will Deacona9a1b0b2014-05-01 18:05:08 +01003806#ifdef CONFIG_PCI
Wei Chen112c8982016-06-13 17:20:17 +08003807 if (!iommu_present(&pci_bus_type)) {
3808 pci_request_acs();
Will Deacona9a1b0b2014-05-01 18:05:08 +01003809 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
Wei Chen112c8982016-06-13 17:20:17 +08003810 }
Will Deacona9a1b0b2014-05-01 18:05:08 +01003811#endif
3812
Will Deacon45ae7cf2013-06-24 18:31:25 +01003813 return 0;
3814}
3815
3816static void __exit arm_smmu_exit(void)
3817{
3818 return platform_driver_unregister(&arm_smmu_driver);
3819}
3820
Andreas Herrmannb1950b22013-10-01 13:39:05 +01003821subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01003822module_exit(arm_smmu_exit);
3823
3824MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
3825MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
3826MODULE_LICENSE("GPL v2");