/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"
#include "arm-smmu-regs.h"

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Translation context bank */
#define ARM_SMMU_CB(smmu, n)	((smmu)->cb_base + ((n) << (smmu)->pgshift))

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

/* Until ACPICA headers cover IORT rev. C */
#ifndef ACPI_IORT_SMMU_CORELINK_MMU401
#define ACPI_IORT_SMMU_CORELINK_MMU401	0x4
#endif
#ifndef ACPI_IORT_SMMU_CAVIUM_THUNDERX
#define ACPI_IORT_SMMU_CAVIUM_THUNDERX	0x5
#endif

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}
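
/*
 * Resetting an S2CR to s2cr_init_val thus either lets unclaimed streams
 * bypass translation or makes them fault, depending on the
 * disable_bypass parameter above.
 */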

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_cb {
	u64				ttbr[2];
	u32				tcr[2];
	u32				mair[2];
	struct arm_smmu_cfg		*cfg;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
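
/*
 * Note the comma operator in for_each_cfg_sme(): idx is refreshed from
 * the fwspec before each bounds check, and fwspec_smendx() safely yields
 * INVALID_SMENDX rather than reading past smendx[] if i overruns.
 */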

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	void __iomem			*cb_base;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
#define ARM_SMMU_FEAT_EXIDS		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	struct arm_smmu_cb		*cbs;
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	u32				cavium_id_base; /* Specific to Cavium */

	spinlock_t			global_sync_lock;

	/* IOMMU core code handle */
	struct iommu_device		iommu;
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	union {
		u16			asid;
		u16			vmid;
	};
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
	ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}
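
/*
 * For example (illustrative only), a DT node would select the option
 * above with a boolean property:
 *
 *	smmu@2b400000 {
 *		compatible = "arm,smmu-v1";
 *		...
 *		calxeda,smmu-secure-config-access;
 *	};
 */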

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/*
 * Wait for any pending TLB invalidations to complete: spin briefly for
 * the common fast case, then back off exponentially so that a wedged
 * SMMU doesn't monopolise the CPU.
 */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
				void __iomem *sync, void __iomem *status)
{
	unsigned int spin_cnt, delay;

	writel_relaxed(0, sync);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}
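
/*
 * Worst case, the loop above spins TLB_SPIN_COUNT times and then sleeps
 * 1 + 2 + 4 + ... + 2^19 microseconds between polls, i.e. roughly 2^20 us
 * in total, which is where the "1s!" figure for TLB_LOOP_TIMEOUT comes from.
 */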

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	void __iomem *base = ARM_SMMU_GR0(smmu);
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
			    base + ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
			    base + ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_sync_vmid(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_global(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_GR0(smmu);

	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
	arm_smmu_tlb_sync_global(smmu);
}
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

	if (stage1) {
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			/* TLBIVA takes VA[31:12], with the ASID in bits [7:0] */
			iova &= ~0xfffUL;
			iova |= cfg->asid;
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)cfg->asid << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else {
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	}
}

/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
 */
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
					 size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);

	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
}

static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_vmid_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_vmid,
};
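
/*
 * In short: stage-1 domains, and stage-2 domains on SMMUv2, can invalidate
 * and sync per context bank, whereas stage-2 on SMMUv1 only has the global
 * TLBIVMID/sTLBGSYNC interface, hence the third set of ops above.
 */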

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			cb->tcr[1] |= TTBCR2_SEP_UPSTREAM;
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= TTBCR2_AS;
		}
	} else {
		cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
		} else {
			cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			cb->ttbr[0] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
			cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			cb->ttbr[1] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
	}
}

static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;
	void __iomem *cb_base, *gr1_base;

	cb_base = ARM_SMMU_CB(smmu, idx);

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		return;
	}

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= cfg->vmid << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(idx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= cfg->vmid << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(idx));

	/*
	 * TTBCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		writel_relaxed(cb->tcr[1], cb_base + ARM_SMMU_CB_TTBCR2);
	writel_relaxed(cb->tcr[0], cb_base + ARM_SMMU_CB_TTBCR);

	/* TTBRs */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		writel_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
		writel_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		writeq_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
		if (stage1)
			writeq_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		writel_relaxed(cb->mair[0], cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(cb->mair[1], cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= SCTLR_E;

	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	const struct iommu_gather_ops *tlb_ops;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		tlb_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			tlb_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			tlb_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
	else
		cfg->asid = cfg->cbndx + smmu->cavium_id_base;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= tlb_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 smr;

	if (!smmu->smrs)
		return;

	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = smmu->streamid_mask << SMR_ID_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = smr >> SMR_ID_SHIFT;

	smr = smmu->streamid_mask << SMR_MASK_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
}
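
/*
 * For example, an implementation with 15-bit stream IDs would read back
 * streamid_mask = 0x7fff after the all-ones write above; the writable
 * SMR mask bits are probed the same way. (Values purely illustrative.)
 */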

static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
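		/*
		 * For example (values purely illustrative), an existing entry
		 * {id = 0x400, mask = 0x00f} matches stream IDs 0x400-0x40f,
		 * and a new {id = 0x404, mask = 0x003} covering only
		 * 0x404-0x407 passes both checks below, so index i is reused.
		 */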
1068 if ((mask & smrs[i].mask) == mask &&
1069 !((id ^ smrs[i].id) & ~smrs[i].mask))
1070 return i;
1071 /*
1072 * If the new entry has any other overlap with an existing one,
1073 * though, then there always exists at least one stream ID
1074 * which would cause a conflict, and we can't allow that risk.
1075 */
1076 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
1077 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001078 }
1079
Robin Murphy588888a2016-09-12 17:13:54 +01001080 return free_idx;
1081}
1082
1083static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1084{
1085 if (--smmu->s2crs[idx].count)
1086 return false;
1087
1088 smmu->s2crs[idx] = s2cr_init_val;
1089 if (smmu->smrs)
1090 smmu->smrs[idx].valid = false;
1091
1092 return true;
1093}
1094
1095static int arm_smmu_master_alloc_smes(struct device *dev)
1096{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001097 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1098 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphy588888a2016-09-12 17:13:54 +01001099 struct arm_smmu_device *smmu = cfg->smmu;
1100 struct arm_smmu_smr *smrs = smmu->smrs;
1101 struct iommu_group *group;
1102 int i, idx, ret;
1103
1104 mutex_lock(&smmu->stream_map_mutex);
1105 /* Figure out a viable stream map entry allocation */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001106 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy021bb842016-09-14 15:26:46 +01001107 u16 sid = fwspec->ids[i];
1108 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1109
Robin Murphy588888a2016-09-12 17:13:54 +01001110 if (idx != INVALID_SMENDX) {
1111 ret = -EEXIST;
1112 goto out_err;
1113 }
1114
Robin Murphy021bb842016-09-14 15:26:46 +01001115 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy588888a2016-09-12 17:13:54 +01001116 if (ret < 0)
1117 goto out_err;
1118
1119 idx = ret;
1120 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy021bb842016-09-14 15:26:46 +01001121 smrs[idx].id = sid;
1122 smrs[idx].mask = mask;
Robin Murphy588888a2016-09-12 17:13:54 +01001123 smrs[idx].valid = true;
1124 }
1125 smmu->s2crs[idx].count++;
1126 cfg->smendx[i] = (s16)idx;
1127 }
1128
1129 group = iommu_group_get_for_dev(dev);
1130 if (!group)
1131 group = ERR_PTR(-ENOMEM);
1132 if (IS_ERR(group)) {
1133 ret = PTR_ERR(group);
1134 goto out_err;
1135 }
1136 iommu_group_put(group);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001137
Will Deacon45ae7cf2013-06-24 18:31:25 +01001138 /* It worked! Now, poke the actual hardware */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001139 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001140 arm_smmu_write_sme(smmu, idx);
1141 smmu->s2crs[idx].group = group;
1142 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001143
Robin Murphy588888a2016-09-12 17:13:54 +01001144 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001145 return 0;
1146
Robin Murphy588888a2016-09-12 17:13:54 +01001147out_err:
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001148 while (i--) {
Robin Murphy588888a2016-09-12 17:13:54 +01001149 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001150 cfg->smendx[i] = INVALID_SMENDX;
1151 }
Robin Murphy588888a2016-09-12 17:13:54 +01001152 mutex_unlock(&smmu->stream_map_mutex);
1153 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001154}
1155
Robin Murphyadfec2e2016-09-12 17:13:55 +01001156static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001157{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001158 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1159 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphyd3097e32016-09-12 17:13:53 +01001160 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01001161
Robin Murphy588888a2016-09-12 17:13:54 +01001162 mutex_lock(&smmu->stream_map_mutex);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001163 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001164 if (arm_smmu_free_sme(smmu, idx))
1165 arm_smmu_write_sme(smmu, idx);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001166 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001167 }
Robin Murphy588888a2016-09-12 17:13:54 +01001168 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001169}
1170
Will Deacon45ae7cf2013-06-24 18:31:25 +01001171static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Robin Murphyadfec2e2016-09-12 17:13:55 +01001172 struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001173{
Will Deacon44680ee2014-06-25 11:29:12 +01001174 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001175 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001176 u8 cbndx = smmu_domain->cfg.cbndx;
Will Deacon61bc6712017-01-06 16:56:03 +00001177 enum arm_smmu_s2cr_type type;
Robin Murphy588888a2016-09-12 17:13:54 +01001178 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001179
Will Deacon61bc6712017-01-06 16:56:03 +00001180 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
1181 type = S2CR_TYPE_BYPASS;
1182 else
1183 type = S2CR_TYPE_TRANS;
1184
Robin Murphyadfec2e2016-09-12 17:13:55 +01001185 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy8e8b2032016-09-12 17:13:50 +01001186 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy588888a2016-09-12 17:13:54 +01001187 continue;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001188
Robin Murphy8e8b2032016-09-12 17:13:50 +01001189 s2cr[idx].type = type;
Sricharan Re1989802017-01-06 18:58:15 +05301190 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001191 s2cr[idx].cbndx = cbndx;
1192 arm_smmu_write_s2cr(smmu, idx);
Will Deacon43b412b2014-07-15 11:22:24 +01001193 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001194 return 0;
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001195}
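
/*
 * Illustrative sketch (not part of this driver): the three fields set
 * above pack into a single 32-bit S2CR register when written back via
 * arm_smmu_write_s2cr(). The field offsets below are assumptions drawn
 * from the SMMU architecture's S2CR layout; the driver's real
 * definitions live in arm-smmu-regs.h.
 */
#define EX_S2CR_CBNDX_SHIFT	0	/* context bank index, TRANS only */
#define EX_S2CR_TYPE_SHIFT	16	/* TRANS / BYPASS / FAULT */
#define EX_S2CR_PRIVCFG_SHIFT	24	/* privilege override */

static inline u32 example_s2cr_encode(unsigned int type,
				      unsigned int privcfg, u8 cbndx)
{
	return (type << EX_S2CR_TYPE_SHIFT) |
	       (privcfg << EX_S2CR_PRIVCFG_SHIFT) |
	       (cbndx << EX_S2CR_CBNDX_SHIFT);
}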
1196
Will Deacon45ae7cf2013-06-24 18:31:25 +01001197static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1198{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001199 int ret;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001200 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1201 struct arm_smmu_device *smmu;
Joerg Roedel1d672632015-03-26 13:43:10 +01001202 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001203
Robin Murphyadfec2e2016-09-12 17:13:55 +01001204 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001205 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1206 return -ENXIO;
1207 }
1208
Robin Murphyfba4f8e2016-10-17 12:06:21 +01001209 /*
1210 * FIXME: The arch/arm DMA API code tries to attach devices to its own
1211 * domains between of_xlate() and add_device() - we have no way to cope
1212 * with that, so until ARM gets converted to rely on groups and default
1213 * domains, just say no (but more politely than by dereferencing NULL).
1214 * This should be at least a WARN_ON once that's sorted.
1215 */
1216 if (!fwspec->iommu_priv)
1217 return -ENODEV;
1218
Robin Murphyadfec2e2016-09-12 17:13:55 +01001219 smmu = fwspec_smmu(fwspec);
Will Deacon518f7132014-11-14 17:17:54 +00001220 /* Ensure that the domain is finalised */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001221 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001222 if (ret < 0)
Will Deacon518f7132014-11-14 17:17:54 +00001223 return ret;
1224
Will Deacon45ae7cf2013-06-24 18:31:25 +01001225 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001226 * Sanity check the domain. We don't support domains across
1227 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001228 */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001229 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001230 dev_err(dev,
1231 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphyadfec2e2016-09-12 17:13:55 +01001232 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001233 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001234 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001235
1236 /* Looks ok, so add the device to the domain */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001237 return arm_smmu_domain_add_master(smmu_domain, fwspec);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001238}
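
/*
 * For context, arm_smmu_attach_dev() is only ever reached through the
 * core IOMMU API. A minimal usage sketch (hypothetical caller; error
 * handling kept short, and 'dev' is presumed to sit behind this SMMU):
 */
static int example_attach(struct device *dev)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(dev->bus);	/* arm_smmu_domain_alloc() */
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);	/* arm_smmu_attach_dev() */
	if (ret)
		iommu_domain_free(domain);
	return ret;
}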
1239
Will Deacon45ae7cf2013-06-24 18:31:25 +01001240static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001241 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001242{
Robin Murphy523d7422017-06-22 16:53:56 +01001243 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001244
Will Deacon518f7132014-11-14 17:17:54 +00001245 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001246 return -ENODEV;
1247
Robin Murphy523d7422017-06-22 16:53:56 +01001248 return ops->map(ops, iova, paddr, size, prot);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001249}
1250
1251static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1252 size_t size)
1253{
Robin Murphy523d7422017-06-22 16:53:56 +01001254 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001255
Will Deacon518f7132014-11-14 17:17:54 +00001256 if (!ops)
1257 return 0;
1258
Robin Murphy523d7422017-06-22 16:53:56 +01001259 return ops->unmap(ops, iova, size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001260}
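
/*
 * These two callbacks back iommu_map()/iommu_unmap(); all page-table
 * manipulation is delegated to the io-pgtable ops installed when the
 * domain was finalised. A round-trip sketch (assumes a domain attached
 * as in the previous example and 4K page support in pgsize_bitmap):
 */
static int example_map_one_page(struct iommu_domain *domain,
				unsigned long iova, phys_addr_t paddr)
{
	int ret;

	ret = iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* ... device DMA through 'iova' happens here ... */

	return iommu_unmap(domain, iova, SZ_4K) == SZ_4K ? 0 : -EIO;
}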
1261
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001262static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1263 dma_addr_t iova)
1264{
Joerg Roedel1d672632015-03-26 13:43:10 +01001265 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001266 struct arm_smmu_device *smmu = smmu_domain->smmu;
1267 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1268 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
1269 struct device *dev = smmu->dev;
1270 void __iomem *cb_base;
1271 u32 tmp;
1272 u64 phys;
Robin Murphy523d7422017-06-22 16:53:56 +01001273 unsigned long va, flags;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001274
Robin Murphy452107c2017-03-30 17:56:30 +01001275 cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001276
Robin Murphy523d7422017-06-22 16:53:56 +01001277 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
Robin Murphy661d9622015-05-27 17:09:34 +01001278 /* ATS1 registers can only be written atomically */
1279 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01001280 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01001281 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
1282 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01001283 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001284
1285 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1286 !(tmp & ATSR_ACTIVE), 5, 50)) {
Robin Murphy523d7422017-06-22 16:53:56 +01001287 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001288 dev_err(dev,
Fabio Estevam077124c2015-08-18 17:12:24 +01001289 "iova to phys timed out on %pad. Falling back to software table walk.\n",
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001290 &iova);
1291 return ops->iova_to_phys(ops, iova);
1292 }
1293
Robin Murphyf9a05f02016-04-13 18:13:01 +01001294 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Robin Murphy523d7422017-06-22 16:53:56 +01001295 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001296 if (phys & CB_PAR_F) {
1297 dev_err(dev, "translation fault!\n");
1298 dev_err(dev, "PAR = 0x%llx\n", phys);
1299 return 0;
1300 }
1301
1302 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1303}
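
/*
 * Worked example for the return expression above (values made up): the
 * hardware leaves the translated page address in PAR[39:12], and the
 * page offset is carried over unchanged from the input address.
 *
 *   iova = 0x40000678  ->  offset    = iova & 0xfff              = 0x678
 *   PAR  = 0x87654000  ->  page base = PAR & GENMASK_ULL(39, 12) = 0x87654000
 *   phys = page base | offset                                    = 0x87654678
 */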
1304
Will Deacon45ae7cf2013-06-24 18:31:25 +01001305static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001306 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001307{
Joerg Roedel1d672632015-03-26 13:43:10 +01001308 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Robin Murphy523d7422017-06-22 16:53:56 +01001309 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001310
Sunil Gouthambdf95922017-04-25 15:27:52 +05301311 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1312 return iova;
1313
Will Deacon518f7132014-11-14 17:17:54 +00001314 if (!ops)
Will Deacona44a9792013-11-07 18:47:50 +00001315 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001316
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001317 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
Robin Murphy523d7422017-06-22 16:53:56 +01001318 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1319 return arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001320
Robin Murphy523d7422017-06-22 16:53:56 +01001321 return ops->iova_to_phys(ops, iova);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001322}
1323
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001324static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001325{
Will Deacond0948942014-06-24 17:30:10 +01001326 switch (cap) {
1327 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001328 /*
1329 * Return true here as the SMMU can always send out coherent
1330 * requests.
1331 */
1332 return true;
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001333 case IOMMU_CAP_NOEXEC:
1334 return true;
Will Deacond0948942014-06-24 17:30:10 +01001335 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001336 return false;
Will Deacond0948942014-06-24 17:30:10 +01001337 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001338}
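
/*
 * Consumers query these flags via the core API rather than calling this
 * callback directly. A sketch, assuming the SMMU ops have been installed
 * on the platform bus:
 */
static bool example_smmu_is_coherent(void)
{
	/* Always true here, as the driver reports coherency unconditionally */
	return iommu_capable(&platform_bus_type, IOMMU_CAP_CACHE_COHERENCY);
}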
Will Deacon45ae7cf2013-06-24 18:31:25 +01001339
Robin Murphy021bb842016-09-14 15:26:46 +01001340static int arm_smmu_match_node(struct device *dev, void *data)
1341{
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001342 return dev->fwnode == data;
Robin Murphy021bb842016-09-14 15:26:46 +01001343}
1344
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001345static
1346struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
Robin Murphy021bb842016-09-14 15:26:46 +01001347{
1348 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001349 fwnode, arm_smmu_match_node);
Robin Murphy021bb842016-09-14 15:26:46 +01001350 put_device(dev);
1351 return dev ? dev_get_drvdata(dev) : NULL;
1352}
1353
Will Deacon03edb222015-01-19 14:27:33 +00001354static int arm_smmu_add_device(struct device *dev)
1355{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001356 struct arm_smmu_device *smmu;
Robin Murphyf80cd882016-09-14 15:21:39 +01001357 struct arm_smmu_master_cfg *cfg;
Robin Murphy021bb842016-09-14 15:26:46 +01001358 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Robin Murphyf80cd882016-09-14 15:21:39 +01001359 int i, ret;
1360
Robin Murphy021bb842016-09-14 15:26:46 +01001361 if (using_legacy_binding) {
1362 ret = arm_smmu_register_legacy_master(dev, &smmu);
Artem Savkova7990c62017-08-08 12:26:02 +02001363
1364 /*
1365		 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
1366 * will allocate/initialise a new one. Thus we need to update fwspec for
1367 * later use.
1368 */
1369 fwspec = dev->iommu_fwspec;
Robin Murphy021bb842016-09-14 15:26:46 +01001370 if (ret)
1371 goto out_free;
Robin Murphy3c117b52016-11-02 17:31:32 +00001372 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
Lorenzo Pieralisice9babe2016-11-21 10:01:37 +00001373 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
Robin Murphy021bb842016-09-14 15:26:46 +01001374 } else {
1375 return -ENODEV;
1376 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001377
1378 ret = -EINVAL;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001379 for (i = 0; i < fwspec->num_ids; i++) {
1380 u16 sid = fwspec->ids[i];
Robin Murphy021bb842016-09-14 15:26:46 +01001381 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
Robin Murphyf80cd882016-09-14 15:21:39 +01001382
Robin Murphyadfec2e2016-09-12 17:13:55 +01001383 if (sid & ~smmu->streamid_mask) {
Robin Murphyf80cd882016-09-14 15:21:39 +01001384 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
Robin Murphy021bb842016-09-14 15:26:46 +01001385 sid, smmu->streamid_mask);
1386 goto out_free;
1387 }
1388 if (mask & ~smmu->smr_mask_mask) {
1389 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
Peng Fan6323f472017-04-21 17:03:36 +08001390 mask, smmu->smr_mask_mask);
Robin Murphyf80cd882016-09-14 15:21:39 +01001391 goto out_free;
1392 }
Robin Murphyf80cd882016-09-14 15:21:39 +01001393 }
Will Deacon03edb222015-01-19 14:27:33 +00001394
Robin Murphyadfec2e2016-09-12 17:13:55 +01001395 ret = -ENOMEM;
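	/* Here i == fwspec->num_ids, so this sizes cfg with one smendx[] slot per ID */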
1396 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1397 GFP_KERNEL);
1398 if (!cfg)
1399 goto out_free;
1400
1401 cfg->smmu = smmu;
1402 fwspec->iommu_priv = cfg;
1403 while (i--)
1404 cfg->smendx[i] = INVALID_SMENDX;
1405
Robin Murphy588888a2016-09-12 17:13:54 +01001406 ret = arm_smmu_master_alloc_smes(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001407 if (ret)
Vivek Gautamc54451a2017-07-06 15:07:00 +05301408 goto out_cfg_free;
Robin Murphyadfec2e2016-09-12 17:13:55 +01001409
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001410 iommu_device_link(&smmu->iommu, dev);
1411
Robin Murphyadfec2e2016-09-12 17:13:55 +01001412 return 0;
Robin Murphyf80cd882016-09-14 15:21:39 +01001413
Vivek Gautamc54451a2017-07-06 15:07:00 +05301414out_cfg_free:
1415 kfree(cfg);
Robin Murphyf80cd882016-09-14 15:21:39 +01001416out_free:
Robin Murphyadfec2e2016-09-12 17:13:55 +01001417 iommu_fwspec_free(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001418 return ret;
Will Deacon03edb222015-01-19 14:27:33 +00001419}
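
/*
 * Each 32-bit fwspec ID validated above packs a Stream ID in its low
 * half and an optional SMR mask in its high half. An unpacking sketch
 * (the 16-bit split mirrors SMR_MASK_SHIFT and is an assumption here):
 */
static void example_decode_fwid(u32 fwid, u16 *sid, u16 *mask)
{
	*sid  = fwid & 0xffff;	/* stream ID to match */
	*mask = fwid >> 16;	/* ID bits to ignore while matching */
}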
1420
Will Deacon45ae7cf2013-06-24 18:31:25 +01001421static void arm_smmu_remove_device(struct device *dev)
1422{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001423 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001424 struct arm_smmu_master_cfg *cfg;
1425 struct arm_smmu_device *smmu;
1426
Robin Murphyadfec2e2016-09-12 17:13:55 +01001428 if (!fwspec || fwspec->ops != &arm_smmu_ops)
Robin Murphyf80cd882016-09-14 15:21:39 +01001429 return;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001430
Joerg Roedel9648cbc2017-02-01 18:11:36 +01001431 cfg = fwspec->iommu_priv;
1432 smmu = cfg->smmu;
1433
1434 iommu_device_unlink(&smmu->iommu, dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001435 arm_smmu_master_free_smes(fwspec);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001436 iommu_group_remove_device(dev);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001437 kfree(fwspec->iommu_priv);
1438 iommu_fwspec_free(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001439}
1440
Joerg Roedelaf659932015-10-21 23:51:41 +02001441static struct iommu_group *arm_smmu_device_group(struct device *dev)
1442{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001443 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1444 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
Robin Murphy588888a2016-09-12 17:13:54 +01001445 struct iommu_group *group = NULL;
1446 int i, idx;
1447
Robin Murphyadfec2e2016-09-12 17:13:55 +01001448 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001449 if (group && smmu->s2crs[idx].group &&
1450 group != smmu->s2crs[idx].group)
1451 return ERR_PTR(-EINVAL);
1452
1453 group = smmu->s2crs[idx].group;
1454 }
1455
1456 if (group)
Robin Murphye1b44cb2016-11-11 17:59:22 +00001457 return iommu_group_ref_get(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001458
1459 if (dev_is_pci(dev))
1460 group = pci_device_group(dev);
1461 else
1462 group = generic_device_group(dev);
1463
Joerg Roedelaf659932015-10-21 23:51:41 +02001464 return group;
1465}
1466
Will Deaconc752ce42014-06-25 22:46:31 +01001467static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1468 enum iommu_attr attr, void *data)
1469{
Joerg Roedel1d672632015-03-26 13:43:10 +01001470 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001471
Will Deacon0834cc22017-01-06 16:28:17 +00001472 if (domain->type != IOMMU_DOMAIN_UNMANAGED)
1473 return -EINVAL;
1474
Will Deaconc752ce42014-06-25 22:46:31 +01001475 switch (attr) {
1476 case DOMAIN_ATTR_NESTING:
1477 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1478 return 0;
1479 default:
1480 return -ENODEV;
1481 }
1482}
1483
1484static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1485 enum iommu_attr attr, void *data)
1486{
Will Deacon518f7132014-11-14 17:17:54 +00001487 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001488 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001489
Will Deacon0834cc22017-01-06 16:28:17 +00001490 if (domain->type != IOMMU_DOMAIN_UNMANAGED)
1491 return -EINVAL;
1492
Will Deacon518f7132014-11-14 17:17:54 +00001493 mutex_lock(&smmu_domain->init_mutex);
1494
Will Deaconc752ce42014-06-25 22:46:31 +01001495 switch (attr) {
1496 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00001497 if (smmu_domain->smmu) {
1498 ret = -EPERM;
1499 goto out_unlock;
1500 }
1501
Will Deaconc752ce42014-06-25 22:46:31 +01001502 if (*(int *)data)
1503 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1504 else
1505 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1506
Will Deacon518f7132014-11-14 17:17:54 +00001507 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001508 default:
Will Deacon518f7132014-11-14 17:17:54 +00001509 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01001510 }
Will Deacon518f7132014-11-14 17:17:54 +00001511
1512out_unlock:
1513 mutex_unlock(&smmu_domain->init_mutex);
1514 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001515}
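
/*
 * A typical (sketched) caller selects nested translation before the
 * first attach, since the switch above returns -EPERM once the domain
 * has been finalised against an SMMU:
 */
static int example_enable_nesting(struct iommu_domain *domain)
{
	int nesting = 1;

	/* Must precede iommu_attach_device() on this domain */
	return iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting);
}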
1516
Robin Murphy021bb842016-09-14 15:26:46 +01001517static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1518{
Robin Murphy56fbf602017-03-31 12:03:33 +01001519 u32 mask, fwid = 0;
Robin Murphy021bb842016-09-14 15:26:46 +01001520
1521 if (args->args_count > 0)
1522 fwid |= (u16)args->args[0];
1523
1524 if (args->args_count > 1)
1525 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
Robin Murphy56fbf602017-03-31 12:03:33 +01001526 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
1527 fwid |= (u16)mask << SMR_MASK_SHIFT;
Robin Murphy021bb842016-09-14 15:26:46 +01001528
1529 return iommu_fwspec_add_ids(dev, &fwid, 1);
1530}
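
/*
 * of_xlate() performs the inverse of the unpacking shown earlier: the DT
 * cells supply a Stream ID and, optionally, a mask. Composing the same
 * fwid by hand would look like this sketch (16-bit split assumed, as
 * before):
 */
static u32 example_encode_fwid(u16 sid, u16 mask)
{
	return sid | (u32)mask << 16;
}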
1531
Eric Augerf3ebee82017-01-19 20:57:55 +00001532static void arm_smmu_get_resv_regions(struct device *dev,
1533 struct list_head *head)
1534{
1535 struct iommu_resv_region *region;
1536 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1537
1538 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00001539 prot, IOMMU_RESV_SW_MSI);
Eric Augerf3ebee82017-01-19 20:57:55 +00001540 if (!region)
1541 return;
1542
1543 list_add_tail(&region->list, head);
Robin Murphy273df962017-03-16 17:00:19 +00001544
1545 iommu_dma_get_resv_regions(dev, head);
Eric Augerf3ebee82017-01-19 20:57:55 +00001546}
1547
1548static void arm_smmu_put_resv_regions(struct device *dev,
1549 struct list_head *head)
1550{
1551 struct iommu_resv_region *entry, *next;
1552
1553 list_for_each_entry_safe(entry, next, head, list)
1554 kfree(entry);
1555}
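
/*
 * The core hands these regions out through iommu_get_resv_regions(). A
 * consumer that merely prints them might look like this sketch (real
 * callers reserve the ranges in their IOVA allocator instead):
 */
static void example_dump_resv_regions(struct device *dev)
{
	struct iommu_resv_region *region;
	LIST_HEAD(head);

	iommu_get_resv_regions(dev, &head);
	list_for_each_entry(region, &head, list)
		dev_info(dev, "reserved: %pa + 0x%zx\n",
			 &region->start, region->length);
	iommu_put_resv_regions(dev, &head);
}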
1556
Will Deacon518f7132014-11-14 17:17:54 +00001557static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001558 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001559 .domain_alloc = arm_smmu_domain_alloc,
1560 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001561 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001562 .map = arm_smmu_map,
1563 .unmap = arm_smmu_unmap,
Joerg Roedel76771c92014-12-02 13:07:13 +01001564 .map_sg = default_iommu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01001565 .iova_to_phys = arm_smmu_iova_to_phys,
1566 .add_device = arm_smmu_add_device,
1567 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001568 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001569 .domain_get_attr = arm_smmu_domain_get_attr,
1570 .domain_set_attr = arm_smmu_domain_set_attr,
Robin Murphy021bb842016-09-14 15:26:46 +01001571 .of_xlate = arm_smmu_of_xlate,
Eric Augerf3ebee82017-01-19 20:57:55 +00001572 .get_resv_regions = arm_smmu_get_resv_regions,
1573 .put_resv_regions = arm_smmu_put_resv_regions,
Will Deacon518f7132014-11-14 17:17:54 +00001574 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001575};
1576
1577static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1578{
1579 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001580 int i;
Peng Fan3ca37122016-05-03 21:50:30 +08001581 u32 reg, major;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001582
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001583 /* clear global FSR */
1584 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1585 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001586
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001587 /*
1588 * Reset stream mapping groups: Initial values mark all SMRn as
1589 * invalid and all S2CRn as bypass unless overridden.
1590 */
Robin Murphy8e8b2032016-09-12 17:13:50 +01001591 for (i = 0; i < smmu->num_mapping_groups; ++i)
1592 arm_smmu_write_sme(smmu, i);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001593
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301594 if (smmu->model == ARM_MMU500) {
1595 /*
1596 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
1597 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
1598 * bit is only present in MMU-500r2 onwards.
1599 */
1600 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
1601 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
Peng Fan3ca37122016-05-03 21:50:30 +08001602 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
Nipun Gupta6eb18d42016-11-04 15:25:23 +05301603 if (major >= 2)
1604 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
1605 /*
1606 * Allow unmatched Stream IDs to allocate bypass
1607 * TLB entries for reduced latency.
1608 */
1609 reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
Peng Fan3ca37122016-05-03 21:50:30 +08001610 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
1611 }
1612
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001613 /* Make sure all context banks are disabled and clear CB_FSR */
1614 for (i = 0; i < smmu->num_context_banks; ++i) {
Robin Murphy90df3732017-08-08 14:56:14 +01001615 void __iomem *cb_base = ARM_SMMU_CB(smmu, i);
1616
1617 arm_smmu_write_context_bank(smmu, i);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001618 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001619 /*
1620 * Disable MMU-500's not-particularly-beneficial next-page
1621 * prefetcher for the sake of errata #841119 and #826419.
1622 */
1623 if (smmu->model == ARM_MMU500) {
1624 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
1625 reg &= ~ARM_MMU500_ACTLR_CPRE;
1626 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
1627 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001628 }
Will Deacon1463fe42013-07-31 19:21:27 +01001629
Will Deacon45ae7cf2013-06-24 18:31:25 +01001630 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001631 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1632 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
1633
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001634 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001635
Will Deacon45ae7cf2013-06-24 18:31:25 +01001636 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001637 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001638
1639 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001640 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001641
Robin Murphy25a1c962016-02-10 14:25:33 +00001642 /* Enable client access, handling unmatched streams as appropriate */
1643 reg &= ~sCR0_CLIENTPD;
1644 if (disable_bypass)
1645 reg |= sCR0_USFCFG;
1646 else
1647 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001648
1649 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001650 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001651
1652 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001653 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001654
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001655 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1656 reg |= sCR0_VMID16EN;
1657
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001658 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1659 reg |= sCR0_EXIDENABLE;
1660
Will Deacon45ae7cf2013-06-24 18:31:25 +01001661 /* Push the button */
Robin Murphy11febfc2017-03-30 17:56:31 +01001662 arm_smmu_tlb_sync_global(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001663 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001664}
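
/*
 * arm_smmu_tlb_sync_global(), defined earlier in the file, is what
 * orders the invalidations above against the final sCR0 write: it issues
 * a global TLB sync and polls until the SMMU signals completion. In
 * outline it behaves like this sketch (register names as in
 * arm-smmu-regs.h; the loop bound reuses TLB_LOOP_TIMEOUT from this
 * file, and the real driver also warns on timeout):
 */
static void example_tlb_sync_global(void __iomem *gr0_base)
{
	unsigned int spin = TLB_LOOP_TIMEOUT;

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while ((readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS) &
		sTLBGSTATUS_GSACTIVE) && --spin)
		cpu_relax();
}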
1665
1666static int arm_smmu_id_size_to_bits(int size)
1667{
1668 switch (size) {
1669 case 0:
1670 return 32;
1671 case 1:
1672 return 36;
1673 case 2:
1674 return 40;
1675 case 3:
1676 return 42;
1677 case 4:
1678 return 44;
1679 case 5:
1680 default:
1681 return 48;
1682 }
1683}
1684
1685static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1686{
1687 unsigned long size;
1688 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1689 u32 id;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001690 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001691 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001692
1693 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001694 dev_notice(smmu->dev, "SMMUv%d with:\n",
1695 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001696
1697 /* ID0 */
1698 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001699
1700 /* Restrict available stages based on module parameter */
1701 if (force_stage == 1)
1702 id &= ~(ID0_S2TS | ID0_NTS);
1703 else if (force_stage == 2)
1704 id &= ~(ID0_S1TS | ID0_NTS);
1705
Will Deacon45ae7cf2013-06-24 18:31:25 +01001706 if (id & ID0_S1TS) {
1707 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1708 dev_notice(smmu->dev, "\tstage 1 translation\n");
1709 }
1710
1711 if (id & ID0_S2TS) {
1712 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1713 dev_notice(smmu->dev, "\tstage 2 translation\n");
1714 }
1715
1716 if (id & ID0_NTS) {
1717 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1718 dev_notice(smmu->dev, "\tnested translation\n");
1719 }
1720
1721 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001722 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001723 dev_err(smmu->dev, "\tno translation support!\n");
1724 return -ENODEV;
1725 }
1726
Robin Murphyb7862e32016-04-13 18:13:03 +01001727 if ((id & ID0_S1TS) &&
1728 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001729 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1730 dev_notice(smmu->dev, "\taddress translation ops\n");
1731 }
1732
Robin Murphybae2c2d2015-07-29 19:46:05 +01001733 /*
1734 * In order for DMA API calls to work properly, we must defer to what
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001735 * the FW says about coherency, regardless of what the hardware claims.
Robin Murphybae2c2d2015-07-29 19:46:05 +01001736 * Fortunately, this also opens up a workaround for systems where the
1737 * ID register value has ended up configured incorrectly.
1738 */
Robin Murphybae2c2d2015-07-29 19:46:05 +01001739 cttw_reg = !!(id & ID0_CTTW);
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001740 if (cttw_fw || cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001741 dev_notice(smmu->dev, "\t%scoherent table walk\n",
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001742 cttw_fw ? "" : "non-");
1743 if (cttw_fw != cttw_reg)
Robin Murphybae2c2d2015-07-29 19:46:05 +01001744 dev_notice(smmu->dev,
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001745 "\t(IDR0.CTTW overridden by FW configuration)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001746
Robin Murphy21174242016-09-12 17:13:48 +01001747 /* Max. number of entries we have for stream matching/indexing */
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001748 if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
1749 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1750 size = 1 << 16;
1751 } else {
1752 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
1753 }
Robin Murphy21174242016-09-12 17:13:48 +01001754 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001755 if (id & ID0_SMS) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001756 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy21174242016-09-12 17:13:48 +01001757 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
1758 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001759 dev_err(smmu->dev,
1760 "stream-matching supported, but no SMRs present!\n");
1761 return -ENODEV;
1762 }
1763
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001764 /* Zero-initialised to mark as invalid */
1765 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1766 GFP_KERNEL);
1767 if (!smmu->smrs)
1768 return -ENOMEM;
1769
Will Deacon45ae7cf2013-06-24 18:31:25 +01001770 dev_notice(smmu->dev,
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03001771			   "\tstream matching with %lu register groups\n", size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001772 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001773 /* s2cr->type == 0 means translation, so initialise explicitly */
1774 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1775 GFP_KERNEL);
1776 if (!smmu->s2crs)
1777 return -ENOMEM;
1778 for (i = 0; i < size; i++)
1779 smmu->s2crs[i] = s2cr_init_val;
1780
Robin Murphy21174242016-09-12 17:13:48 +01001781 smmu->num_mapping_groups = size;
Robin Murphy588888a2016-09-12 17:13:54 +01001782 mutex_init(&smmu->stream_map_mutex);
Will Deacon8e517e72017-07-06 15:55:48 +01001783 spin_lock_init(&smmu->global_sync_lock);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001784
Robin Murphy7602b872016-04-28 17:12:09 +01001785 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1786 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1787 if (!(id & ID0_PTFS_NO_AARCH32S))
1788 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1789 }
1790
Will Deacon45ae7cf2013-06-24 18:31:25 +01001791 /* ID1 */
1792 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01001793 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001794
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001795 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00001796 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Robin Murphy452107c2017-03-30 17:56:30 +01001797 size <<= smmu->pgshift;
1798 if (smmu->cb_base != gr0_base + size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001799 dev_warn(smmu->dev,
Robin Murphy452107c2017-03-30 17:56:30 +01001800 "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n",
1801 size * 2, (smmu->cb_base - gr0_base) * 2);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001802
Will Deacon518f7132014-11-14 17:17:54 +00001803 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001804 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1805 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1806 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1807 return -ENODEV;
1808 }
1809 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1810 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01001811 /*
1812 * Cavium CN88xx erratum #27704.
1813 * Ensure ASID and VMID allocation is unique across all SMMUs in
1814 * the system.
1815 */
1816 if (smmu->model == CAVIUM_SMMUV2) {
1817 smmu->cavium_id_base =
1818 atomic_add_return(smmu->num_context_banks,
1819 &cavium_smmu_context_count);
1820 smmu->cavium_id_base -= smmu->num_context_banks;
Robert Richter53c35dce2017-03-13 11:39:01 +01001821 dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
Robin Murphye086d912016-04-13 18:12:58 +01001822 }
Robin Murphy90df3732017-08-08 14:56:14 +01001823 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1824 sizeof(*smmu->cbs), GFP_KERNEL);
1825 if (!smmu->cbs)
1826 return -ENOMEM;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001827
1828 /* ID2 */
1829 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
1830 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001831 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001832
Will Deacon518f7132014-11-14 17:17:54 +00001833 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001834 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001835 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001836
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001837 if (id & ID2_VMID16)
1838 smmu->features |= ARM_SMMU_FEAT_VMID16;
1839
Robin Murphyf1d84542015-03-04 16:41:05 +00001840 /*
1841 * What the page table walker can address actually depends on which
1842 * descriptor format is in use, but since a) we don't know that yet,
1843 * and b) it can vary per context bank, this will have to do...
1844 */
1845 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1846 dev_warn(smmu->dev,
1847 "failed to set DMA mask for table walker\n");
1848
Robin Murphyb7862e32016-04-13 18:13:03 +01001849 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00001850 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01001851 if (smmu->version == ARM_SMMU_V1_64K)
1852 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001853 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001854 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00001855 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00001856 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01001857 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00001858 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01001859 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00001860 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01001861 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001862 }
1863
Robin Murphy7602b872016-04-28 17:12:09 +01001864 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01001865 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01001866 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01001867 if (smmu->features &
1868 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01001869 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01001870 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01001871 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01001872 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01001873 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01001874
Robin Murphyd5466352016-05-09 17:20:09 +01001875 if (arm_smmu_ops.pgsize_bitmap == -1UL)
1876 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
1877 else
1878 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
1879 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
1880 smmu->pgsize_bitmap);
1881
Will Deacon28d60072014-09-01 16:24:48 +01001883 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1884 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001885 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001886
1887 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1888 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001889 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001890
Will Deacon45ae7cf2013-06-24 18:31:25 +01001891 return 0;
1892}
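
/*
 * The pgsize_bitmap assembled above is what the IOMMU core consults when
 * it splits a map request into hardware-supported pages. A simplified
 * sketch of that selection (mirroring, not quoting, the core's logic;
 * assumes size != 0):
 */
static size_t example_pick_pgsize(unsigned long pgsize_bitmap,
				  unsigned long iova, phys_addr_t paddr,
				  size_t size)
{
	unsigned long candidates = pgsize_bitmap & GENMASK(__fls(size), 0);
	unsigned long align = iova | paddr;

	/* A page must also divide the alignment of both addresses */
	if (align)
		candidates &= GENMASK(__ffs(align), 0);

	/* Prefer the largest size that survived the filtering */
	return candidates ? BIT(__fls(candidates)) : 0;
}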
1893
Robin Murphy67b65a32016-04-13 18:12:57 +01001894struct arm_smmu_match_data {
1895 enum arm_smmu_arch_version version;
1896 enum arm_smmu_implementation model;
1897};
1898
1899#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
1900static struct arm_smmu_match_data name = { .version = ver, .model = imp }
1901
1902ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
1903ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01001904ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001905ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01001906ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01001907
Joerg Roedel09b52692014-10-02 12:24:45 +02001908static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01001909 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1910 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1911 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01001912 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001913 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01001914 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01001915 { },
1916};
1917MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
1918
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001919#ifdef CONFIG_ACPI
1920static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
1921{
1922 int ret = 0;
1923
1924 switch (model) {
1925 case ACPI_IORT_SMMU_V1:
1926 case ACPI_IORT_SMMU_CORELINK_MMU400:
1927 smmu->version = ARM_SMMU_V1;
1928 smmu->model = GENERIC_SMMU;
1929 break;
Robin Murphy84c24372017-06-19 16:41:56 +01001930 case ACPI_IORT_SMMU_CORELINK_MMU401:
1931 smmu->version = ARM_SMMU_V1_64K;
1932 smmu->model = GENERIC_SMMU;
1933 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001934 case ACPI_IORT_SMMU_V2:
1935 smmu->version = ARM_SMMU_V2;
1936 smmu->model = GENERIC_SMMU;
1937 break;
1938 case ACPI_IORT_SMMU_CORELINK_MMU500:
1939 smmu->version = ARM_SMMU_V2;
1940 smmu->model = ARM_MMU500;
1941 break;
Robin Murphy84c24372017-06-19 16:41:56 +01001942 case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
1943 smmu->version = ARM_SMMU_V2;
1944 smmu->model = CAVIUM_SMMUV2;
1945 break;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001946 default:
1947 ret = -ENODEV;
1948 }
1949
1950 return ret;
1951}
1952
1953static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
1954 struct arm_smmu_device *smmu)
1955{
1956 struct device *dev = smmu->dev;
1957 struct acpi_iort_node *node =
1958 *(struct acpi_iort_node **)dev_get_platdata(dev);
1959 struct acpi_iort_smmu *iort_smmu;
1960 int ret;
1961
1962 /* Retrieve SMMU1/2 specific data */
1963 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
1964
1965 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
1966 if (ret < 0)
1967 return ret;
1968
1969 /* Ignore the configuration access interrupt */
1970 smmu->num_global_irqs = 1;
1971
1972 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
1973 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
1974
1975 return 0;
1976}
1977#else
1978static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
1979 struct arm_smmu_device *smmu)
1980{
1981 return -ENODEV;
1982}
1983#endif
1984
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001985static int arm_smmu_device_dt_probe(struct platform_device *pdev,
1986 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001987{
Robin Murphy67b65a32016-04-13 18:12:57 +01001988 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001989 struct device *dev = &pdev->dev;
Robin Murphy021bb842016-09-14 15:26:46 +01001990 bool legacy_binding;
1991
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00001992 if (of_property_read_u32(dev->of_node, "#global-interrupts",
1993 &smmu->num_global_irqs)) {
1994 dev_err(dev, "missing #global-interrupts property\n");
1995 return -ENODEV;
1996 }
1997
1998 data = of_device_get_match_data(dev);
1999 smmu->version = data->version;
2000 smmu->model = data->model;
2001
2002 parse_driver_options(smmu);
2003
Robin Murphy021bb842016-09-14 15:26:46 +01002004 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
2005 if (legacy_binding && !using_generic_binding) {
2006 if (!using_legacy_binding)
2007 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
2008 using_legacy_binding = true;
2009 } else if (!legacy_binding && !using_legacy_binding) {
2010 using_generic_binding = true;
2011 } else {
2012 dev_err(dev, "not probing due to mismatched DT properties\n");
2013 return -ENODEV;
2014 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002015
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002016 if (of_dma_is_coherent(dev->of_node))
2017 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2018
2019 return 0;
2020}
2021
Robin Murphyf6810c12017-04-10 16:51:05 +05302022static void arm_smmu_bus_init(void)
2023{
2024 /* Oh, for a proper bus abstraction */
2025 if (!iommu_present(&platform_bus_type))
2026 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2027#ifdef CONFIG_ARM_AMBA
2028 if (!iommu_present(&amba_bustype))
2029 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2030#endif
2031#ifdef CONFIG_PCI
2032 if (!iommu_present(&pci_bus_type)) {
2033 pci_request_acs();
2034 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2035 }
2036#endif
2037}
2038
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002039static int arm_smmu_device_probe(struct platform_device *pdev)
2040{
2041 struct resource *res;
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002042 resource_size_t ioaddr;
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002043 struct arm_smmu_device *smmu;
2044 struct device *dev = &pdev->dev;
2045 int num_irqs, i, err;
2046
Will Deacon45ae7cf2013-06-24 18:31:25 +01002047 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2048 if (!smmu) {
2049 dev_err(dev, "failed to allocate arm_smmu_device\n");
2050 return -ENOMEM;
2051 }
2052 smmu->dev = dev;
2053
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002054 if (dev->of_node)
2055 err = arm_smmu_device_dt_probe(pdev, smmu);
2056 else
2057 err = arm_smmu_device_acpi_probe(pdev, smmu);
2058
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002059 if (err)
2060 return err;
Robin Murphy09360402014-08-28 17:51:59 +01002061
Will Deacon45ae7cf2013-06-24 18:31:25 +01002062	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01002063	smmu->base = devm_ioremap_resource(dev, res);
2064	if (IS_ERR(smmu->base))
2065		return PTR_ERR(smmu->base);
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002066	ioaddr = res->start;
Robin Murphy452107c2017-03-30 17:56:30 +01002067 smmu->cb_base = smmu->base + resource_size(res) / 2;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002068
Will Deacon45ae7cf2013-06-24 18:31:25 +01002069 num_irqs = 0;
2070 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2071 num_irqs++;
2072 if (num_irqs > smmu->num_global_irqs)
2073 smmu->num_context_irqs++;
2074 }
2075
Andreas Herrmann44a08de2013-10-01 13:39:07 +01002076 if (!smmu->num_context_irqs) {
2077 dev_err(dev, "found %d interrupts but expected at least %d\n",
2078 num_irqs, smmu->num_global_irqs + 1);
2079 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002080 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01002081
2082 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
2083 GFP_KERNEL);
2084 if (!smmu->irqs) {
2085 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2086 return -ENOMEM;
2087 }
2088
2089 for (i = 0; i < num_irqs; ++i) {
2090 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07002091
Will Deacon45ae7cf2013-06-24 18:31:25 +01002092 if (irq < 0) {
2093 dev_err(dev, "failed to get irq index %d\n", i);
2094 return -ENODEV;
2095 }
2096 smmu->irqs[i] = irq;
2097 }
2098
Olav Haugan3c8766d2014-08-22 17:12:32 -07002099 err = arm_smmu_device_cfg_probe(smmu);
2100 if (err)
2101 return err;
2102
Robin Murphyb7862e32016-04-13 18:13:03 +01002103 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01002104 smmu->num_context_banks != smmu->num_context_irqs) {
2105 dev_err(dev,
2106 "found only %d context interrupt(s) but %d required\n",
2107 smmu->num_context_irqs, smmu->num_context_banks);
Robin Murphyf80cd882016-09-14 15:21:39 +01002108 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002109 }
2110
Will Deacon45ae7cf2013-06-24 18:31:25 +01002111 for (i = 0; i < smmu->num_global_irqs; ++i) {
Peng Fanbee14002016-07-04 17:38:22 +08002112 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2113 arm_smmu_global_fault,
2114 IRQF_SHARED,
2115 "arm-smmu global fault",
2116 smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002117 if (err) {
2118 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2119 i, smmu->irqs[i]);
Robin Murphyf80cd882016-09-14 15:21:39 +01002120 return err;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002121 }
2122 }
2123
Joerg Roedel9648cbc2017-02-01 18:11:36 +01002124 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2125 "smmu.%pa", &ioaddr);
2126 if (err) {
2127 dev_err(dev, "Failed to register iommu in sysfs\n");
2128 return err;
2129 }
2130
2131 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2132 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2133
2134 err = iommu_device_register(&smmu->iommu);
2135 if (err) {
2136 dev_err(dev, "Failed to register iommu\n");
2137 return err;
2138 }
2139
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002140 platform_set_drvdata(pdev, smmu);
Will Deaconfd90cec2013-08-21 13:56:34 +01002141 arm_smmu_device_reset(smmu);
Aleksey Makarovdc0eaa42017-01-19 17:36:36 +03002142 arm_smmu_test_smr_masks(smmu);
Robin Murphy021bb842016-09-14 15:26:46 +01002143
Robin Murphyf6810c12017-04-10 16:51:05 +05302144 /*
2145 * For ACPI and generic DT bindings, an SMMU will be probed before
2146 * any device which might need it, so we want the bus ops in place
2147 * ready to handle default domain setup as soon as any SMMU exists.
2148 */
2149 if (!using_legacy_binding)
2150 arm_smmu_bus_init();
2151
Will Deacon45ae7cf2013-06-24 18:31:25 +01002152 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002153}
2154
Robin Murphyf6810c12017-04-10 16:51:05 +05302155/*
2156 * With the legacy DT binding in play, though, we have no guarantees about
2157 * probe order, but then we're also not doing default domains, so we can
2158 * delay setting bus ops until we're sure every possible SMMU is ready,
2159 * and that way ensure that no add_device() calls get missed.
2160 */
2161static int arm_smmu_legacy_bus_init(void)
2162{
2163 if (using_legacy_binding)
2164 arm_smmu_bus_init();
2165 return 0;
2166}
2167device_initcall_sync(arm_smmu_legacy_bus_init);
2168
Will Deacon45ae7cf2013-06-24 18:31:25 +01002169static int arm_smmu_device_remove(struct platform_device *pdev)
2170{
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002171 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002172
2173 if (!smmu)
2174 return -ENODEV;
2175
Will Deaconecfadb62013-07-31 19:21:28 +01002176 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Robin Murphyd6fc5d92016-09-12 17:13:52 +01002177 dev_err(&pdev->dev, "removing device with active domains!\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01002178
Will Deacon45ae7cf2013-06-24 18:31:25 +01002179 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07002180 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002181 return 0;
2182}
2183
Nate Watterson7aa86192017-06-29 18:18:15 -04002184static void arm_smmu_device_shutdown(struct platform_device *pdev)
2185{
2186 arm_smmu_device_remove(pdev);
2187}
2188
Robin Murphya2d866f2017-08-08 14:56:15 +01002189static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
2190{
2191 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2192
2193 arm_smmu_device_reset(smmu);
2194 return 0;
2195}
2196
2197static SIMPLE_DEV_PM_OPS(arm_smmu_pm_ops, NULL, arm_smmu_pm_resume);
2198
Will Deacon45ae7cf2013-06-24 18:31:25 +01002199static struct platform_driver arm_smmu_driver = {
2200 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002201 .name = "arm-smmu",
2202 .of_match_table = of_match_ptr(arm_smmu_of_match),
Robin Murphya2d866f2017-08-08 14:56:15 +01002203 .pm = &arm_smmu_pm_ops,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002204 },
Lorenzo Pieralisibbb8a182016-11-21 10:01:44 +00002205 .probe = arm_smmu_device_probe,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002206 .remove = arm_smmu_device_remove,
Nate Watterson7aa86192017-06-29 18:18:15 -04002207 .shutdown = arm_smmu_device_shutdown,
Will Deacon45ae7cf2013-06-24 18:31:25 +01002208};
Robin Murphyf6810c12017-04-10 16:51:05 +05302209module_platform_driver(arm_smmu_driver);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002210
Robin Murphyf6810c12017-04-10 16:51:05 +05302211IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", NULL);
2212IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", NULL);
2213IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", NULL);
2214IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", NULL);
2215IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", NULL);
2216IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", NULL);
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00002217
Will Deacon45ae7cf2013-06-24 18:31:25 +01002218MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2219MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
2220MODULE_LICENSE("GPL v2");