/*
 * IPMMU VMSA
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>
#else
#define arm_iommu_create_mapping(...)	NULL
#define arm_iommu_attach_device(...)	-ENODEV
#define arm_iommu_release_mapping(...)	do {} while (0)
#define arm_iommu_detach_device(...)	do {} while (0)
#endif

#include "io-pgtable.h"

#define IPMMU_CTX_MAX 8

struct ipmmu_features {
	bool use_ns_alias_offset;
	bool has_cache_leaf_nodes;
	unsigned int number_of_contexts;
};

struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;
	struct iommu_device iommu;
	struct ipmmu_vmsa_device *root;
	const struct ipmmu_features *features;
	unsigned int num_utlbs;
	unsigned int num_ctx;
	spinlock_t lock;			/* Protects ctx and domains[] */
	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];

	struct iommu_group *group;
	struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
	struct iommu_domain io_domain;

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;

	unsigned int context_id;
	spinlock_t lock;			/* Protects mappings */
};

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
	return dev->iommu_fwspec ? dev->iommu_fwspec->iommu_priv : NULL;
}

#define TLB_LOOP_TIMEOUT		100	/* 100us */

/* -----------------------------------------------------------------------------
 * Registers Definition
 */

#define IM_NS_ALIAS_OFFSET		0x800

#define IM_CTX_SIZE			0x40

#define IMCTR				0x0000
#define IMCTR_TRE			(1 << 17)
#define IMCTR_AFE			(1 << 16)
#define IMCTR_RTSEL_MASK		(3 << 4)
#define IMCTR_RTSEL_SHIFT		4
#define IMCTR_TREN			(1 << 3)
#define IMCTR_INTEN			(1 << 2)
#define IMCTR_FLUSH			(1 << 1)
#define IMCTR_MMUEN			(1 << 0)

#define IMCAAR				0x0004

#define IMTTBCR				0x0008
#define IMTTBCR_EAE			(1 << 31)
#define IMTTBCR_PMB			(1 << 30)
#define IMTTBCR_SH1_NON_SHAREABLE	(0 << 28)
#define IMTTBCR_SH1_OUTER_SHAREABLE	(2 << 28)
#define IMTTBCR_SH1_INNER_SHAREABLE	(3 << 28)
#define IMTTBCR_SH1_MASK		(3 << 28)
#define IMTTBCR_ORGN1_NC		(0 << 26)
#define IMTTBCR_ORGN1_WB_WA		(1 << 26)
#define IMTTBCR_ORGN1_WT		(2 << 26)
#define IMTTBCR_ORGN1_WB		(3 << 26)
#define IMTTBCR_ORGN1_MASK		(3 << 26)
#define IMTTBCR_IRGN1_NC		(0 << 24)
#define IMTTBCR_IRGN1_WB_WA		(1 << 24)
#define IMTTBCR_IRGN1_WT		(2 << 24)
#define IMTTBCR_IRGN1_WB		(3 << 24)
#define IMTTBCR_IRGN1_MASK		(3 << 24)
#define IMTTBCR_TSZ1_MASK		(7 << 16)
#define IMTTBCR_TSZ1_SHIFT		16
#define IMTTBCR_SH0_NON_SHAREABLE	(0 << 12)
#define IMTTBCR_SH0_OUTER_SHAREABLE	(2 << 12)
#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)
#define IMTTBCR_SH0_MASK		(3 << 12)
#define IMTTBCR_ORGN0_NC		(0 << 10)
#define IMTTBCR_ORGN0_WB_WA		(1 << 10)
#define IMTTBCR_ORGN0_WT		(2 << 10)
#define IMTTBCR_ORGN0_WB		(3 << 10)
#define IMTTBCR_ORGN0_MASK		(3 << 10)
#define IMTTBCR_IRGN0_NC		(0 << 8)
#define IMTTBCR_IRGN0_WB_WA		(1 << 8)
#define IMTTBCR_IRGN0_WT		(2 << 8)
#define IMTTBCR_IRGN0_WB		(3 << 8)
#define IMTTBCR_IRGN0_MASK		(3 << 8)
#define IMTTBCR_SL0_LVL_2		(0 << 4)
#define IMTTBCR_SL0_LVL_1		(1 << 4)
#define IMTTBCR_TSZ0_MASK		(7 << 0)
#define IMTTBCR_TSZ0_SHIFT		0

#define IMBUSCR				0x000c
#define IMBUSCR_DVM			(1 << 2)
#define IMBUSCR_BUSSEL_SYS		(0 << 0)
#define IMBUSCR_BUSSEL_CCI		(1 << 0)
#define IMBUSCR_BUSSEL_IMCAAR		(2 << 0)
#define IMBUSCR_BUSSEL_CCI_IMCAAR	(3 << 0)
#define IMBUSCR_BUSSEL_MASK		(3 << 0)

#define IMTTLBR0			0x0010
#define IMTTUBR0			0x0014
#define IMTTLBR1			0x0018
#define IMTTUBR1			0x001c

#define IMSTR				0x0020
#define IMSTR_ERRLVL_MASK		(3 << 12)
#define IMSTR_ERRLVL_SHIFT		12
#define IMSTR_ERRCODE_TLB_FORMAT	(1 << 8)
#define IMSTR_ERRCODE_ACCESS_PERM	(4 << 8)
#define IMSTR_ERRCODE_SECURE_ACCESS	(5 << 8)
#define IMSTR_ERRCODE_MASK		(7 << 8)
#define IMSTR_MHIT			(1 << 4)
#define IMSTR_ABORT			(1 << 2)
#define IMSTR_PF			(1 << 1)
#define IMSTR_TF			(1 << 0)

#define IMMAIR0				0x0028
#define IMMAIR1				0x002c
#define IMMAIR_ATTR_MASK		0xff
#define IMMAIR_ATTR_DEVICE		0x04
#define IMMAIR_ATTR_NC			0x44
#define IMMAIR_ATTR_WBRWA		0xff
#define IMMAIR_ATTR_SHIFT(n)		((n) << 3)
#define IMMAIR_ATTR_IDX_NC		0
#define IMMAIR_ATTR_IDX_WBRWA		1
#define IMMAIR_ATTR_IDX_DEV		2

#define IMEAR				0x0030

#define IMPCTR				0x0200
#define IMPSTR				0x0208
#define IMPEAR				0x020c
#define IMPMBA(n)			(0x0280 + ((n) * 4))
#define IMPMBD(n)			(0x02c0 + ((n) * 4))

#define IMUCTR(n)			(0x0300 + ((n) * 16))
#define IMUCTR_FIXADDEN			(1 << 31)
#define IMUCTR_FIXADD_MASK		(0xff << 16)
#define IMUCTR_FIXADD_SHIFT		16
#define IMUCTR_TTSEL_MMU(n)		((n) << 4)
#define IMUCTR_TTSEL_PMB		(8 << 4)
#define IMUCTR_TTSEL_MASK		(15 << 4)
#define IMUCTR_FLUSH			(1 << 1)
#define IMUCTR_MMUEN			(1 << 0)

#define IMUASID(n)			(0x0308 + ((n) * 16))
#define IMUASID_ASID8_MASK		(0xff << 8)
#define IMUASID_ASID8_SHIFT		8
#define IMUASID_ASID0_MASK		(0xff << 0)
#define IMUASID_ASID0_SHIFT		0

/* -----------------------------------------------------------------------------
 * Root device handling
 */

static struct platform_driver ipmmu_driver;

static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
{
	return mmu->root == mmu;
}

static int __ipmmu_check_device(struct device *dev, void *data)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	struct ipmmu_vmsa_device **rootp = data;

	if (ipmmu_is_root(mmu))
		*rootp = mmu;

	return 0;
}

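/*
 * Walk all devices bound to this driver and return the one marked as root.
 * driver_for_each_device() returns 0 once the iteration completes, so a
 * NULL result simply means the root instance has not been probed yet.
 */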
228static struct ipmmu_vmsa_device *ipmmu_find_root(void)
229{
230 struct ipmmu_vmsa_device *root = NULL;
231
232 return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
233 __ipmmu_check_device) == 0 ? root : NULL;
234}
235
236/* -----------------------------------------------------------------------------
Laurent Pinchartd25a2a12014-04-02 12:47:37 +0200237 * Read/Write Access
238 */
239
240static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
241{
242 return ioread32(mmu->base + offset);
243}
244
245static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
246 u32 data)
247{
248 iowrite32(data, mmu->base + offset);
249}
250
Magnus Dammd5748932017-10-16 21:30:18 +0900251static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
252 unsigned int reg)
Laurent Pinchartd25a2a12014-04-02 12:47:37 +0200253{
Magnus Dammfd5140e2017-10-16 21:29:36 +0900254 return ipmmu_read(domain->mmu->root,
255 domain->context_id * IM_CTX_SIZE + reg);
Laurent Pinchartd25a2a12014-04-02 12:47:37 +0200256}
257
Magnus Dammd5748932017-10-16 21:30:18 +0900258static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
259 unsigned int reg, u32 data)
Laurent Pinchartd25a2a12014-04-02 12:47:37 +0200260{
Magnus Dammfd5140e2017-10-16 21:29:36 +0900261 ipmmu_write(domain->mmu->root,
262 domain->context_id * IM_CTX_SIZE + reg, data);
Laurent Pinchartd25a2a12014-04-02 12:47:37 +0200263}
264
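/*
 * Write a context register to both the leaf IPMMU that the domain is
 * attached to and to the root IPMMU. On multi-IPMMU (cache/root) setups
 * some registers, IMCTR in particular, must be mirrored in both places.
 */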
static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
				unsigned int reg, u32 data)
{
	if (domain->mmu != domain->mmu->root)
		ipmmu_write(domain->mmu,
			    domain->context_id * IM_CTX_SIZE + reg, data);

	ipmmu_write(domain->mmu->root,
		    domain->context_id * IM_CTX_SIZE + reg, data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	unsigned int count = 0;

	while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(domain->mmu->dev,
				"TLB sync timed out -- MMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read_root(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	ipmmu_ctx_write_all(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to ? */
	ipmmu_write(mmu, IMUASID(utlb), 0);
	/* TODO: Do we need to flush the microTLB ? */
	ipmmu_write(mmu, IMUCTR(utlb),
		    IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
		    IMUCTR_MMUEN);
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
			       unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	ipmmu_write(mmu, IMUCTR(utlb), 0);
}

static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	/* The hardware doesn't support selective TLB flush. */
}

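/*
 * The hardware can only invalidate the whole TLB for a context, so
 * tlb_add_flush() is a no-op and tlb_sync performs a full flush.
 */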
static const struct iommu_gather_ops ipmmu_gather_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_add_flush = ipmmu_tlb_add_flush,
	.tlb_sync = ipmmu_tlb_flush_all,
};

/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

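/*
 * Allocate a free context slot on the root IPMMU. The bitmap and the
 * domains[] array are protected by mmu->lock so the IRQ handler always
 * sees a consistent view of the active contexts.
 */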
static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
					 struct ipmmu_vmsa_domain *domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mmu->lock, flags);

	ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
	if (ret != mmu->num_ctx) {
		mmu->domains[ret] = domain;
		set_bit(ret, mmu->ctx);
	} else {
		ret = -EBUSY;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
				      unsigned int context_id)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	clear_bit(context_id, mmu->ctx);
	mmu->domains[context_id] = NULL;

	spin_unlock_irqrestore(&mmu->lock, flags);
}

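/*
 * Set up a translation context: configure the page table format, allocate
 * a context slot on the root IPMMU, then program TTBR0, TTBCR, MAIR0 and
 * IMBUSCR before enabling translation through IMCTR.
 */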
static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	u64 ttbr;
	int ret;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if any of the NStable and NS bits isn't set when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_gather_ops;
	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
	domain->io_domain.geometry.force_aperture = true;
	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.iommu_dev = domain->mmu->root->dev;

	/*
	 * Find an unused context.
	 */
	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
	if (ret < 0)
		return ret;

	domain->context_id = ret;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop) {
		ipmmu_domain_free_context(domain->mmu->root,
					  domain->context_id);
		return -EINVAL;
	}

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors with inner-shareable WBWA tables and allocate
	 * the whole 32-bit VA space to TTBR0.
	 */
	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE |
			     IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
			     IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);

	/* MAIR0 */
	ipmmu_ctx_write_root(domain, IMMAIR0,
			     domain->cfg.arm_lpae_s1_cfg.mair[0]);

	/* IMBUSCR */
	ipmmu_ctx_write_root(domain, IMBUSCR,
			     ipmmu_ctx_read_root(domain, IMBUSCR) &
			     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write_all(domain, IMCTR,
			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);

	return 0;
}

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is TLB flush really needed ?
	 */
	ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	u32 status;
	u32 iova;

	status = ipmmu_ctx_read_root(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read_root(domain, IMEAR);

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before,
	 * otherwise its value will be 0.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%08x\n",
			    status, iova);

	return IRQ_HANDLED;
}

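/*
 * A single IRQ line is shared by all contexts of an IPMMU instance, so
 * poll every active domain and let each one check its own IMSTR flags.
 */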
static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	irqreturn_t status = IRQ_NONE;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	/*
	 * Check interrupts for all active contexts.
	 */
	for (i = 0; i < mmu->num_ctx; i++) {
		if (!mmu->domains[i])
			continue;
		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
			status = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return status;
}

/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
{
	struct ipmmu_vmsa_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	spin_lock_init(&domain->lock);

	return &domain->io_domain;
}

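/*
 * DMA domains additionally get an iova cookie, used by the generic IOMMU
 * DMA layer for address allocation; unmanaged domains don't need one.
 */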
static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
	struct iommu_domain *io_domain = NULL;

	switch (type) {
	case IOMMU_DOMAIN_UNMANAGED:
		io_domain = __ipmmu_domain_alloc(type);
		break;

	case IOMMU_DOMAIN_DMA:
		io_domain = __ipmmu_domain_alloc(type);
		if (io_domain && iommu_get_dma_cookie(io_domain)) {
			kfree(io_domain);
			io_domain = NULL;
		}
		break;
	}

	return io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	iommu_put_dma_cookie(io_domain);
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}

static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned long flags;
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&domain->lock, flags);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
		if (ret < 0) {
			dev_err(dev, "Unable to initialize IPMMU context\n");
			domain->mmu = NULL;
		} else {
			dev_info(dev, "Using IPMMU context %u\n",
				 domain->context_id);
		}
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
			dev_name(mmu->dev), dev_name(domain->mmu->dev));
		ret = -EINVAL;
	} else {
		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	if (ret < 0)
		return ret;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_enable(domain, fwspec->ids[i]);

	return 0;
}

static void ipmmu_detach_device(struct iommu_domain *io_domain,
				struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_disable(domain, fwspec->ids[i]);

	/*
	 * TODO: Optimize by disabling the context when no device is attached.
	 */
}

static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (!domain)
		return -ENODEV;

	return domain->iop->map(domain->iop, iova, paddr, size, prot);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t size)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap(domain->iop, iova, size);
}

static void ipmmu_iotlb_sync(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (domain->mmu)
		ipmmu_tlb_flush_all(domain);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed ? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}

static int ipmmu_init_platform_device(struct device *dev,
				      struct of_phandle_args *args)
{
	struct platform_device *ipmmu_pdev;

	ipmmu_pdev = of_find_device_by_node(args->np);
	if (!ipmmu_pdev)
		return -ENODEV;

	dev->iommu_fwspec->iommu_priv = platform_get_drvdata(ipmmu_pdev);
	return 0;
}

static int ipmmu_of_xlate(struct device *dev,
			  struct of_phandle_args *spec)
{
	iommu_fwspec_add_ids(dev, spec->args, 1);

	/* Initialize once - xlate() will call multiple times */
	if (to_ipmmu(dev))
		return 0;

	return ipmmu_init_platform_device(dev, spec);
}

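/*
 * Illustrative device tree fragment (node names and addresses are made up,
 * not taken from any real DTS) showing how a bus master is connected to
 * the IPMMU. Each "iommus" entry carries one cell, the microTLB index,
 * which ipmmu_of_xlate() above records through iommu_fwspec_add_ids():
 *
 *	ipmmu_xy: mmu@e6740000 {
 *		compatible = "renesas,ipmmu-vmsa";
 *		reg = <0 0xe6740000 0 0x1000>;
 *		interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>;
 *		#iommu-cells = <1>;
 *	};
 *
 *	&some_dma_master {
 *		iommus = <&ipmmu_xy 13>;
 *	};
 */
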
static int ipmmu_init_arm_mapping(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;
	int ret;

	/* Create a device group and add the device to it. */
	group = iommu_group_alloc();
	if (IS_ERR(group)) {
		dev_err(dev, "Failed to allocate IOMMU group\n");
		return PTR_ERR(group);
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	if (ret < 0) {
		dev_err(dev, "Failed to add device to IPMMU group\n");
		return ret;
	}

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable ? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	iommu_group_remove_device(dev);
	if (mmu->mapping)
		arm_iommu_release_mapping(mmu->mapping);

	return ret;
}

static int ipmmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	/*
	 * Only let through devices that have been verified in xlate().
	 */
	if (!to_ipmmu(dev))
		return -ENODEV;

	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
		return ipmmu_init_arm_mapping(dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void ipmmu_remove_device(struct device *dev)
{
	arm_iommu_detach_device(dev);
	iommu_group_remove_device(dev);
}

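/*
 * All masters behind one IPMMU instance share a single IOMMU group,
 * cached in mmu->group and reference-counted on each lookup.
 */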
static struct iommu_group *ipmmu_find_group(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;

	if (mmu->group)
		return iommu_group_ref_get(mmu->group);

	group = iommu_group_alloc();
	if (!IS_ERR(group))
		mmu->group = group;

	return group;
}

static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.domain_free = ipmmu_domain_free,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.flush_iotlb_all = ipmmu_iotlb_sync,
	.iotlb_sync = ipmmu_iotlb_sync,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = ipmmu_iova_to_phys,
	.add_device = ipmmu_add_device,
	.remove_device = ipmmu_remove_device,
	.device_group = ipmmu_find_group,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
	.of_xlate = ipmmu_of_xlate,
};

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < mmu->num_ctx; ++i)
		ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
}

static const struct ipmmu_features ipmmu_features_default = {
	.use_ns_alias_offset = true,
	.has_cache_leaf_nodes = false,
	.number_of_contexts = 1, /* software only tested with one context */
};

static const struct of_device_id ipmmu_of_ids[] = {
	{
		.compatible = "renesas,ipmmu-vmsa",
		.data = &ipmmu_features_default,
	}, {
		/* Terminator */
	},
};

MODULE_DEVICE_TABLE(of, ipmmu_of_ids);

static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	mmu->num_utlbs = 32;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
	mmu->features = of_device_get_match_data(&pdev->dev);
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the registers base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	if (mmu->features->use_ns_alias_offset)
		mmu->base += IM_NS_ALIAS_OFFSET;

	mmu->num_ctx = min_t(unsigned int, IPMMU_CTX_MAX,
			     mmu->features->number_of_contexts);

	irq = platform_get_irq(pdev, 0);

	/*
	 * Determine if this IPMMU instance is a root device by checking for
	 * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
	 */
	if (!mmu->features->has_cache_leaf_nodes ||
	    !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
		mmu->root = mmu;
	else
		mmu->root = ipmmu_find_root();

	/*
	 * Wait until the root device has been registered for sure.
	 */
	if (!mmu->root)
		return -EPROBE_DEFER;

	/* Root devices have mandatory IRQs */
	if (ipmmu_is_root(mmu)) {
		if (irq < 0) {
			dev_err(&pdev->dev, "no IRQ found\n");
			return irq;
		}

		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
				       dev_name(&pdev->dev), mmu);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
			return ret;
		}

		ipmmu_device_reset(mmu);
	}

	/*
	 * Register the IPMMU to the IOMMU subsystem in the following cases:
	 * - R-Car Gen2 IPMMU (all devices registered)
	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
	 */
	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
					     dev_name(&pdev->dev));
		if (ret)
			return ret;

		iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
		iommu_device_set_fwnode(&mmu->iommu,
					&pdev->dev.of_node->fwnode);

		ret = iommu_device_register(&mmu->iommu);
		if (ret)
			return ret;

#if defined(CONFIG_IOMMU_DMA)
		if (!iommu_present(&platform_bus_type))
			bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif
	}

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */

	platform_set_drvdata(pdev, mmu);

	return 0;
}

static int ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&mmu->iommu);
	iommu_device_unregister(&mmu->iommu);

	arm_iommu_release_mapping(mmu->mapping);

	ipmmu_device_reset(mmu);

	return 0;
}

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
	},
	.probe = ipmmu_probe,
	.remove	= ipmmu_remove,
};

static int __init ipmmu_init(void)
{
	static bool setup_done;
	int ret;

	if (setup_done)
		return 0;

	ret = platform_driver_register(&ipmmu_driver);
	if (ret < 0)
		return ret;

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif

	setup_done = true;
	return 0;
}

static void __exit ipmmu_exit(void)
{
	return platform_driver_unregister(&ipmmu_driver);
}

subsys_initcall(ipmmu_init);
module_exit(ipmmu_exit);

#ifdef CONFIG_IOMMU_DMA
static int __init ipmmu_vmsa_iommu_of_setup(struct device_node *np)
{
	ipmmu_init();
	return 0;
}

IOMMU_OF_DECLARE(ipmmu_vmsa_iommu_of, "renesas,ipmmu-vmsa",
		 ipmmu_vmsa_iommu_of_setup);
#endif

MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");