/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* MMU register offsets */
#define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
#define RK_MMU_STATUS		0x04
#define RK_MMU_COMMAND		0x08
#define RK_MMU_PAGE_FAULT_ADDR	0x0C	/* IOVA of last page fault */
#define RK_MMU_ZAP_ONE_LINE	0x10	/* Shootdown one IOTLB entry */
#define RK_MMU_INT_RAWSTAT	0x14	/* IRQ status ignoring mask */
#define RK_MMU_INT_CLEAR	0x18	/* Acknowledge and re-arm irq */
#define RK_MMU_INT_MASK		0x1C	/* IRQ enable */
#define RK_MMU_INT_STATUS	0x20	/* IRQ status after masking */
#define RK_MMU_AUTO_GATING	0x24

#define DTE_ADDR_DUMMY		0xCAFEBABE

#define RK_MMU_POLL_PERIOD_US		100
#define RK_MMU_FORCE_RESET_TIMEOUT_US	100000
#define RK_MMU_POLL_TIMEOUT_US		1000

/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED       BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE    BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE         BIT(2)
#define RK_MMU_STATUS_IDLE                 BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY  BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE  BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE     BIT(31)

/* RK_MMU_COMMAND command values */
#define RK_MMU_CMD_ENABLE_PAGING    0  /* Enable memory translation */
#define RK_MMU_CMD_DISABLE_PAGING   1  /* Disable memory translation */
#define RK_MMU_CMD_ENABLE_STALL     2  /* Stall paging to allow other cmds */
#define RK_MMU_CMD_DISABLE_STALL    3  /* Stop stall re-enables paging */
#define RK_MMU_CMD_ZAP_CACHE        4  /* Shoot down entire IOTLB */
#define RK_MMU_CMD_PAGE_FAULT_DONE  5  /* Clear page fault */
#define RK_MMU_CMD_FORCE_RESET      6  /* Reset all registers */

/* RK_MMU_INT_* register fields */
#define RK_MMU_IRQ_PAGE_FAULT    0x01  /* page fault */
#define RK_MMU_IRQ_BUS_ERROR     0x02  /* bus read error */
#define RK_MMU_IRQ_MASK          (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)

#define NUM_DT_ENTRIES 1024
#define NUM_PT_ENTRIES 1024

#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)

/*
 * Support mapping any size that fits in one page table:
 * 4 KiB to 4 MiB
 */
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000

struct rk_iommu_domain {
	struct list_head iommus;
	u32 *dt; /* page directory table */
	dma_addr_t dt_dma;
	spinlock_t iommus_lock; /* lock for iommus list */
	spinlock_t dt_lock; /* lock for modifying page directory table */

	struct iommu_domain domain;
};

/* list of clocks required by IOMMU */
static const char * const rk_iommu_clocks[] = {
	"aclk", "iface",
};

struct rk_iommu {
	struct device *dev;
	void __iomem **bases;
	int num_mmu;
	struct clk_bulk_data *clocks;
	int num_clocks;
	bool reset_disabled;
	struct iommu_device iommu;
	struct list_head node; /* entry in rk_iommu_domain.iommus */
	struct iommu_domain *domain; /* domain to which iommu is attached */
};

static struct device *dma_dev;

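/*
 * Page tables are written by the CPU but read by the IOMMU over the bus, so
 * every table update must be synced out to memory (DMA_TO_DEVICE) before the
 * hardware can observe it.
 */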
static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
				  unsigned int count)
{
	size_t size = count * sizeof(u32); /* count of u32 entry */

	dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
}

static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct rk_iommu_domain, domain);
}

/*
 * The Rockchip rk3288 iommu uses a 2-level page table.
 * The first level is the "Directory Table" (DT).
 * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
 * to a "Page Table".
 * The second level is the 1024 Page Tables (PT).
 * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
 * a 4 KB page of physical memory.
 *
 * The DT and each PT fits in a single 4 KB page (4-bytes * 1024 entries).
 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
 * address of the start of the DT page.
 *
 * The structure of the page table is as follows:
 *
 *                   DT
 * MMU_DTE_ADDR -> +-----+
 *                 |     |
 *                 +-----+     PT
 *                 | DTE | -> +-----+
 *                 +-----+    |     |     Memory
 *                 |     |    +-----+     Page
 *                 |     |    | PTE | -> +-----+
 *                 +-----+    +-----+    |     |
 *                            |     |    |     |
 *                            |     |    |     |
 *                            +-----+    |     |
 *                                       |     |
 *                                       |     |
 *                                       +-----+
 */

/*
 * Each DTE has a PT address and a valid bit:
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:12 - PT address (PTs always starts on a 4 KB boundary)
 *  11: 1 - Reserved
 *      0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK    0xfffff000
#define RK_DTE_PT_VALID           BIT(0)

static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}

static inline bool rk_dte_is_pt_valid(u32 dte)
{
	return dte & RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}

/*
 * Each PTE has a Page address, some flags and a valid bit:
 * +---------------------+---+-------+-+
 * | Page address        |Rsv| Flags |V|
 * +---------------------+---+-------+-+
 *  31:12 - Page address (Pages always start on a 4 KB boundary)
 *  11: 9 - Reserved
 *   8: 1 - Flags
 *      8 - Read allocate - allocate cache space on read misses
 *      7 - Read cache - enable cache & prefetch of data
 *      6 - Write buffer - enable delaying writes on their way to memory
 *      5 - Write allocate - allocate cache space on write misses
 *      4 - Write cache - different writes can be merged together
 *      3 - Override cache attributes
 *          if 1, bits 4-8 control cache attributes
 *          if 0, the system bus defaults are used
 *      2 - Writable
 *      1 - Readable
 *      0 - 1 if Page @ Page address is valid
 */
#define RK_PTE_PAGE_ADDRESS_MASK  0xfffff000
#define RK_PTE_PAGE_FLAGS_MASK    0x000001fe
#define RK_PTE_PAGE_WRITABLE      BIT(2)
#define RK_PTE_PAGE_READABLE      BIT(1)
#define RK_PTE_PAGE_VALID         BIT(0)

static inline phys_addr_t rk_pte_page_address(u32 pte)
{
	return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
}

static inline bool rk_pte_is_page_valid(u32 pte)
{
	return pte & RK_PTE_PAGE_VALID;
}

/* TODO: set cache flags per prot IOMMU_CACHE */
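/*
 * For example, rk_mk_pte(0x12345000, IOMMU_READ | IOMMU_WRITE) yields
 * 0x12345007: page address 0x12345000, readable (bit 1), writable (bit 2)
 * and valid (bit 0), with all cache-attribute flags left clear.
 */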
static u32 rk_mk_pte(phys_addr_t page, int prot)
{
	u32 flags = 0;
	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
	page &= RK_PTE_PAGE_ADDRESS_MASK;
	return page | flags | RK_PTE_PAGE_VALID;
}

static u32 rk_mk_pte_invalid(u32 pte)
{
	return pte & ~RK_PTE_PAGE_VALID;
}

/*
 * rk3288 iova (IOMMU Virtual Address) format
 *  31       22.21       12.11          0
 * +-----------+-----------+-------------+
 * | DTE index | PTE index | Page offset |
 * +-----------+-----------+-------------+
 *  31:22 - DTE index   - index of DTE in DT
 *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
 *  11: 0 - Page offset - offset into page @ PTE.page_address
 */
#define RK_IOVA_DTE_MASK    0xffc00000
#define RK_IOVA_DTE_SHIFT   22
#define RK_IOVA_PTE_MASK    0x003ff000
#define RK_IOVA_PTE_SHIFT   12
#define RK_IOVA_PAGE_MASK   0x00000fff
#define RK_IOVA_PAGE_SHIFT  0

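/*
 * The three helpers below split an iova according to the layout above.
 * For example, iova 0x12345678 decomposes into DTE index 0x048, PTE index
 * 0x345 and page offset 0x678.
 */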
static u32 rk_iova_dte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}

static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
	writel(value, base + offset);
}

static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}

static void rk_iommu_base_command(void __iomem *base, u32 command)
{
	writel(command, base + RK_MMU_COMMAND);
}
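
/*
 * Invalidate the IOTLB entries covering [iova_start, iova_start + size) on
 * every MMU instance belonging to this IOMMU.
 */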
static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
			       size_t size)
{
	int i;
	dma_addr_t iova_end = iova_start + size;
	/*
	 * TODO(djkurtz): Figure out when it is more efficient to shootdown the
	 * entire iotlb rather than iterate over individual iovas.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		dma_addr_t iova;

		for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
	}
}

static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
	bool active = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
			     RK_MMU_STATUS_STALL_ACTIVE);

	return active;
}

static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
	bool enable = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
			     RK_MMU_STATUS_PAGING_ENABLED);

	return enable;
}

static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
{
	bool done = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;

	return done;
}

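/*
 * Stalling pauses translation so that other commands (zap, force reset, ...)
 * can be issued safely. Per the command definitions above, a stall can only
 * be enabled while paging is enabled, which is why the helpers below check
 * for that first. Each command is polled for completion with a timeout.
 */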
static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_stall_active(iommu))
		return 0;

	/* Stall can only be enabled if paging is enabled */
	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_stall_active(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

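/*
 * Reset all MMU registers. DTE_ADDR is first exercised with a dummy value to
 * verify that the block responds before the FORCE_RESET command is issued
 * and polled for completion.
 */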
static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
	int ret, i;
	u32 dte_addr;
	bool val;

	if (iommu->reset_disabled)
		return 0;

	/*
	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
	 * and verifying that upper 5 nybbles are read back.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);

		dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
		if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
			return -EFAULT;
		}
	}

	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

	ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
				 val, RK_MMU_FORCE_RESET_TIMEOUT_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret) {
		dev_err(iommu->dev, "FORCE_RESET command timed out\n");
		return ret;
	}

	return 0;
}

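/*
 * Walk the page tables in software and log the DTE, PTE and page address
 * seen for a faulting iova, to help diagnose page faults.
 */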
static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
	void __iomem *base = iommu->bases[index];
	u32 dte_index, pte_index, page_offset;
	u32 mmu_dte_addr;
	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
	u32 *dte_addr;
	u32 dte;
	phys_addr_t pte_addr_phys = 0;
	u32 *pte_addr = NULL;
	u32 pte = 0;
	phys_addr_t page_addr_phys = 0;
	u32 page_flags = 0;

	dte_index = rk_iova_dte_index(iova);
	pte_index = rk_iova_pte_index(iova);
	page_offset = rk_iova_page_offset(iova);

	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
	mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;

	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
	dte_addr = phys_to_virt(dte_addr_phys);
	dte = *dte_addr;

	if (!rk_dte_is_pt_valid(dte))
		goto print_it;

	pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
	pte_addr = phys_to_virt(pte_addr_phys);
	pte = *pte_addr;

	if (!rk_pte_is_page_valid(pte))
		goto print_it;

	page_addr_phys = rk_pte_page_address(pte) + page_offset;
	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;

print_it:
	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
		&iova, dte_index, pte_index, page_offset);
	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
		&mmu_dte_addr_phys, &dte_addr_phys, dte,
		rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
		rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}

static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
	struct rk_iommu *iommu = dev_id;
	u32 status;
	u32 int_status;
	dma_addr_t iova;
	irqreturn_t ret = IRQ_NONE;
	int i;

	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));

	for (i = 0; i < iommu->num_mmu; i++) {
		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
		if (int_status == 0)
			continue;

		ret = IRQ_HANDLED;
		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
			int flags;

			status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
			flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
				IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
				&iova,
				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");

			log_iova(iommu, i, iova);

			/*
			 * Report page fault to any installed handlers.
			 * Ignore the return code, though, since we always zap cache
			 * and clear the page fault anyway.
			 */
			if (iommu->domain)
				report_iommu_fault(iommu->domain, iommu->dev, iova,
						   flags);
			else
				dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");

			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
		}

		if (int_status & RK_MMU_IRQ_BUS_ERROR)
			dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);

		if (int_status & ~RK_MMU_IRQ_MASK)
			dev_err(iommu->dev, "unexpected int_status: %#08x\n",
				int_status);

		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
	}

	clk_bulk_disable(iommu->num_clocks, iommu->clocks);

	return ret;
}

static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	phys_addr_t pt_phys, phys = 0;
	u32 dte, pte;
	u32 *page_table;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	if (!rk_dte_is_pt_valid(dte))
		goto out;

	pt_phys = rk_dte_pt_address(dte);
	page_table = (u32 *)phys_to_virt(pt_phys);
	pte = page_table[rk_iova_pte_index(iova)];
	if (!rk_pte_is_page_valid(pte))
		goto out;

	phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
out:
	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return phys;
}

static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
			      dma_addr_t iova, size_t size)
{
	struct list_head *pos;
	unsigned long flags;

	/* shootdown these iova from all iommus using this domain */
	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_for_each(pos, &rk_domain->iommus) {
		struct rk_iommu *iommu;
		iommu = list_entry(pos, struct rk_iommu, node);
		WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
		rk_iommu_zap_lines(iommu, iova, size);
		clk_bulk_disable(iommu->num_clocks, iommu->clocks);
	}
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}

static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
					 dma_addr_t iova, size_t size)
{
	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
	if (size > SPAGE_SIZE)
		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
				  SPAGE_SIZE);
}

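/*
 * Return the page table for the given iova, allocating it if necessary: a
 * new zeroed page is DMA-mapped for the IOMMU and installed in the directory
 * table, and both the new table and the updated DTE are flushed to memory.
 */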
static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
				  dma_addr_t iova)
{
	u32 *page_table, *dte_addr;
	u32 dte_index, dte;
	phys_addr_t pt_phys;
	dma_addr_t pt_dma;

	assert_spin_locked(&rk_domain->dt_lock);

	dte_index = rk_iova_dte_index(iova);
	dte_addr = &rk_domain->dt[dte_index];
	dte = *dte_addr;
	if (rk_dte_is_pt_valid(dte))
		goto done;

	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, pt_dma)) {
		dev_err(dma_dev, "DMA mapping error while allocating page table\n");
		free_page((unsigned long)page_table);
		return ERR_PTR(-ENOMEM);
	}

	dte = rk_mk_dte(pt_dma);
	*dte_addr = dte;

	rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
	rk_table_flush(rk_domain,
		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
	pt_phys = rk_dte_pt_address(dte);
	return (u32 *)phys_to_virt(pt_phys);
}

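/*
 * Invalidate up to size / SPAGE_SIZE PTEs starting at pte_addr, stopping at
 * the first entry that is already invalid, and return the number of bytes
 * actually unmapped.
 */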
static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
				  u32 *pte_addr, dma_addr_t pte_dma,
				  size_t size)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];
		if (!rk_pte_is_page_valid(pte))
			break;

		pte_addr[pte_count] = rk_mk_pte_invalid(pte);
	}

	rk_table_flush(rk_domain, pte_dma, pte_count);

	return pte_count * SPAGE_SIZE;
}

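/*
 * Install PTEs for a physically contiguous region. If any slot is already
 * mapped, the PTEs written so far are unwound and -EADDRINUSE is returned.
 */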
static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
			     dma_addr_t pte_dma, dma_addr_t iova,
			     phys_addr_t paddr, size_t size, int prot)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;
	phys_addr_t page_phys;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];

		if (rk_pte_is_page_valid(pte))
			goto unwind;

		pte_addr[pte_count] = rk_mk_pte(paddr, prot);

		paddr += SPAGE_SIZE;
	}

	rk_table_flush(rk_domain, pte_dma, pte_total);

	/*
	 * Zap the first and last iova to evict from iotlb any previously
	 * mapped cachelines holding stale values for its dte and pte.
	 * We only zap the first and last iova, since only they could have
	 * dte or pte shared with an existing mapping.
	 */
	rk_iommu_zap_iova_first_last(rk_domain, iova, size);

	return 0;
unwind:
	/* Unmap the range of iovas that we just mapped */
	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
			    pte_count * SPAGE_SIZE);

	iova += pte_count * SPAGE_SIZE;
	page_phys = rk_pte_page_address(pte_addr[pte_count]);
	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
	       &iova, &page_phys, &paddr, prot);

	return -EADDRINUSE;
}

static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	u32 *page_table, *pte_addr;
	u32 dte_index, pte_index;
	int ret;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_map() guarantees that both iova and size will be
	 * aligned, we will always only be mapping from a single dte here.
	 */
	page_table = rk_dte_get_page_table(rk_domain, iova);
	if (IS_ERR(page_table)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return PTR_ERR(page_table);
	}

	dte_index = rk_domain->dt[rk_iova_dte_index(iova)];
	pte_index = rk_iova_pte_index(iova);
	pte_addr = &page_table[pte_index];
	pte_dma = rk_dte_pt_address(dte_index) + pte_index * sizeof(u32);
	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
				paddr, size, prot);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return ret;
}

static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
			     size_t size)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	phys_addr_t pt_phys;
	u32 dte;
	u32 *pte_addr;
	size_t unmap_size;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_unmap() guarantees that both iova and size will be
	 * aligned, we will always only be unmapping from a single dte here.
	 */
	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	/* Just return 0 if iova is unmapped */
	if (!rk_dte_is_pt_valid(dte)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return 0;
	}

	pt_phys = rk_dte_pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	/* Shootdown iotlb entries for iova range that was just unmapped */
	rk_iommu_zap_iova(rk_domain, iova, unmap_size);

	return unmap_size;
}

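/*
 * Look up the rk_iommu instance serving a master device through its iommu
 * group's private data (set in rk_iommu_group_set_iommudata()).
 */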
static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
	struct iommu_group *group;
	struct device *iommu_dev;
	struct rk_iommu *rk_iommu;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;
	iommu_dev = iommu_group_get_iommudata(group);
	rk_iommu = dev_get_drvdata(iommu_dev);
	iommu_group_put(group);

	return rk_iommu;
}

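/*
 * Attach sequence: enable clocks, stall and force-reset the MMU, program
 * DTE_ADDR with the domain's directory table, zap the IOTLB, unmask the
 * interrupts and finally re-enable paging.
 */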
static int rk_iommu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret, i;

	/*
	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
	 * Such a device does not belong to an iommu group.
	 */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return 0;

	ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
	if (ret)
		return ret;

	ret = rk_iommu_enable_stall(iommu);
	if (ret)
		goto out_disable_clocks;

	ret = rk_iommu_force_reset(iommu);
	if (ret)
		goto out_disable_stall;

	iommu->domain = domain;

	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
			       rk_domain->dt_dma);
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
	}

	ret = rk_iommu_enable_paging(iommu);
	if (ret)
		goto out_disable_stall;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_add_tail(&iommu->node, &rk_domain->iommus);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	dev_dbg(dev, "Attached to iommu domain\n");

out_disable_stall:
	rk_iommu_disable_stall(iommu);
out_disable_clocks:
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
	return ret;
}

static void rk_iommu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int i;

	/* Allow 'virtual devices' (eg drm) to detach from domain */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_del_init(&iommu->node);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	/* Ignore error while disabling, just keep going */
	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
	rk_iommu_enable_stall(iommu);
	rk_iommu_disable_paging(iommu);
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
	}
	rk_iommu_disable_stall(iommu);
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);

	iommu->domain = NULL;

	dev_dbg(dev, "Detached from iommu domain\n");
}

static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
	struct rk_iommu_domain *rk_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	if (!dma_dev)
		return NULL;

	rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL);
	if (!rk_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&rk_domain->domain))
		return NULL;

	/*
	 * rk32xx iommus use a 2 level pagetable.
	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
	 * Allocate one 4 KiB page for each table.
	 */
	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	if (!rk_domain->dt)
		goto err_put_cookie;

	rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
					   SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
		dev_err(dma_dev, "DMA map error for DT\n");
		goto err_free_dt;
	}

	rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);

	spin_lock_init(&rk_domain->iommus_lock);
	spin_lock_init(&rk_domain->dt_lock);
	INIT_LIST_HEAD(&rk_domain->iommus);

	rk_domain->domain.geometry.aperture_start = 0;
	rk_domain->domain.geometry.aperture_end   = DMA_BIT_MASK(32);
	rk_domain->domain.geometry.force_aperture = true;

	return &rk_domain->domain;

err_free_dt:
	free_page((unsigned long)rk_domain->dt);
err_put_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);

	return NULL;
}

static void rk_iommu_domain_free(struct iommu_domain *domain)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int i;

	WARN_ON(!list_empty(&rk_domain->iommus));

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		u32 dte = rk_domain->dt[i];
		if (rk_dte_is_pt_valid(dte)) {
			phys_addr_t pt_phys = rk_dte_pt_address(dte);
			u32 *page_table = phys_to_virt(pt_phys);
			dma_unmap_single(dma_dev, pt_phys,
					 SPAGE_SIZE, DMA_TO_DEVICE);
			free_page((unsigned long)page_table);
		}
	}

	dma_unmap_single(dma_dev, rk_domain->dt_dma,
			 SPAGE_SIZE, DMA_TO_DEVICE);
	free_page((unsigned long)rk_domain->dt);

	if (domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);
}

static bool rk_iommu_is_dev_iommu_master(struct device *dev)
{
	struct device_node *np = dev->of_node;
	int ret;

	/*
	 * An iommu master has an iommus property containing a list of phandles
	 * to iommu nodes, each with an #iommu-cells property with value 0.
	 */
	ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells");
	return (ret > 0);
}

static int rk_iommu_group_set_iommudata(struct iommu_group *group,
					struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct platform_device *pd;
	int ret;
	struct of_phandle_args args;

	/*
	 * An iommu master has an iommus property containing a list of phandles
	 * to iommu nodes, each with an #iommu-cells property with value 0.
	 */
	ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0,
					 &args);
	if (ret) {
		dev_err(dev, "of_parse_phandle_with_args(%pOF) => %d\n",
			np, ret);
		return ret;
	}
	if (args.args_count != 0) {
		dev_err(dev, "incorrect number of iommu params found for %pOF (found %d, expected 0)\n",
			args.np, args.args_count);
		return -EINVAL;
	}

	pd = of_find_device_by_node(args.np);
	of_node_put(args.np);
	if (!pd) {
		dev_err(dev, "iommu %pOF not found\n", args.np);
		return -EPROBE_DEFER;
	}

	/* TODO(djkurtz): handle multiple slave iommus for a single master */
	iommu_group_set_iommudata(group, &pd->dev, NULL);

	return 0;
}

static int rk_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	struct rk_iommu *iommu;
	int ret;

	if (!rk_iommu_is_dev_iommu_master(dev))
		return -ENODEV;

	group = iommu_group_get(dev);
	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group)) {
			dev_err(dev, "Failed to allocate IOMMU group\n");
			return PTR_ERR(group);
		}
	}

	ret = iommu_group_add_device(group, dev);
	if (ret)
		goto err_put_group;

	ret = rk_iommu_group_set_iommudata(group, dev);
	if (ret)
		goto err_remove_device;

	iommu = rk_iommu_from_dev(dev);
	if (iommu)
		iommu_device_link(&iommu->iommu, dev);

	iommu_group_put(group);

	return 0;

err_remove_device:
	iommu_group_remove_device(dev);
err_put_group:
	iommu_group_put(group);
	return ret;
}

static void rk_iommu_remove_device(struct device *dev)
{
	struct rk_iommu *iommu;

	if (!rk_iommu_is_dev_iommu_master(dev))
		return;

	iommu = rk_iommu_from_dev(dev);
	if (iommu)
		iommu_device_unlink(&iommu->iommu, dev);

	iommu_group_remove_device(dev);
}

static const struct iommu_ops rk_iommu_ops = {
	.domain_alloc = rk_iommu_domain_alloc,
	.domain_free = rk_iommu_domain_free,
	.attach_dev = rk_iommu_attach_device,
	.detach_dev = rk_iommu_detach_device,
	.map = rk_iommu_map,
	.unmap = rk_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.add_device = rk_iommu_add_device,
	.remove_device = rk_iommu_remove_device,
	.iova_to_phys = rk_iommu_iova_to_phys,
	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
};

static int rk_iommu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rk_iommu *iommu;
	struct resource *res;
	int num_res = pdev->num_resources;
	int err, i, irq;

	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	platform_set_drvdata(pdev, iommu);
	iommu->dev = dev;
	iommu->num_mmu = 0;

	iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * num_res,
				    GFP_KERNEL);
	if (!iommu->bases)
		return -ENOMEM;

	for (i = 0; i < num_res; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			continue;
		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(iommu->bases[i]))
			continue;
		iommu->num_mmu++;
	}
	if (iommu->num_mmu == 0)
		return PTR_ERR(iommu->bases[0]);

	i = 0;
	while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
		if (irq < 0)
			return irq;

		err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
				       IRQF_SHARED, dev_name(dev), iommu);
		if (err)
			return err;
	}

	iommu->reset_disabled = device_property_read_bool(dev,
					"rockchip,disable-mmu-reset");

	iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks);
	iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
				     sizeof(*iommu->clocks), GFP_KERNEL);
	if (!iommu->clocks)
		return -ENOMEM;

	for (i = 0; i < iommu->num_clocks; ++i)
		iommu->clocks[i].id = rk_iommu_clocks[i];

	err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
	if (err)
		return err;

	err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
	if (err)
		return err;

	err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
	if (err)
		goto err_unprepare_clocks;

	iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
	err = iommu_device_register(&iommu->iommu);
	if (err)
		goto err_remove_sysfs;

	/*
	 * Use the first registered IOMMU device for domain to use with DMA
	 * API, since a domain might not physically correspond to a single
	 * IOMMU device.
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	return 0;
err_remove_sysfs:
	iommu_device_sysfs_remove(&iommu->iommu);
err_unprepare_clocks:
	clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
	return err;
}

static void rk_iommu_shutdown(struct platform_device *pdev)
{
	struct rk_iommu *iommu = platform_get_drvdata(pdev);

	/*
	 * Be careful not to try to shutdown an otherwise unused
	 * IOMMU, as it is likely not to be clocked, and accessing it
	 * would just block. An IOMMU without a domain is likely to be
	 * unused, so let's use this as a (weak) guard.
	 */
	if (iommu && iommu->domain) {
		rk_iommu_enable_stall(iommu);
		rk_iommu_disable_paging(iommu);
		rk_iommu_force_reset(iommu);
	}
}

static const struct of_device_id rk_iommu_dt_ids[] = {
	{ .compatible = "rockchip,iommu" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);

static struct platform_driver rk_iommu_driver = {
	.probe = rk_iommu_probe,
	.shutdown = rk_iommu_shutdown,
	.driver = {
		   .name = "rk_iommu",
		   .of_match_table = rk_iommu_dt_ids,
		   .suppress_bind_attrs = true,
	},
};

static int __init rk_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, rk_iommu_dt_ids);
	if (!np)
		return 0;

	of_node_put(np);

	ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
	if (ret)
		return ret;

	return platform_driver_register(&rk_iommu_driver);
}
subsys_initcall(rk_iommu_init);

MODULE_DESCRIPTION("IOMMU API for Rockchip");
MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
MODULE_ALIAS("platform:rockchip-iommu");
MODULE_LICENSE("GPL v2");