/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* MMU register offsets */
#define RK_MMU_DTE_ADDR         0x00    /* Directory table address */
#define RK_MMU_STATUS           0x04
#define RK_MMU_COMMAND          0x08
#define RK_MMU_PAGE_FAULT_ADDR  0x0C    /* IOVA of last page fault */
#define RK_MMU_ZAP_ONE_LINE     0x10    /* Shootdown one IOTLB entry */
#define RK_MMU_INT_RAWSTAT      0x14    /* IRQ status ignoring mask */
#define RK_MMU_INT_CLEAR        0x18    /* Acknowledge and re-arm irq */
#define RK_MMU_INT_MASK         0x1C    /* IRQ enable */
#define RK_MMU_INT_STATUS       0x20    /* IRQ status after masking */
#define RK_MMU_AUTO_GATING      0x24

#define DTE_ADDR_DUMMY          0xCAFEBABE

#define RK_MMU_POLL_PERIOD_US           100
#define RK_MMU_FORCE_RESET_TIMEOUT_US   100000
#define RK_MMU_POLL_TIMEOUT_US          1000

/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED       BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE    BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE         BIT(2)
#define RK_MMU_STATUS_IDLE                 BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY  BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE  BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE     BIT(31)

/* RK_MMU_COMMAND command values */
#define RK_MMU_CMD_ENABLE_PAGING    0  /* Enable memory translation */
#define RK_MMU_CMD_DISABLE_PAGING   1  /* Disable memory translation */
#define RK_MMU_CMD_ENABLE_STALL     2  /* Stall paging to allow other cmds */
#define RK_MMU_CMD_DISABLE_STALL    3  /* Stop stall, re-enabling paging */
#define RK_MMU_CMD_ZAP_CACHE        4  /* Shoot down entire IOTLB */
#define RK_MMU_CMD_PAGE_FAULT_DONE  5  /* Clear page fault */
#define RK_MMU_CMD_FORCE_RESET      6  /* Reset all registers */

/* RK_MMU_INT_* register fields */
#define RK_MMU_IRQ_PAGE_FAULT  0x01  /* page fault */
#define RK_MMU_IRQ_BUS_ERROR   0x02  /* bus read error */
#define RK_MMU_IRQ_MASK        (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)

#define NUM_DT_ENTRIES 1024
#define NUM_PT_ENTRIES 1024

#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)

/*
 * Support mapping any size that fits in one page table:
 * 4 KiB to 4 MiB
 */
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
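
/*
 * For illustration: 0x007ff000 has bits 12 through 22 set, so every
 * power-of-two size from 4 KiB (1 << 12) up to 4 MiB (1 << 22) is a
 * valid mapping size for this IOMMU.
 */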
80
Daniel Kurtzc68a2922014-11-03 10:53:27 +080081struct rk_iommu_domain {
82 struct list_head iommus;
83 u32 *dt; /* page directory table */
Shunqian Zheng4f0aba62016-06-24 10:13:29 +080084 dma_addr_t dt_dma;
Daniel Kurtzc68a2922014-11-03 10:53:27 +080085 spinlock_t iommus_lock; /* lock for iommus list */
86 spinlock_t dt_lock; /* lock for modifying page directory table */
Joerg Roedelbcd516a2015-03-26 13:43:17 +010087
88 struct iommu_domain domain;
Daniel Kurtzc68a2922014-11-03 10:53:27 +080089};
90
Tomasz Figaf2e3a5f2018-03-23 15:38:08 +080091/* list of clocks required by IOMMU */
92static const char * const rk_iommu_clocks[] = {
93 "aclk", "iface",
94};
95
Daniel Kurtzc68a2922014-11-03 10:53:27 +080096struct rk_iommu {
97 struct device *dev;
ZhengShunQiancd6438c2016-01-19 15:03:00 +080098 void __iomem **bases;
99 int num_mmu;
Tomasz Figaf2e3a5f2018-03-23 15:38:08 +0800100 struct clk_bulk_data *clocks;
101 int num_clocks;
Simon Xuec3aa4742017-07-24 10:37:15 +0800102 bool reset_disabled;
Joerg Roedelc9d9f232017-03-31 16:26:03 +0200103 struct iommu_device iommu;
Daniel Kurtzc68a2922014-11-03 10:53:27 +0800104 struct list_head node; /* entry in rk_iommu_domain.iommus */
105 struct iommu_domain *domain; /* domain to which iommu is attached */
106};
107
Jeffy Chen5fd577c2018-03-23 15:38:11 +0800108struct rk_iommudata {
109 struct rk_iommu *iommu;
110};
111
Jeffy Chen9176a302018-03-23 15:38:10 +0800112static struct device *dma_dev;
113
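/*
 * The MMU walks the DT/PT entries straight from memory, so after the CPU
 * modifies a table the affected range must be made visible to the device;
 * rk_table_flush() does this with dma_sync_single_for_device().
 */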
static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
                                  unsigned int count)
{
        size_t size = count * sizeof(u32); /* count of u32 entry */

        dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
}

static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct rk_iommu_domain, domain);
}

/*
 * The Rockchip rk3288 iommu uses a 2-level page table.
 * The first level is the "Directory Table" (DT).
 * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
 * to a "Page Table".
 * The second level consists of up to 1024 Page Tables (PTs).
 * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
 * a 4 KB page of physical memory.
 *
 * The DT and each PT fit in a single 4 KB page (4 bytes * 1024 entries).
 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
 * address of the start of the DT page.
 *
 * The structure of the page table is as follows:
 *
 *                    DT
 * MMU_DTE_ADDR -> +-----+
 *                 |     |
 *                 +-----+     PT
 *                 | DTE | -> +-----+
 *                 +-----+    |     |     Memory
 *                 |     |    +-----+     Page
 *                 |     |    | PTE | -> +-----+
 *                 +-----+    +-----+    |     |
 *                            |     |    |     |
 *                            |     |    |     |
 *                            +-----+    |     |
 *                                       |     |
 *                                       |     |
 *                                       +-----+
 */

/*
 * Each DTE has a PT address and a valid bit:
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:12 - PT address (PTs always start on a 4 KB boundary)
 *  11: 1 - Reserved
 *      0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK  0xfffff000
#define RK_DTE_PT_VALID         BIT(0)

static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
        return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}

static inline bool rk_dte_is_pt_valid(u32 dte)
{
        return dte & RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
        return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}
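
/*
 * For illustration: rk_mk_dte() on a page table at DMA address 0x12345000
 * yields the DTE 0x12345001 - address bits 31:12 kept, valid bit 0 set.
 */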

/*
 * Each PTE has a Page address, some flags and a valid bit:
 * +---------------------+---+-------+-+
 * | Page address        |Rsv| Flags |V|
 * +---------------------+---+-------+-+
 *  31:12 - Page address (Pages always start on a 4 KB boundary)
 *  11: 9 - Reserved
 *   8: 1 - Flags
 *      8 - Read allocate - allocate cache space on read misses
 *      7 - Read cache - enable cache & prefetch of data
 *      6 - Write buffer - enable delaying writes on their way to memory
 *      5 - Write allocate - allocate cache space on write misses
 *      4 - Write cache - different writes can be merged together
 *      3 - Override cache attributes
 *          if 1, bits 4-8 control cache attributes
 *          if 0, the system bus defaults are used
 *      2 - Writable
 *      1 - Readable
 *      0 - 1 if Page @ Page address is valid
 */
#define RK_PTE_PAGE_ADDRESS_MASK  0xfffff000
#define RK_PTE_PAGE_FLAGS_MASK    0x000001fe
#define RK_PTE_PAGE_WRITABLE      BIT(2)
#define RK_PTE_PAGE_READABLE      BIT(1)
#define RK_PTE_PAGE_VALID         BIT(0)

static inline phys_addr_t rk_pte_page_address(u32 pte)
{
        return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
}

static inline bool rk_pte_is_page_valid(u32 pte)
{
        return pte & RK_PTE_PAGE_VALID;
}

/* TODO: set cache flags per prot IOMMU_CACHE */
static u32 rk_mk_pte(phys_addr_t page, int prot)
{
        u32 flags = 0;

        flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
        flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
        page &= RK_PTE_PAGE_ADDRESS_MASK;
        return page | flags | RK_PTE_PAGE_VALID;
}
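
/*
 * For illustration: rk_mk_pte(0x12345000, IOMMU_READ | IOMMU_WRITE)
 * yields 0x12345007 - page address 0x12345000 with the readable (bit 1),
 * writable (bit 2) and valid (bit 0) flags set.
 */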

static u32 rk_mk_pte_invalid(u32 pte)
{
        return pte & ~RK_PTE_PAGE_VALID;
}

/*
 * rk3288 iova (IOMMU Virtual Address) format
 *  31       22.21       12.11          0
 * +-----------+-----------+-------------+
 * | DTE index | PTE index | Page offset |
 * +-----------+-----------+-------------+
 *  31:22 - DTE index   - index of DTE in DT
 *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
 *  11: 0 - Page offset - offset into page @ PTE.page_address
 */
#define RK_IOVA_DTE_MASK    0xffc00000
#define RK_IOVA_DTE_SHIFT   22
#define RK_IOVA_PTE_MASK    0x003ff000
#define RK_IOVA_PTE_SHIFT   12
#define RK_IOVA_PAGE_MASK   0x00000fff
#define RK_IOVA_PAGE_SHIFT  0

static u32 rk_iova_dte_index(dma_addr_t iova)
{
        return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
        return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
        return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}
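
/*
 * For illustration: iova 0x12345678 decomposes into dte_index 0x048,
 * pte_index 0x345 and page_offset 0x678, so a translation walks
 * DT[0x048] to find the PT, PT[0x345] to find the page, then adds 0x678.
 */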

static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
        return readl(base + offset);
}

static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
        writel(value, base + offset);
}

static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
        int i;

        for (i = 0; i < iommu->num_mmu; i++)
                writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}

static void rk_iommu_base_command(void __iomem *base, u32 command)
{
        writel(command, base + RK_MMU_COMMAND);
}

static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
                               size_t size)
{
        int i;
        dma_addr_t iova_end = iova_start + size;

        /*
         * TODO(djkurtz): Figure out when it is more efficient to shootdown the
         * entire iotlb rather than iterate over individual iovas.
         */
        for (i = 0; i < iommu->num_mmu; i++) {
                dma_addr_t iova;

                for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
                        rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
        }
}

static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
        bool active = true;
        int i;

        for (i = 0; i < iommu->num_mmu; i++)
                active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
                             RK_MMU_STATUS_STALL_ACTIVE);

        return active;
}

static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
        bool enable = true;
        int i;

        for (i = 0; i < iommu->num_mmu; i++)
                enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
                             RK_MMU_STATUS_PAGING_ENABLED);

        return enable;
}

static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
{
        bool done = true;
        int i;

        for (i = 0; i < iommu->num_mmu; i++)
                done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;

        return done;
}
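
/*
 * Note: the rk_iommu_is_*() predicates above double as readx_poll_timeout()
 * accessors below; that macro from <linux/iopoll.h> keeps re-reading the
 * predicate into 'val' until the given condition is true or the timeout
 * expires.
 */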

static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
        int ret, i;
        bool val;

        if (rk_iommu_is_stall_active(iommu))
                return 0;

        /* Stall can only be enabled if paging is enabled */
        if (!rk_iommu_is_paging_enabled(iommu))
                return 0;

        rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);

        ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
                                 val, RK_MMU_POLL_PERIOD_US,
                                 RK_MMU_POLL_TIMEOUT_US);
        if (ret)
                for (i = 0; i < iommu->num_mmu; i++)
                        dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
                                rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

        return ret;
}

static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
        int ret, i;
        bool val;

        if (!rk_iommu_is_stall_active(iommu))
                return 0;

        rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);

        ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
                                 !val, RK_MMU_POLL_PERIOD_US,
                                 RK_MMU_POLL_TIMEOUT_US);
        if (ret)
                for (i = 0; i < iommu->num_mmu; i++)
                        dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
                                rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

        return ret;
}

static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
        int ret, i;
        bool val;

        if (rk_iommu_is_paging_enabled(iommu))
                return 0;

        rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);

        ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
                                 val, RK_MMU_POLL_PERIOD_US,
                                 RK_MMU_POLL_TIMEOUT_US);
        if (ret)
                for (i = 0; i < iommu->num_mmu; i++)
                        dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
                                rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

        return ret;
}

static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
        int ret, i;
        bool val;

        if (!rk_iommu_is_paging_enabled(iommu))
                return 0;

        rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);

        ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
                                 !val, RK_MMU_POLL_PERIOD_US,
                                 RK_MMU_POLL_TIMEOUT_US);
        if (ret)
                for (i = 0; i < iommu->num_mmu; i++)
                        dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
                                rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

        return ret;
}

static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
        int ret, i;
        u32 dte_addr;
        bool val;

        if (iommu->reset_disabled)
                return 0;

        /*
         * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
         * and verifying that the upper 5 nybbles are read back.
         */
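        /* For example: writing 0xCAFEBABE must read back as 0xCAFEB000 (illustrative). */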
        for (i = 0; i < iommu->num_mmu; i++) {
                rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);

                dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
                if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
                        dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
                        return -EFAULT;
                }
        }

        rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

        ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
                                 val, RK_MMU_FORCE_RESET_TIMEOUT_US,
                                 RK_MMU_POLL_TIMEOUT_US);
        if (ret) {
                dev_err(iommu->dev, "FORCE_RESET command timed out\n");
                return ret;
        }

        return 0;
}

static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
        void __iomem *base = iommu->bases[index];
        u32 dte_index, pte_index, page_offset;
        u32 mmu_dte_addr;
        phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
        u32 *dte_addr;
        u32 dte;
        phys_addr_t pte_addr_phys = 0;
        u32 *pte_addr = NULL;
        u32 pte = 0;
        phys_addr_t page_addr_phys = 0;
        u32 page_flags = 0;

        dte_index = rk_iova_dte_index(iova);
        pte_index = rk_iova_pte_index(iova);
        page_offset = rk_iova_page_offset(iova);

        mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
        mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;

        dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
        dte_addr = phys_to_virt(dte_addr_phys);
        dte = *dte_addr;

        if (!rk_dte_is_pt_valid(dte))
                goto print_it;

        pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
        pte_addr = phys_to_virt(pte_addr_phys);
        pte = *pte_addr;

        if (!rk_pte_is_page_valid(pte))
                goto print_it;

        page_addr_phys = rk_pte_page_address(pte) + page_offset;
        page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;

print_it:
        dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
                &iova, dte_index, pte_index, page_offset);
        dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
                &mmu_dte_addr_phys, &dte_addr_phys, dte,
                rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
                rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}

static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
        struct rk_iommu *iommu = dev_id;
        u32 status;
        u32 int_status;
        dma_addr_t iova;
        irqreturn_t ret = IRQ_NONE;
        int i;

        WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));

        for (i = 0; i < iommu->num_mmu; i++) {
                int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
                if (int_status == 0)
                        continue;

                ret = IRQ_HANDLED;
                iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

                if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
                        int flags;

                        status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
                        flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
                                IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

                        dev_err(iommu->dev, "Page fault at %pad of type %s\n",
                                &iova,
                                (flags == IOMMU_FAULT_WRITE) ? "write" : "read");

                        log_iova(iommu, i, iova);

                        /*
                         * Report page fault to any installed handlers.
                         * Ignore the return code, though, since we always zap cache
                         * and clear the page fault anyway.
                         */
                        if (iommu->domain)
                                report_iommu_fault(iommu->domain, iommu->dev, iova,
                                                   flags);
                        else
                                dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");

                        rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
                        rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
                }

                if (int_status & RK_MMU_IRQ_BUS_ERROR)
                        dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);

                if (int_status & ~RK_MMU_IRQ_MASK)
                        dev_err(iommu->dev, "unexpected int_status: %#08x\n",
                                int_status);

                rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
        }

        clk_bulk_disable(iommu->num_clocks, iommu->clocks);

        return ret;
}

static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
                                         dma_addr_t iova)
{
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        unsigned long flags;
        phys_addr_t pt_phys, phys = 0;
        u32 dte, pte;
        u32 *page_table;

        spin_lock_irqsave(&rk_domain->dt_lock, flags);

        dte = rk_domain->dt[rk_iova_dte_index(iova)];
        if (!rk_dte_is_pt_valid(dte))
                goto out;

        pt_phys = rk_dte_pt_address(dte);
        page_table = (u32 *)phys_to_virt(pt_phys);
        pte = page_table[rk_iova_pte_index(iova)];
        if (!rk_pte_is_page_valid(pte))
                goto out;

        phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
out:
        spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

        return phys;
}

static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
                              dma_addr_t iova, size_t size)
{
        struct list_head *pos;
        unsigned long flags;

        /* Shoot down this iova range on all iommus using this domain */
        spin_lock_irqsave(&rk_domain->iommus_lock, flags);
        list_for_each(pos, &rk_domain->iommus) {
                struct rk_iommu *iommu;

                iommu = list_entry(pos, struct rk_iommu, node);
                WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
                rk_iommu_zap_lines(iommu, iova, size);
                clk_bulk_disable(iommu->num_clocks, iommu->clocks);
        }
        spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}

static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
                                         dma_addr_t iova, size_t size)
{
        rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
        if (size > SPAGE_SIZE)
                rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
                                  SPAGE_SIZE);
}

static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
                                  dma_addr_t iova)
{
        u32 *page_table, *dte_addr;
        u32 dte_index, dte;
        phys_addr_t pt_phys;
        dma_addr_t pt_dma;

        assert_spin_locked(&rk_domain->dt_lock);

        dte_index = rk_iova_dte_index(iova);
        dte_addr = &rk_domain->dt[dte_index];
        dte = *dte_addr;
        if (rk_dte_is_pt_valid(dte))
                goto done;

        page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
        if (!page_table)
                return ERR_PTR(-ENOMEM);

        pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(dma_dev, pt_dma)) {
                dev_err(dma_dev, "DMA mapping error while allocating page table\n");
                free_page((unsigned long)page_table);
                return ERR_PTR(-ENOMEM);
        }

        dte = rk_mk_dte(pt_dma);
        *dte_addr = dte;

        rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
        rk_table_flush(rk_domain,
                       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
        pt_phys = rk_dte_pt_address(dte);
        return (u32 *)phys_to_virt(pt_phys);
}

static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
                                  u32 *pte_addr, dma_addr_t pte_dma,
                                  size_t size)
{
        unsigned int pte_count;
        unsigned int pte_total = size / SPAGE_SIZE;

        assert_spin_locked(&rk_domain->dt_lock);

        for (pte_count = 0; pte_count < pte_total; pte_count++) {
                u32 pte = pte_addr[pte_count];

                if (!rk_pte_is_page_valid(pte))
                        break;

                pte_addr[pte_count] = rk_mk_pte_invalid(pte);
        }

        rk_table_flush(rk_domain, pte_dma, pte_count);

        return pte_count * SPAGE_SIZE;
}

static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
                             dma_addr_t pte_dma, dma_addr_t iova,
                             phys_addr_t paddr, size_t size, int prot)
{
        unsigned int pte_count;
        unsigned int pte_total = size / SPAGE_SIZE;
        phys_addr_t page_phys;

        assert_spin_locked(&rk_domain->dt_lock);

        for (pte_count = 0; pte_count < pte_total; pte_count++) {
                u32 pte = pte_addr[pte_count];

                if (rk_pte_is_page_valid(pte))
                        goto unwind;

                pte_addr[pte_count] = rk_mk_pte(paddr, prot);

                paddr += SPAGE_SIZE;
        }

        rk_table_flush(rk_domain, pte_dma, pte_total);

        /*
         * Zap the first and last iova to evict from iotlb any previously
         * mapped cachelines holding stale values for its dte and pte.
         * We only zap the first and last iova, since only they could have
         * dte or pte shared with an existing mapping.
         */
        rk_iommu_zap_iova_first_last(rk_domain, iova, size);

        return 0;
unwind:
        /* Unmap the range of iovas that we just mapped */
        rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
                            pte_count * SPAGE_SIZE);

        iova += pte_count * SPAGE_SIZE;
        page_phys = rk_pte_page_address(pte_addr[pte_count]);
        pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
               &iova, &page_phys, &paddr, prot);

        return -EADDRINUSE;
}

static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
                        phys_addr_t paddr, size_t size, int prot)
{
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        unsigned long flags;
        dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
        u32 *page_table, *pte_addr;
        u32 dte, pte_index;
        int ret;

        spin_lock_irqsave(&rk_domain->dt_lock, flags);

        /*
         * pgsize_bitmap specifies iova sizes that fit in one page table
         * (1024 4-KiB pages = 4 MiB).
         * So, size will always be 4096 <= size <= 4194304.
         * Since iommu_map() guarantees that both iova and size will be
         * aligned, we will always only be mapping from a single dte here.
         */
        page_table = rk_dte_get_page_table(rk_domain, iova);
        if (IS_ERR(page_table)) {
                spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
                return PTR_ERR(page_table);
        }

        dte = rk_domain->dt[rk_iova_dte_index(iova)];
        pte_index = rk_iova_pte_index(iova);
        pte_addr = &page_table[pte_index];
        pte_dma = rk_dte_pt_address(dte) + pte_index * sizeof(u32);
        ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
                                paddr, size, prot);

        spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

        return ret;
}

static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
                             size_t size)
{
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        unsigned long flags;
        dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
        phys_addr_t pt_phys;
        u32 dte;
        u32 *pte_addr;
        size_t unmap_size;

        spin_lock_irqsave(&rk_domain->dt_lock, flags);

        /*
         * pgsize_bitmap specifies iova sizes that fit in one page table
         * (1024 4-KiB pages = 4 MiB).
         * So, size will always be 4096 <= size <= 4194304.
         * Since iommu_unmap() guarantees that both iova and size will be
         * aligned, we will always only be unmapping from a single dte here.
         */
        dte = rk_domain->dt[rk_iova_dte_index(iova)];
        /* Just return 0 if iova is unmapped */
        if (!rk_dte_is_pt_valid(dte)) {
                spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
                return 0;
        }

        pt_phys = rk_dte_pt_address(dte);
        pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
        pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
        unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);

        spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

        /* Shootdown iotlb entries for iova range that was just unmapped */
        rk_iommu_zap_iova(rk_domain, iova, unmap_size);

        return unmap_size;
}

static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
        struct rk_iommudata *data = dev->archdata.iommu;

        return data ? data->iommu : NULL;
}

static int rk_iommu_attach_device(struct iommu_domain *domain,
                                  struct device *dev)
{
        struct rk_iommu *iommu;
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        unsigned long flags;
        int ret, i;

        /*
         * Allow 'virtual devices' (e.g., drm) to attach to domain.
         * Such a device does not belong to an iommu group.
         */
        iommu = rk_iommu_from_dev(dev);
        if (!iommu)
                return 0;

        ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
        if (ret)
                return ret;

        ret = rk_iommu_enable_stall(iommu);
        if (ret)
                goto out_disable_clocks;

        ret = rk_iommu_force_reset(iommu);
        if (ret)
                goto out_disable_stall;

        iommu->domain = domain;

        for (i = 0; i < iommu->num_mmu; i++) {
                rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
                               rk_domain->dt_dma);
                rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
                rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
        }

        ret = rk_iommu_enable_paging(iommu);
        if (ret)
                goto out_disable_stall;

        spin_lock_irqsave(&rk_domain->iommus_lock, flags);
        list_add_tail(&iommu->node, &rk_domain->iommus);
        spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

        dev_dbg(dev, "Attached to iommu domain\n");

out_disable_stall:
        rk_iommu_disable_stall(iommu);
out_disable_clocks:
        clk_bulk_disable(iommu->num_clocks, iommu->clocks);
        return ret;
}

static void rk_iommu_detach_device(struct iommu_domain *domain,
                                   struct device *dev)
{
        struct rk_iommu *iommu;
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        unsigned long flags;
        int i;

        /* Allow 'virtual devices' (e.g., drm) to detach from domain */
        iommu = rk_iommu_from_dev(dev);
        if (!iommu)
                return;

        spin_lock_irqsave(&rk_domain->iommus_lock, flags);
        list_del_init(&iommu->node);
        spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

        /* Ignore error while disabling, just keep going */
        WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
        rk_iommu_enable_stall(iommu);
        rk_iommu_disable_paging(iommu);
        for (i = 0; i < iommu->num_mmu; i++) {
                rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
                rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
        }
        rk_iommu_disable_stall(iommu);
        clk_bulk_disable(iommu->num_clocks, iommu->clocks);

        iommu->domain = NULL;

        dev_dbg(dev, "Detached from iommu domain\n");
}

static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
        struct rk_iommu_domain *rk_domain;

        if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
                return NULL;

        if (!dma_dev)
                return NULL;

        rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL);
        if (!rk_domain)
                return NULL;

        if (type == IOMMU_DOMAIN_DMA &&
            iommu_get_dma_cookie(&rk_domain->domain))
                return NULL;

        /*
         * rk32xx iommus use a 2-level pagetable.
         * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
         * Allocate one 4 KiB page for each table.
         */
        rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
        if (!rk_domain->dt)
                goto err_put_cookie;

        rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
                                           SPAGE_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
                dev_err(dma_dev, "DMA map error for DT\n");
                goto err_free_dt;
        }

        rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);

        spin_lock_init(&rk_domain->iommus_lock);
        spin_lock_init(&rk_domain->dt_lock);
        INIT_LIST_HEAD(&rk_domain->iommus);

        rk_domain->domain.geometry.aperture_start = 0;
        rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
        rk_domain->domain.geometry.force_aperture = true;

        return &rk_domain->domain;

err_free_dt:
        free_page((unsigned long)rk_domain->dt);
err_put_cookie:
        if (type == IOMMU_DOMAIN_DMA)
                iommu_put_dma_cookie(&rk_domain->domain);

        return NULL;
}

static void rk_iommu_domain_free(struct iommu_domain *domain)
{
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        int i;

        WARN_ON(!list_empty(&rk_domain->iommus));

        for (i = 0; i < NUM_DT_ENTRIES; i++) {
                u32 dte = rk_domain->dt[i];

                if (rk_dte_is_pt_valid(dte)) {
                        phys_addr_t pt_phys = rk_dte_pt_address(dte);
                        u32 *page_table = phys_to_virt(pt_phys);

                        dma_unmap_single(dma_dev, pt_phys,
                                         SPAGE_SIZE, DMA_TO_DEVICE);
                        free_page((unsigned long)page_table);
                }
        }

        dma_unmap_single(dma_dev, rk_domain->dt_dma,
                         SPAGE_SIZE, DMA_TO_DEVICE);
        free_page((unsigned long)rk_domain->dt);

        if (domain->type == IOMMU_DOMAIN_DMA)
                iommu_put_dma_cookie(&rk_domain->domain);
}

static int rk_iommu_add_device(struct device *dev)
{
        struct iommu_group *group;
        struct rk_iommu *iommu;

        iommu = rk_iommu_from_dev(dev);
        if (!iommu)
                return -ENODEV;

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);
        iommu_group_put(group);

        iommu_device_link(&iommu->iommu, dev);

        return 0;
}

static void rk_iommu_remove_device(struct device *dev)
{
        struct rk_iommu *iommu;

        iommu = rk_iommu_from_dev(dev);

        iommu_device_unlink(&iommu->iommu, dev);
        iommu_group_remove_device(dev);
}

static int rk_iommu_of_xlate(struct device *dev,
                             struct of_phandle_args *args)
{
        struct platform_device *iommu_dev;
        struct rk_iommudata *data;

        data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        iommu_dev = of_find_device_by_node(args->np);

        data->iommu = platform_get_drvdata(iommu_dev);
        dev->archdata.iommu = data;

        of_dev_put(iommu_dev);

        return 0;
}

static const struct iommu_ops rk_iommu_ops = {
        .domain_alloc = rk_iommu_domain_alloc,
        .domain_free = rk_iommu_domain_free,
        .attach_dev = rk_iommu_attach_device,
        .detach_dev = rk_iommu_detach_device,
        .map = rk_iommu_map,
        .unmap = rk_iommu_unmap,
        .map_sg = default_iommu_map_sg,
        .add_device = rk_iommu_add_device,
        .remove_device = rk_iommu_remove_device,
        .iova_to_phys = rk_iommu_iova_to_phys,
        .device_group = generic_device_group,
        .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
        .of_xlate = rk_iommu_of_xlate,
};

static int rk_iommu_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct rk_iommu *iommu;
        struct resource *res;
        int num_res = pdev->num_resources;
        int err, i, irq;

        iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        platform_set_drvdata(pdev, iommu);
        iommu->dev = dev;
        iommu->num_mmu = 0;

        iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * num_res,
                                    GFP_KERNEL);
        if (!iommu->bases)
                return -ENOMEM;

        for (i = 0; i < num_res; i++) {
                res = platform_get_resource(pdev, IORESOURCE_MEM, i);
                if (!res)
                        continue;
                iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
                if (IS_ERR(iommu->bases[i]))
                        continue;
                iommu->num_mmu++;
        }
        if (iommu->num_mmu == 0)
                return PTR_ERR(iommu->bases[0]);

        i = 0;
        while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
                if (irq < 0)
                        return irq;

                err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
                                       IRQF_SHARED, dev_name(dev), iommu);
                if (err)
                        return err;
        }

        iommu->reset_disabled = device_property_read_bool(dev,
                                        "rockchip,disable-mmu-reset");

        iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks);
        iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
                                     sizeof(*iommu->clocks), GFP_KERNEL);
        if (!iommu->clocks)
                return -ENOMEM;

        for (i = 0; i < iommu->num_clocks; ++i)
                iommu->clocks[i].id = rk_iommu_clocks[i];

        err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
        if (err)
                return err;

        err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
        if (err)
                return err;

        err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
        if (err)
                goto err_unprepare_clocks;

        iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
        iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);

        err = iommu_device_register(&iommu->iommu);
        if (err)
                goto err_remove_sysfs;

        /*
         * Use the first registered IOMMU device for domain to use with DMA
         * API, since a domain might not physically correspond to a single
         * IOMMU device.
         */
        if (!dma_dev)
                dma_dev = &pdev->dev;

        return 0;
err_remove_sysfs:
        iommu_device_sysfs_remove(&iommu->iommu);
err_unprepare_clocks:
        clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
        return err;
}

static void rk_iommu_shutdown(struct platform_device *pdev)
{
        struct rk_iommu *iommu = platform_get_drvdata(pdev);

        /*
         * Be careful not to try to shutdown an otherwise unused
         * IOMMU, as it is likely not to be clocked, and accessing it
         * would just block. An IOMMU without a domain is likely to be
         * unused, so let's use this as a (weak) guard.
         */
        if (iommu && iommu->domain) {
                rk_iommu_enable_stall(iommu);
                rk_iommu_disable_paging(iommu);
                rk_iommu_force_reset(iommu);
        }
}

static const struct of_device_id rk_iommu_dt_ids[] = {
        { .compatible = "rockchip,iommu" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);

static struct platform_driver rk_iommu_driver = {
        .probe = rk_iommu_probe,
        .shutdown = rk_iommu_shutdown,
        .driver = {
                   .name = "rk_iommu",
                   .of_match_table = rk_iommu_dt_ids,
                   .suppress_bind_attrs = true,
        },
};

static int __init rk_iommu_init(void)
{
        struct device_node *np;
        int ret;

        np = of_find_matching_node(NULL, rk_iommu_dt_ids);
        if (!np)
                return 0;

        of_node_put(np);

        ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
        if (ret)
                return ret;

        return platform_driver_register(&rk_iommu_driver);
}
subsys_initcall(rk_iommu_init);

IOMMU_OF_DECLARE(rk_iommu_of, "rockchip,iommu");

MODULE_DESCRIPTION("IOMMU API for Rockchip");
MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
MODULE_ALIAS("platform:rockchip-iommu");
MODULE_LICENSE("GPL v2");