/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/** MMU register offsets */
#define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
#define RK_MMU_STATUS		0x04
#define RK_MMU_COMMAND		0x08
#define RK_MMU_PAGE_FAULT_ADDR	0x0C	/* IOVA of last page fault */
#define RK_MMU_ZAP_ONE_LINE	0x10	/* Shootdown one IOTLB entry */
#define RK_MMU_INT_RAWSTAT	0x14	/* IRQ status ignoring mask */
#define RK_MMU_INT_CLEAR	0x18	/* Acknowledge and re-arm irq */
#define RK_MMU_INT_MASK		0x1C	/* IRQ enable */
#define RK_MMU_INT_STATUS	0x20	/* IRQ status after masking */
#define RK_MMU_AUTO_GATING	0x24

#define DTE_ADDR_DUMMY		0xCAFEBABE

#define RK_MMU_POLL_PERIOD_US		100
#define RK_MMU_FORCE_RESET_TIMEOUT_US	100000
#define RK_MMU_POLL_TIMEOUT_US		1000

/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED       BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE    BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE         BIT(2)
#define RK_MMU_STATUS_IDLE                 BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY  BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE  BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE     BIT(31)

/* RK_MMU_COMMAND command values */
#define RK_MMU_CMD_ENABLE_PAGING    0  /* Enable memory translation */
#define RK_MMU_CMD_DISABLE_PAGING   1  /* Disable memory translation */
#define RK_MMU_CMD_ENABLE_STALL     2  /* Stall paging to allow other cmds */
#define RK_MMU_CMD_DISABLE_STALL    3  /* Stop stalling; re-enables paging */
#define RK_MMU_CMD_ZAP_CACHE        4  /* Shoot down entire IOTLB */
#define RK_MMU_CMD_PAGE_FAULT_DONE  5  /* Clear page fault */
#define RK_MMU_CMD_FORCE_RESET      6  /* Reset all registers */

/* RK_MMU_INT_* register fields */
#define RK_MMU_IRQ_PAGE_FAULT    0x01  /* page fault */
#define RK_MMU_IRQ_BUS_ERROR     0x02  /* bus read error */
#define RK_MMU_IRQ_MASK          (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)

#define NUM_DT_ENTRIES 1024
#define NUM_PT_ENTRIES 1024

#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)

/*
 * Support mapping any size that fits in one page table:
 * 4 KiB to 4 MiB
 */
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000

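/*
 * Worked example (illustrative, not from the original sources): 0x007ff000
 * has bits 12 through 22 set, so the sizes advertised to the IOMMU core are
 * exactly the powers of two from 2^12 (4 KiB) up to 2^22 (4 MiB), and any
 * single mapping request therefore fits within one page table.
 */
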
struct rk_iommu_domain {
	struct list_head iommus;
	u32 *dt; /* page directory table */
	dma_addr_t dt_dma;
	spinlock_t iommus_lock; /* lock for iommus list */
	spinlock_t dt_lock; /* lock for modifying page directory table */

	struct iommu_domain domain;
};

/* list of clocks required by IOMMU */
static const char * const rk_iommu_clocks[] = {
	"aclk", "iface",
};

struct rk_iommu {
	struct device *dev;
	void __iomem **bases;
	int num_mmu;
	struct clk_bulk_data *clocks;
	int num_clocks;
	bool reset_disabled;
	struct iommu_device iommu;
	struct list_head node; /* entry in rk_iommu_domain.iommus */
	struct iommu_domain *domain; /* domain to which iommu is attached */
	struct iommu_group *group;
};

struct rk_iommudata {
	struct device_link *link; /* runtime PM link from IOMMU to master */
	struct rk_iommu *iommu;
};

static struct device *dma_dev;

static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
				  unsigned int count)
{
	size_t size = count * sizeof(u32); /* count of u32 entry */

	dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
}

static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct rk_iommu_domain, domain);
}

/*
 * The Rockchip rk3288 iommu uses a 2-level page table.
 * The first level is the "Directory Table" (DT).
 * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
 * to a "Page Table".
 * The second level consists of up to 1024 Page Tables (PTs).
 * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
 * a 4 KB page of physical memory.
 *
 * The DT and each PT fit in a single 4 KB page (4 bytes * 1024 entries).
 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
 * address of the start of the DT page.
 *
 * The structure of the page table is as follows:
 *
 *                   DT
 * MMU_DTE_ADDR -> +-----+
 *                 |     |
 *                 +-----+     PT
 *                 | DTE | -> +-----+
 *                 +-----+    |     |     Memory
 *                 |     |    +-----+     Page
 *                 |     |    | PTE | -> +-----+
 *                 +-----+    +-----+    |     |
 *                            |     |    |     |
 *                            |     |    |     |
 *                            +-----+    |     |
 *                                       |     |
 *                                       |     |
 *                                       +-----+
 */

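/*
 * Worked numbers (illustrative, derived from the layout above): with 1024
 * DTEs, 1024 PTEs per PT and 4 KiB pages, one domain can translate up to
 * 1024 * 1024 * 4 KiB = 4 GiB of IOVA space, at a cost of one 4 KiB DT page
 * plus at most 1024 further 4 KiB PT pages.
 */
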
/*
 * Each DTE has a PT address and a valid bit:
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:12 - PT address (PTs always start on a 4 KB boundary)
 *  11: 1 - Reserved
 *      0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK    0xfffff000
#define RK_DTE_PT_VALID           BIT(0)

static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}

static inline bool rk_dte_is_pt_valid(u32 dte)
{
	return dte & RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}

/*
 * Each PTE has a Page address, some flags and a valid bit:
 * +---------------------+---+-------+-+
 * | Page address        |Rsv| Flags |V|
 * +---------------------+---+-------+-+
 *  31:12 - Page address (Pages always start on a 4 KB boundary)
 *  11: 9 - Reserved
 *   8: 1 - Flags
 *      8 - Read allocate - allocate cache space on read misses
 *      7 - Read cache - enable cache & prefetch of data
 *      6 - Write buffer - enable delaying writes on their way to memory
 *      5 - Write allocate - allocate cache space on write misses
 *      4 - Write cache - different writes can be merged together
 *      3 - Override cache attributes
 *          if 1, bits 4-8 control cache attributes
 *          if 0, the system bus defaults are used
 *      2 - Writable
 *      1 - Readable
 *      0 - 1 if Page @ Page address is valid
 */
#define RK_PTE_PAGE_ADDRESS_MASK  0xfffff000
#define RK_PTE_PAGE_FLAGS_MASK    0x000001fe
#define RK_PTE_PAGE_WRITABLE      BIT(2)
#define RK_PTE_PAGE_READABLE      BIT(1)
#define RK_PTE_PAGE_VALID         BIT(0)

static inline phys_addr_t rk_pte_page_address(u32 pte)
{
	return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
}

static inline bool rk_pte_is_page_valid(u32 pte)
{
	return pte & RK_PTE_PAGE_VALID;
}

/* TODO: set cache flags per prot IOMMU_CACHE */
static u32 rk_mk_pte(phys_addr_t page, int prot)
{
	u32 flags = 0;
	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
	page &= RK_PTE_PAGE_ADDRESS_MASK;
	return page | flags | RK_PTE_PAGE_VALID;
}

static u32 rk_mk_pte_invalid(u32 pte)
{
	return pte & ~RK_PTE_PAGE_VALID;
}

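/*
 * Worked example (illustrative): rk_mk_pte(0x12345000, IOMMU_READ | IOMMU_WRITE)
 * returns 0x12345000 | RK_PTE_PAGE_READABLE | RK_PTE_PAGE_WRITABLE |
 * RK_PTE_PAGE_VALID = 0x12345007, i.e. a valid, readable and writable PTE
 * whose cache-attribute flags are left at the system bus defaults.
 */
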
/*
 * rk3288 iova (IOMMU Virtual Address) format
 *  31       22.21       12.11          0
 * +-----------+-----------+-------------+
 * | DTE index | PTE index | Page offset |
 * +-----------+-----------+-------------+
 *  31:22 - DTE index   - index of DTE in DT
 *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
 *  11: 0 - Page offset - offset into page @ PTE.page_address
 */
#define RK_IOVA_DTE_MASK    0xffc00000
#define RK_IOVA_DTE_SHIFT   22
#define RK_IOVA_PTE_MASK    0x003ff000
#define RK_IOVA_PTE_SHIFT   12
#define RK_IOVA_PAGE_MASK   0x00000fff
#define RK_IOVA_PAGE_SHIFT  0

static u32 rk_iova_dte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}

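/*
 * Worked example (illustrative): for iova 0x12345678 the helpers above give
 * dte_index = 0x048 (bits 31:22), pte_index = 0x345 (bits 21:12) and
 * page_offset = 0x678 (bits 11:0), so a walk reads DT[0x48], then PT[0x345],
 * and finally adds 0x678 to the page address found in that PTE.
 */
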
static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
	writel(value, base + offset);
}

static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}

static void rk_iommu_base_command(void __iomem *base, u32 command)
{
	writel(command, base + RK_MMU_COMMAND);
}

static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
			       size_t size)
{
	int i;
	dma_addr_t iova_end = iova_start + size;
	/*
	 * TODO(djkurtz): Figure out when it is more efficient to shootdown the
	 * entire iotlb rather than iterate over individual iovas.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		dma_addr_t iova;

		for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
	}
}

static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
	bool active = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
					   RK_MMU_STATUS_STALL_ACTIVE);

	return active;
}

static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
	bool enable = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
					   RK_MMU_STATUS_PAGING_ENABLED);

	return enable;
}

static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
{
	bool done = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;

	return done;
}

static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_stall_active(iommu))
		return 0;

	/* Stall can only be enabled if paging is enabled */
	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_stall_active(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
	int ret, i;
	u32 dte_addr;
	bool val;

	if (iommu->reset_disabled)
		return 0;

	/*
	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
	 * and verifying that upper 5 nybbles are read back.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);

		dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
		if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
			return -EFAULT;
		}
	}

	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

	ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
				 val, RK_MMU_FORCE_RESET_TIMEOUT_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret) {
		dev_err(iommu->dev, "FORCE_RESET command timed out\n");
		return ret;
	}

	return 0;
}

static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
	void __iomem *base = iommu->bases[index];
	u32 dte_index, pte_index, page_offset;
	u32 mmu_dte_addr;
	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
	u32 *dte_addr;
	u32 dte;
	phys_addr_t pte_addr_phys = 0;
	u32 *pte_addr = NULL;
	u32 pte = 0;
	phys_addr_t page_addr_phys = 0;
	u32 page_flags = 0;

	dte_index = rk_iova_dte_index(iova);
	pte_index = rk_iova_pte_index(iova);
	page_offset = rk_iova_page_offset(iova);

	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
	mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;

	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
	dte_addr = phys_to_virt(dte_addr_phys);
	dte = *dte_addr;

	if (!rk_dte_is_pt_valid(dte))
		goto print_it;

	pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
	pte_addr = phys_to_virt(pte_addr_phys);
	pte = *pte_addr;

	if (!rk_pte_is_page_valid(pte))
		goto print_it;

	page_addr_phys = rk_pte_page_address(pte) + page_offset;
	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;

print_it:
	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
		&iova, dte_index, pte_index, page_offset);
	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
		&mmu_dte_addr_phys, &dte_addr_phys, dte,
		rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
		rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}

static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
	struct rk_iommu *iommu = dev_id;
	u32 status;
	u32 int_status;
	dma_addr_t iova;
	irqreturn_t ret = IRQ_NONE;
	int i;

	if (WARN_ON(!pm_runtime_get_if_in_use(iommu->dev)))
		return 0;

	if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
		goto out;

	for (i = 0; i < iommu->num_mmu; i++) {
		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
		if (int_status == 0)
			continue;

		ret = IRQ_HANDLED;
		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
			int flags;

			status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
			flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
					IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
				&iova,
				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");

			log_iova(iommu, i, iova);

			/*
			 * Report page fault to any installed handlers.
			 * Ignore the return code, though, since we always zap cache
			 * and clear the page fault anyway.
			 */
			if (iommu->domain)
				report_iommu_fault(iommu->domain, iommu->dev, iova,
						   flags);
			else
				dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");

			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
		}

		if (int_status & RK_MMU_IRQ_BUS_ERROR)
			dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);

		if (int_status & ~RK_MMU_IRQ_MASK)
			dev_err(iommu->dev, "unexpected int_status: %#08x\n",
				int_status);

		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
	}

	clk_bulk_disable(iommu->num_clocks, iommu->clocks);

out:
	pm_runtime_put(iommu->dev);
	return ret;
}

static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	phys_addr_t pt_phys, phys = 0;
	u32 dte, pte;
	u32 *page_table;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	if (!rk_dte_is_pt_valid(dte))
		goto out;

	pt_phys = rk_dte_pt_address(dte);
	page_table = (u32 *)phys_to_virt(pt_phys);
	pte = page_table[rk_iova_pte_index(iova)];
	if (!rk_pte_is_page_valid(pte))
		goto out;

	phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
out:
	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return phys;
}

static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
			      dma_addr_t iova, size_t size)
{
	struct list_head *pos;
	unsigned long flags;

	/* shootdown these iova from all iommus using this domain */
	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_for_each(pos, &rk_domain->iommus) {
		struct rk_iommu *iommu;

		iommu = list_entry(pos, struct rk_iommu, node);

		/* Only zap TLBs of IOMMUs that are powered on. */
		if (pm_runtime_get_if_in_use(iommu->dev)) {
			WARN_ON(clk_bulk_enable(iommu->num_clocks,
						iommu->clocks));
			rk_iommu_zap_lines(iommu, iova, size);
			clk_bulk_disable(iommu->num_clocks, iommu->clocks);
			pm_runtime_put(iommu->dev);
		}
	}
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}

static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
					 dma_addr_t iova, size_t size)
{
	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
	if (size > SPAGE_SIZE)
		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
				  SPAGE_SIZE);
}

static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
				  dma_addr_t iova)
{
	u32 *page_table, *dte_addr;
	u32 dte_index, dte;
	phys_addr_t pt_phys;
	dma_addr_t pt_dma;

	assert_spin_locked(&rk_domain->dt_lock);

	dte_index = rk_iova_dte_index(iova);
	dte_addr = &rk_domain->dt[dte_index];
	dte = *dte_addr;
	if (rk_dte_is_pt_valid(dte))
		goto done;

	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, pt_dma)) {
		dev_err(dma_dev, "DMA mapping error while allocating page table\n");
		free_page((unsigned long)page_table);
		return ERR_PTR(-ENOMEM);
	}

	dte = rk_mk_dte(pt_dma);
	*dte_addr = dte;

	rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
	rk_table_flush(rk_domain,
		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
	pt_phys = rk_dte_pt_address(dte);
	return (u32 *)phys_to_virt(pt_phys);
}

static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
				  u32 *pte_addr, dma_addr_t pte_dma,
				  size_t size)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];
		if (!rk_pte_is_page_valid(pte))
			break;

		pte_addr[pte_count] = rk_mk_pte_invalid(pte);
	}

	rk_table_flush(rk_domain, pte_dma, pte_count);

	return pte_count * SPAGE_SIZE;
}

static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
			     dma_addr_t pte_dma, dma_addr_t iova,
			     phys_addr_t paddr, size_t size, int prot)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;
	phys_addr_t page_phys;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];

		if (rk_pte_is_page_valid(pte))
			goto unwind;

		pte_addr[pte_count] = rk_mk_pte(paddr, prot);

		paddr += SPAGE_SIZE;
	}

	rk_table_flush(rk_domain, pte_dma, pte_total);

	/*
	 * Zap the first and last iova to evict from iotlb any previously
	 * mapped cachelines holding stale values for its dte and pte.
	 * We only zap the first and last iova, since only they could have
	 * dte or pte shared with an existing mapping.
	 */
	rk_iommu_zap_iova_first_last(rk_domain, iova, size);

	return 0;
unwind:
	/* Unmap the range of iovas that we just mapped */
	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
			    pte_count * SPAGE_SIZE);

	iova += pte_count * SPAGE_SIZE;
	page_phys = rk_pte_page_address(pte_addr[pte_count]);
	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
	       &iova, &page_phys, &paddr, prot);

	return -EADDRINUSE;
}

static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	u32 *page_table, *pte_addr;
	u32 dte_index, pte_index;
	int ret;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_map() guarantees that both iova and size will be
	 * aligned, we will always only be mapping from a single dte here.
	 */
	page_table = rk_dte_get_page_table(rk_domain, iova);
	if (IS_ERR(page_table)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return PTR_ERR(page_table);
	}

	dte_index = rk_domain->dt[rk_iova_dte_index(iova)];
	pte_index = rk_iova_pte_index(iova);
	pte_addr = &page_table[pte_index];
	pte_dma = rk_dte_pt_address(dte_index) + pte_index * sizeof(u32);
	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
				paddr, size, prot);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return ret;
}

static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
			     size_t size)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	phys_addr_t pt_phys;
	u32 dte;
	u32 *pte_addr;
	size_t unmap_size;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_unmap() guarantees that both iova and size will be
	 * aligned, we will always only be unmapping from a single dte here.
	 */
	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	/* Just return 0 if iova is unmapped */
	if (!rk_dte_is_pt_valid(dte)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return 0;
	}

	pt_phys = rk_dte_pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	/* Shootdown iotlb entries for iova range that was just unmapped */
	rk_iommu_zap_iova(rk_domain, iova, unmap_size);

	return unmap_size;
}

static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
	struct rk_iommudata *data = dev->archdata.iommu;

	return data ? data->iommu : NULL;
}

/* Must be called with iommu powered on and attached */
static void rk_iommu_disable(struct rk_iommu *iommu)
{
	int i;

	/* Ignore error while disabling, just keep going */
	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
	rk_iommu_enable_stall(iommu);
	rk_iommu_disable_paging(iommu);
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
	}
	rk_iommu_disable_stall(iommu);
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
}

/* Must be called with iommu powered on and attached */
static int rk_iommu_enable(struct rk_iommu *iommu)
{
	struct iommu_domain *domain = iommu->domain;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int ret, i;

	ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
	if (ret)
		return ret;

	ret = rk_iommu_enable_stall(iommu);
	if (ret)
		goto out_disable_clocks;

	ret = rk_iommu_force_reset(iommu);
	if (ret)
		goto out_disable_stall;

	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
			       rk_domain->dt_dma);
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
	}

	ret = rk_iommu_enable_paging(iommu);

out_disable_stall:
	rk_iommu_disable_stall(iommu);
out_disable_clocks:
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
	return ret;
}

static void rk_iommu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;

	/* Allow 'virtual devices' (eg drm) to detach from domain */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return;

	dev_dbg(dev, "Detaching from iommu domain\n");

	/* iommu already detached */
	if (iommu->domain != domain)
		return;

	iommu->domain = NULL;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_del_init(&iommu->node);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	if (pm_runtime_get_if_in_use(iommu->dev)) {
		rk_iommu_disable(iommu);
		pm_runtime_put(iommu->dev);
	}
}

static int rk_iommu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret;

	/*
	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
	 * Such a device does not belong to an iommu group.
	 */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return 0;

	dev_dbg(dev, "Attaching to iommu domain\n");

	/* iommu already attached */
	if (iommu->domain == domain)
		return 0;

	if (iommu->domain)
		rk_iommu_detach_device(iommu->domain, dev);

	iommu->domain = domain;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_add_tail(&iommu->node, &rk_domain->iommus);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	if (!pm_runtime_get_if_in_use(iommu->dev))
		return 0;

	ret = rk_iommu_enable(iommu);
	if (ret)
		rk_iommu_detach_device(iommu->domain, dev);

	pm_runtime_put(iommu->dev);

	return ret;
}

static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
	struct rk_iommu_domain *rk_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	if (!dma_dev)
		return NULL;

	rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL);
	if (!rk_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&rk_domain->domain))
		return NULL;

	/*
	 * rk32xx iommus use a 2 level pagetable.
	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
	 * Allocate one 4 KiB page for each table.
	 */
	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	if (!rk_domain->dt)
		goto err_put_cookie;

	rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
					   SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
		dev_err(dma_dev, "DMA map error for DT\n");
		goto err_free_dt;
	}

	rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);

	spin_lock_init(&rk_domain->iommus_lock);
	spin_lock_init(&rk_domain->dt_lock);
	INIT_LIST_HEAD(&rk_domain->iommus);

	rk_domain->domain.geometry.aperture_start = 0;
	rk_domain->domain.geometry.aperture_end   = DMA_BIT_MASK(32);
	rk_domain->domain.geometry.force_aperture = true;

	return &rk_domain->domain;

err_free_dt:
	free_page((unsigned long)rk_domain->dt);
err_put_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);

	return NULL;
}

static void rk_iommu_domain_free(struct iommu_domain *domain)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int i;

	WARN_ON(!list_empty(&rk_domain->iommus));

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		u32 dte = rk_domain->dt[i];
		if (rk_dte_is_pt_valid(dte)) {
			phys_addr_t pt_phys = rk_dte_pt_address(dte);
			u32 *page_table = phys_to_virt(pt_phys);
			dma_unmap_single(dma_dev, pt_phys,
					 SPAGE_SIZE, DMA_TO_DEVICE);
			free_page((unsigned long)page_table);
		}
	}

	dma_unmap_single(dma_dev, rk_domain->dt_dma,
			 SPAGE_SIZE, DMA_TO_DEVICE);
	free_page((unsigned long)rk_domain->dt);

	if (domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);
}

static int rk_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	struct rk_iommu *iommu;
	struct rk_iommudata *data;

	data = dev->archdata.iommu;
	if (!data)
		return -ENODEV;

	iommu = rk_iommu_from_dev(dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);
	iommu_group_put(group);

	iommu_device_link(&iommu->iommu, dev);
	data->link = device_link_add(dev, iommu->dev, DL_FLAG_PM_RUNTIME);

	return 0;
}

static void rk_iommu_remove_device(struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommudata *data = dev->archdata.iommu;

	iommu = rk_iommu_from_dev(dev);

	device_link_del(data->link);
	iommu_device_unlink(&iommu->iommu, dev);
	iommu_group_remove_device(dev);
}

static struct iommu_group *rk_iommu_device_group(struct device *dev)
{
	struct rk_iommu *iommu;

	iommu = rk_iommu_from_dev(dev);

	return iommu_group_ref_get(iommu->group);
}

static int rk_iommu_of_xlate(struct device *dev,
			     struct of_phandle_args *args)
{
	struct platform_device *iommu_dev;
	struct rk_iommudata *data;

	data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	iommu_dev = of_find_device_by_node(args->np);

	data->iommu = platform_get_drvdata(iommu_dev);
	dev->archdata.iommu = data;

	platform_device_put(iommu_dev);

	return 0;
}

static const struct iommu_ops rk_iommu_ops = {
	.domain_alloc = rk_iommu_domain_alloc,
	.domain_free = rk_iommu_domain_free,
	.attach_dev = rk_iommu_attach_device,
	.detach_dev = rk_iommu_detach_device,
	.map = rk_iommu_map,
	.unmap = rk_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.add_device = rk_iommu_add_device,
	.remove_device = rk_iommu_remove_device,
	.iova_to_phys = rk_iommu_iova_to_phys,
	.device_group = rk_iommu_device_group,
	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
	.of_xlate = rk_iommu_of_xlate,
};

static int rk_iommu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rk_iommu *iommu;
	struct resource *res;
	int num_res = pdev->num_resources;
	int err, i, irq;

	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	platform_set_drvdata(pdev, iommu);
	iommu->dev = dev;
	iommu->num_mmu = 0;

	iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
				    GFP_KERNEL);
	if (!iommu->bases)
		return -ENOMEM;

	for (i = 0; i < num_res; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			continue;
		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(iommu->bases[i]))
			continue;
		iommu->num_mmu++;
	}
	if (iommu->num_mmu == 0)
		return PTR_ERR(iommu->bases[0]);

	i = 0;
	while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
		if (irq < 0)
			return irq;

		err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
				       IRQF_SHARED, dev_name(dev), iommu);
		if (err)
			return err;
	}

	iommu->reset_disabled = device_property_read_bool(dev,
					"rockchip,disable-mmu-reset");

	iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks);
	iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
				     sizeof(*iommu->clocks), GFP_KERNEL);
	if (!iommu->clocks)
		return -ENOMEM;

	for (i = 0; i < iommu->num_clocks; ++i)
		iommu->clocks[i].id = rk_iommu_clocks[i];

	/*
	 * iommu clocks should be present for all new devices and devicetrees
	 * but there are older devicetrees without clocks out in the wild.
	 * So treat clocks as optional for the time being.
	 */
	err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
	if (err == -ENOENT)
		iommu->num_clocks = 0;
	else if (err)
		return err;

	err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
	if (err)
		return err;

	iommu->group = iommu_group_alloc();
	if (IS_ERR(iommu->group)) {
		err = PTR_ERR(iommu->group);
		goto err_unprepare_clocks;
	}

	err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
	if (err)
		goto err_put_group;

	iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
	iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);

	err = iommu_device_register(&iommu->iommu);
	if (err)
		goto err_remove_sysfs;

	/*
	 * Use the first registered IOMMU device for domain to use with DMA
	 * API, since a domain might not physically correspond to a single
	 * IOMMU device.
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	bus_set_iommu(&platform_bus_type, &rk_iommu_ops);

	pm_runtime_enable(dev);

	return 0;
err_remove_sysfs:
	iommu_device_sysfs_remove(&iommu->iommu);
err_put_group:
	iommu_group_put(iommu->group);
err_unprepare_clocks:
	clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
	return err;
}

static void rk_iommu_shutdown(struct platform_device *pdev)
{
	pm_runtime_force_suspend(&pdev->dev);
}

static int __maybe_unused rk_iommu_suspend(struct device *dev)
{
	struct rk_iommu *iommu = dev_get_drvdata(dev);

	if (!iommu->domain)
		return 0;

	rk_iommu_disable(iommu);
	return 0;
}

static int __maybe_unused rk_iommu_resume(struct device *dev)
{
	struct rk_iommu *iommu = dev_get_drvdata(dev);

	if (!iommu->domain)
		return 0;

	return rk_iommu_enable(iommu);
}

static const struct dev_pm_ops rk_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id rk_iommu_dt_ids[] = {
	{ .compatible = "rockchip,iommu" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);

static struct platform_driver rk_iommu_driver = {
	.probe = rk_iommu_probe,
	.shutdown = rk_iommu_shutdown,
	.driver = {
		   .name = "rk_iommu",
		   .of_match_table = rk_iommu_dt_ids,
		   .pm = &rk_iommu_pm_ops,
		   .suppress_bind_attrs = true,
	},
};

static int __init rk_iommu_init(void)
{
	return platform_driver_register(&rk_iommu_driver);
}
subsys_initcall(rk_iommu_init);

IOMMU_OF_DECLARE(rk_iommu_of, "rockchip,iommu");

MODULE_DESCRIPTION("IOMMU API for Rockchip");
MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
MODULE_ALIAS("platform:rockchip-iommu");
MODULE_LICENSE("GPL v2");