/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

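/*
 * Two-level page table layout: the 16KiB first-level table holds 4096
 * entries, each covering a 1MiB section. A first-level "page" entry points
 * to a 1KiB second-level table of 256 entries, which map 4KiB small pages
 * or 64KiB large pages.
 */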
#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
{
	return iova & (size - 1);
}

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define CFG_LRU 0x1
#define CFG_QOS(n) ((n & 0xF) << 7)
#define CFG_MASK 0x0150FFFF /* Selecting bit 0-15, 20, 22 and 24 */
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */

#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

#define REG_MMU_VERSION 0x034

#define MMU_MAJ_VER(val) ((val) >> 7)
#define MMU_MIN_VER(val) ((val) & 0x7F)
#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))

#define REG_PB0_SADDR 0x04C
#define REG_PB0_EADDR 0x050
#define REG_PB1_SADDR 0x054
#define REG_PB1_EADDR 0x058

#define has_sysmmu(dev) (dev->archdata.iommu != NULL)

static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION, /* 7 */
	SYSMMU_FAULT_UNKNOWN,
	SYSMMU_FAULTS_NUM
};

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	REG_PAGE_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_DEFAULT_SLAVE_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT",
	"UNKNOWN FAULT"
};

/* attached to dev.archdata.iommu of the master device */
struct exynos_iommu_owner {
	struct device *sysmmu;
};

struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.node */
	sysmmu_pte_t *pgtable; /* lv1 page table, 16KB */
	short *lv2entcnt; /* free lv2 entry counter for each section */
	spinlock_t lock; /* lock for this structure */
	spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

struct sysmmu_drvdata {
	struct device *sysmmu;	/* System MMU's device descriptor */
	struct device *master;	/* Owner of system MMU */
	void __iomem *sfrbase;
	struct clk *clk;
	struct clk *clk_master;
	int activations;
	spinlock_t lock;
	struct exynos_iommu_domain *domain;
	struct list_head domain_node;
	phys_addr_t pgtable;
	unsigned int version;
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

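/*
 * Reference counting of enable/disable requests: the System MMU hardware is
 * actually programmed only when the count goes from 0 to 1 and shut down
 * only when it drops back to 0.
 */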
static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU was not active previously
	   and it needs to be initialized */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU is needed to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}

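/*
 * sysmmu_block()/sysmmu_unblock(): stall address translation while registers
 * or the TLB are updated. sysmmu_block() polls REG_MMU_STATUS and returns
 * false (after re-enabling the MMU) if the blocked state is not reached in
 * time.
 */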
static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(void __iomem *sfrbase)
{
	int i = 120;

	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(sfrbase);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
				sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	for (i = 0; i < num_inv; i++) {
		__raw_writel((iova & SPAGE_MASK) | 1,
				sfrbase + REG_MMU_FLUSH_ENTRY);
		iova += SPAGE_SIZE;
	}
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase,
				phys_addr_t pgd)
{
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}

static void show_fault_information(const char *name,
		enum exynos_sysmmu_inttype itype,
		phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
		itype = SYSMMU_FAULT_UNKNOWN;

	pr_err("%s occurred at %#x by %s(Page table base: %pa)\n",
		sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);

	ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: %#x\n", *ent);

	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_err("\t Lv2 entry: %#x\n", *ent);
	}
}

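/*
 * Fault interrupt handler: decodes the fault type from REG_INT_STATUS,
 * reports it through report_iommu_fault() and BUG()s if no fault handler
 * recovered the fault.
 */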
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when interrupt occurred. */
	struct sysmmu_drvdata *data = dev_id;
	enum exynos_sysmmu_inttype itype;
	sysmmu_iova_t addr = -1;
	int ret = -ENOSYS;

	WARN_ON(!is_sysmmu_active(data));

	spin_lock(&data->lock);

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	itype = (enum exynos_sysmmu_inttype)
		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
		itype = SYSMMU_FAULT_UNKNOWN;
	else
		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

	if (itype == SYSMMU_FAULT_UNKNOWN) {
		pr_err("%s: Fault is not occurred by System MMU '%s'!\n",
			__func__, dev_name(data->sysmmu));
		pr_err("%s: Please check if IRQ is correctly configured.\n",
			__func__);
		BUG();
	} else {
		unsigned int base =
				__raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
		show_fault_information(dev_name(data->sysmmu),
					itype, base, addr);
		if (data->domain)
			ret = report_iommu_fault(&data->domain->domain,
					data->master, addr, itype);
	}

	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

	sysmmu_unblock(data->sfrbase);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	__raw_writel(0, data->sfrbase + REG_MMU_CFG);

	clk_disable(data->clk);
	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
	bool disabled;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);

	disabled = set_sysmmu_inactive(data);

	if (disabled) {
		data->pgtable = 0;
		data->domain = NULL;

		__sysmmu_disable_nocount(data);

		dev_dbg(data->sysmmu, "Disabled\n");
	} else {
		dev_dbg(data->sysmmu, "%d times left to disable\n",
					data->activations);
	}

	spin_unlock_irqrestore(&data->lock, flags);

	return disabled;
}

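/*
 * Program REG_MMU_CFG according to the hardware version read from
 * REG_MMU_VERSION: v3.2 and later enable the FLPD cache; v3.3 additionally
 * sets ACGEN and drops CFG_LRU, while v3.2 sets SYSSEL instead.
 */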
static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg = CFG_LRU | CFG_QOS(15);
	unsigned int ver;

	ver = MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
	if (MMU_MAJ_VER(ver) == 3) {
		if (MMU_MIN_VER(ver) >= 2) {
			cfg |= CFG_FLPDCACHE;
			if (MMU_MIN_VER(ver) == 3) {
				cfg |= CFG_ACGEN;
				cfg &= ~CFG_LRU;
			} else {
				cfg |= CFG_SYSSEL;
			}
		}
	}

	__raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
	data->version = ver;
}

static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);
	clk_enable(data->clk);

	__raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

	__sysmmu_init_config(data);

	__sysmmu_set_ptbase(data->sfrbase, data->pgtable);

	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
			   struct exynos_iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (set_sysmmu_active(data)) {
		data->pgtable = pgtable;
		data->domain = domain;

		__sysmmu_enable_nocount(data);

		dev_dbg(data->sysmmu, "Enabled\n");
	} else {
		ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

		dev_dbg(data->sysmmu, "already enabled\n");
	}

	if (WARN_ON(ret < 0))
		set_sysmmu_inactive(data); /* decrement count */

	spin_unlock_irqrestore(&data->lock, flags);

	return ret;
}

static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					      sysmmu_iova_t iova)
{
	if (data->version == MAKE_MMU_VER(3, 3))
		__raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data))
		__sysmmu_tlb_invalidate_flpdcache(data, iova);
	spin_unlock_irqrestore(&data->lock, flags);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

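/*
 * Invalidate the TLB entries covering [iova, iova + size) on one System MMU
 * instance; the MMU is blocked for the duration of the flush.
 */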
static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		unsigned int num_inv = 1;

		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is set-associative TLB
		 * with 8-way and 64 sets.
		 * 1MB page can be cached in one of all sets.
		 * 64KB page can be one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate_entry(
				data->sfrbase, iova, num_inv);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(data->master,
			"disabled. Skipping TLB invalidation @ %#x\n", iova);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

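/*
 * Probe one System MMU instance: map its registers, request its fault
 * interrupt and look up the "sysmmu" clock and the optional "master" clock.
 */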
static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unabled to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (IS_ERR(data->clk)) {
		dev_err(dev, "Failed to get clock!\n");
		return PTR_ERR(data->clk);
	} else {
		ret = clk_prepare(data->clk);
		if (ret) {
			dev_err(dev, "Failed to prepare clk\n");
			return ret;
		}
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (!IS_ERR(data->clk_master)) {
		ret = clk_prepare(data->clk_master);
		if (ret) {
			clk_unprepare(data->clk);
			dev_err(dev, "Failed to prepare master's clk\n");
			return ret;
		}
	}

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	platform_set_drvdata(pdev, data);

	pm_runtime_enable(dev);

	return 0;
}

static const struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible = "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe = exynos_sysmmu_probe,
	.driver = {
		.name = "exynos-sysmmu",
		.of_match_table = sysmmu_of_match,
	}
};

static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
				virt_to_phys(vaend));
}

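/*
 * Allocate an unmanaged domain: an order-2 page allocation for the 16KiB
 * first-level table and an order-1 allocation for the per-section counters
 * of free second-level entries. Every first-level slot initially points to
 * the shared zero_lv2_table (System MMU v3.3 workaround).
 */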
static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	int i;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_pgtable;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
		domain->pgtable[i + 0] = ZERO_LV2LINK;
		domain->pgtable[i + 1] = ZERO_LV2LINK;
		domain->pgtable[i + 2] = ZERO_LV2LINK;
		domain->pgtable[i + 3] = ZERO_LV2LINK;
		domain->pgtable[i + 4] = ZERO_LV2LINK;
		domain->pgtable[i + 5] = ZERO_LV2LINK;
		domain->pgtable[i + 6] = ZERO_LV2LINK;
		domain->pgtable[i + 7] = ZERO_LV2LINK;
	}

	pgtable_flush(domain->pgtable, domain->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_pgtable:
	kfree(domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		if (__sysmmu_disable(data))
			data->master = NULL;
		list_del_init(&data->domain_node);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,
				phys_to_virt(lv2table_base(domain->pgtable + i)));

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}

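/*
 * Attach the System MMU of @dev to @iommu_domain by programming it with the
 * domain's page table and adding it to the domain's client list.
 */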
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;
	int ret = -ENODEV;

	if (!has_sysmmu(dev))
		return -ENODEV;

	data = dev_get_drvdata(owner->sysmmu);
	if (data) {
		ret = __sysmmu_enable(data, pagetable, domain);
		if (ret >= 0) {
			data->master = dev;

			spin_lock_irqsave(&domain->lock, flags);
			list_add_tail(&data->domain_node, &domain->clients);
			spin_unlock_irqrestore(&domain->lock, flags);
		}
	}

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
			__func__, &pagetable);
		return ret;
	}

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
		__func__, &pagetable, (ret == 0) ? "" : ", again");

	return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				       struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data;
	unsigned long flags;
	bool found = false;

	if (!has_sysmmu(dev))
		return;

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(data, &domain->clients, domain_node) {
		if (data->master == dev) {
			if (__sysmmu_disable(data)) {
				data->master = NULL;
				list_del_init(&data->domain_node);
			}
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&domain->lock, flags);

	if (found)
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
					__func__, &pagetable);
	else
		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}

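/*
 * Return the second-level entry for @iova, allocating and linking a new
 * second-level table if the first-level entry is still a fault entry or the
 * shared zero table.
 */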
static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		*sent = mk_lv1ent_page(virt_to_phys(pent));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
		pgtable_flush(sent, sent + 1);

		/*
		 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may cause page fault since FLPD
		 * cache may still cache zero_l2_table for the valid area
		 * instead of new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with other valid L2
		 * page table must involve FLPD cache invalidation for System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate TLB without
		 * blocking because the target address of TLB invalidation is
		 * not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	*sent = mk_lv1ent_sect(paddr);

	pgtable_flush(sent, sent + 1);

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
							short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		*pent = mk_lv2ent_spage(paddr);
		pgtable_flush(pent, pent + 1);
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;

		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance with caching more page table entries by a page table walk.
 * However, the logic has a bug that while caching faulty page table entries,
 * System MMU reports page fault if the cached fault entry is hit even though
 * the fault entry is updated to a valid entry after the entry is cached.
 * To prevent caching faulty page table entries which may be updated to valid
 * entries later, the virtual memory manager should care about the workaround
 * for the problem. The following describes the workaround.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, any start address of I/O virtual region must be aligned with
 * the following sizes for System MMU v3.1 and v3.2.
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it needs
 * more workarounds.
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - Start address of an I/O virtual region must be aligned by 128KiB.
 */
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size,
				       &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}

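/*
 * Unmap one section, large page or small page at @l_iova. Section entries
 * are replaced with ZERO_LV2LINK rather than simply cleared (System MMU v3.3
 * workaround) and the TLBs of all attached System MMUs are invalidated after
 * the page table update.
 */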
static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		*ent = ZERO_LV2LINK;
		pgtable_flush(ent, ent + 1);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(sent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		*ent = 0;
		size = SPAGE_SIZE;
		pgtable_flush(ent, ent + 1);
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv1ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	pgtable_flush(ent, ent + SPAGES_PER_LPAGE);

	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}

static int exynos_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);

	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group)) {
			dev_err(dev, "Failed to allocate IOMMU group\n");
			return PTR_ERR(group);
		}
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	return ret;
}

static void exynos_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static const struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};

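/*
 * Module initialization: create the kmem cache for second-level tables,
 * register the platform driver, allocate the shared zero second-level table
 * and hook the driver into the platform bus as its IOMMU.
 */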
static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
								__func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
subsys_initcall(exynos_iommu_init);