/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)
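
/*
 * Entry type encoding in the two low bits, as implied by the predicates
 * above (ZERO_LV2LINK, defined below, is a first-level link to a shared
 * all-fault second-level table):
 *
 *	Lv1: 0 or 3 = fault, 1 = link to an Lv2 table, 2 = 1MiB section
 *	Lv2: 0 = fault, 1 = 64KiB large page, 2 or 3 = 4KiB small page
 */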

static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
{
	return iova & (size - 1);
}

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}
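
/*
 * Worked example of the two-level lookup implemented by the helpers
 * above: for iova 0x12345678, lv1ent_offset() = 0x12345678 >> 20 = 0x123
 * and lv2ent_offset() = (0x12345678 >> 12) & 0xff = 0x45, so the mapping
 * is described by entry 0x123 of the first-level table and, if that
 * entry links to a second-level table, by entry 0x45 within it. The low
 * 12 bits (0x678) are the offset inside the 4KiB page.
 */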

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)
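
/*
 * For example, mk_lv1ent_sect(0x40000000) yields 0x40000002: the section
 * physical address in the upper bits plus type bits 2, matching the
 * lv1ent_section() predicate above. The physical address is assumed to
 * be aligned to the mapping size, so the type bits never collide with
 * address bits.
 */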

#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define CFG_LRU		0x1
#define CFG_QOS(n)	((n & 0xF) << 7)
#define CFG_MASK	0x0150FFFF /* Selecting bit 0-15, 20, 22 and 24 */
#define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */

#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_FLUSH		0x00C
#define REG_MMU_FLUSH_ENTRY	0x010
#define REG_PT_BASE_ADDR	0x014
#define REG_INT_STATUS		0x018
#define REG_INT_CLEAR		0x01C

#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

#define REG_MMU_VERSION		0x034

#define MMU_MAJ_VER(val)	((val) >> 7)
#define MMU_MIN_VER(val)	((val) & 0x7F)
#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))
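
/*
 * For example, MAKE_MMU_VER(3, 3) = (3 << 7) | 3 = 0x183 and, conversely,
 * MMU_MAJ_VER(0x183) = 3 and MMU_MIN_VER(0x183) = 3. MMU_RAW_VER()
 * extracts the same 11-bit value from bits 31:21 of REG_MMU_VERSION.
 */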

#define REG_PB0_SADDR		0x04C
#define REG_PB0_EADDR		0x050
#define REG_PB1_SADDR		0x054
#define REG_PB1_EADDR		0x058

#define has_sysmmu(dev)		(dev->archdata.iommu != NULL)

static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION, /* 7 */
	SYSMMU_FAULT_UNKNOWN,
	SYSMMU_FAULTS_NUM
};

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	REG_PAGE_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_DEFAULT_SLAVE_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT",
	"UNKNOWN FAULT"
};

/*
 * This structure is attached to dev.archdata.iommu of the master device
 * when the device is added. It contains the list of SYSMMU controllers
 * defined by the device tree that are bound to the given master device.
 * It is usually referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
};

/*
 * This structure is the Exynos-specific generalization of struct
 * iommu_domain. It contains the list of SYSMMU controllers from all
 * master devices that have been attached to this domain, and the page
 * tables of the I/O address space defined by it. It is usually
 * referenced by the 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

/*
 * This structure holds all data of a single SYSMMU controller: hardware
 * resources such as registers and clocks, the pointers and list nodes
 * that connect it to all other structures, internal state, and
 * parameters read from the device tree. It is usually referenced by the
 * 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *clk_master;		/* master's device clock */
	int activations;		/* number of calls to sysmmu_enable */
	spinlock_t lock;		/* lock for modifying state */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU was not active previously
	   and it needs to be initialized */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU needs to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}
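
/*
 * Illustrative sketch of the activation counting above (not driver
 * code): a controller shared by several enable/disable paths is
 * programmed only on the 0 -> 1 transition and shut down only on the
 * 1 -> 0 transition:
 *
 *	set_sysmmu_active(data);	-> true,  initialize the hardware
 *	set_sysmmu_active(data);	-> false, already running
 *	set_sysmmu_inactive(data);	-> false, still in use
 *	set_sysmmu_inactive(data);	-> true,  hardware may be disabled
 */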

static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(void __iomem *sfrbase)
{
	int i = 120;

	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(sfrbase);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
				sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	for (i = 0; i < num_inv; i++) {
		__raw_writel((iova & SPAGE_MASK) | 1,
				sfrbase + REG_MMU_FLUSH_ENTRY);
		iova += SPAGE_SIZE;
	}
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase,
				phys_addr_t pgd)
{
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}

static void show_fault_information(const char *name,
		enum exynos_sysmmu_inttype itype,
		phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
		itype = SYSMMU_FAULT_UNKNOWN;

	pr_err("%s occurred at %#x by %s (page table base: %pa)\n",
		sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);

	ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: %#x\n", *ent);

	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_err("\t Lv2 entry: %#x\n", *ent);
	}
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when an interrupt has occurred. */
	struct sysmmu_drvdata *data = dev_id;
	enum exynos_sysmmu_inttype itype;
	sysmmu_iova_t addr = -1;
	int ret = -ENOSYS;

	WARN_ON(!is_sysmmu_active(data));

	spin_lock(&data->lock);

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	itype = (enum exynos_sysmmu_inttype)
		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
		itype = SYSMMU_FAULT_UNKNOWN;
	else
		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

	if (itype == SYSMMU_FAULT_UNKNOWN) {
		pr_err("%s: Fault not caused by System MMU '%s'!\n",
			__func__, dev_name(data->sysmmu));
		pr_err("%s: Please check if IRQ is correctly configured.\n",
			__func__);
		BUG();
	} else {
		unsigned int base =
				__raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
		show_fault_information(dev_name(data->sysmmu),
					itype, base, addr);
		if (data->domain)
			ret = report_iommu_fault(&data->domain->domain,
					data->master, addr, itype);
	}

	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

	sysmmu_unblock(data->sfrbase);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	__raw_writel(0, data->sfrbase + REG_MMU_CFG);

	clk_disable(data->clk);
	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
	bool disabled;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);

	disabled = set_sysmmu_inactive(data);

	if (disabled) {
		data->pgtable = 0;
		data->domain = NULL;

		__sysmmu_disable_nocount(data);

		dev_dbg(data->sysmmu, "Disabled\n");
	} else {
		dev_dbg(data->sysmmu, "%d times left to disable\n",
					data->activations);
	}

	spin_unlock_irqrestore(&data->lock, flags);

	return disabled;
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg = CFG_LRU | CFG_QOS(15);
	unsigned int ver;

	ver = MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
	if (MMU_MAJ_VER(ver) == 3) {
		if (MMU_MIN_VER(ver) >= 2) {
			cfg |= CFG_FLPDCACHE;
			if (MMU_MIN_VER(ver) == 3) {
				cfg |= CFG_ACGEN;
				cfg &= ~CFG_LRU;
			} else {
				cfg |= CFG_SYSSEL;
			}
		}
	}

	__raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
	data->version = ver;
}
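
/*
 * For example, on System MMU v3.3 the version checks above leave
 * cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN with CFG_LRU cleared,
 * while v3.2 gets cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL.
 */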

static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);
	clk_enable(data->clk);

	__raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

	__sysmmu_init_config(data);

	__sysmmu_set_ptbase(data->sfrbase, data->pgtable);

	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
			   struct exynos_iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (set_sysmmu_active(data)) {
		data->pgtable = pgtable;
		data->domain = domain;

		__sysmmu_enable_nocount(data);

		dev_dbg(data->sysmmu, "Enabled\n");
	} else {
		ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

		dev_dbg(data->sysmmu, "already enabled\n");
	}

	if (WARN_ON(ret < 0))
		set_sysmmu_inactive(data); /* decrement count */

	spin_unlock_irqrestore(&data->lock, flags);

	return ret;
}

static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					      sysmmu_iova_t iova)
{
	if (data->version == MAKE_MMU_VER(3, 3))
		__raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data))
		__sysmmu_tlb_invalidate_flpdcache(data, iova);
	spin_unlock_irqrestore(&data->lock, flags);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		unsigned int num_inv = 1;

		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required:
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is a set-associative TLB
		 * with 8 ways and 64 sets.
		 * A 1MB page can be cached in any of the sets;
		 * a 64KB page in one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate_entry(
				data->sfrbase, iova, num_inv);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(data->master,
			"disabled. Skipping TLB invalidation @ %#x\n", iova);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq ? irq : -ENXIO;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (IS_ERR(data->clk)) {
		dev_err(dev, "Failed to get clock!\n");
		return PTR_ERR(data->clk);
	} else {
		ret = clk_prepare(data->clk);
		if (ret) {
			dev_err(dev, "Failed to prepare clk\n");
			return ret;
		}
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (!IS_ERR(data->clk_master)) {
		ret = clk_prepare(data->clk_master);
		if (ret) {
			clk_unprepare(data->clk);
			dev_err(dev, "Failed to prepare master's clk\n");
			return ret;
		}
	}

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	platform_set_drvdata(pdev, data);

	pm_runtime_enable(dev);

	return 0;
}

static const struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible = "samsung,exynos-sysmmu", },
	{ },
};
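
/*
 * Example device tree node matched by the table above, providing the
 * "sysmmu" and "master" clocks requested in the probe (illustrative
 * sketch only; the unit address, interrupt and clock specifiers depend
 * on the SoC):
 *
 *	sysmmu_gsc0: sysmmu@13e80000 {
 *		compatible = "samsung,exynos-sysmmu";
 *		reg = <0x13e80000 0x1000>;
 *		interrupts = <2 0>;
 *		clock-names = "sysmmu", "master";
 *		clocks = <&clock CLK_SMMU_GSCL0>, <&clock CLK_GSCL0>;
 *	};
 */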

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
	}
};

static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
				virt_to_phys(vaend));
}

static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	int i;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_pgtable;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
		domain->pgtable[i + 0] = ZERO_LV2LINK;
		domain->pgtable[i + 1] = ZERO_LV2LINK;
		domain->pgtable[i + 2] = ZERO_LV2LINK;
		domain->pgtable[i + 3] = ZERO_LV2LINK;
		domain->pgtable[i + 4] = ZERO_LV2LINK;
		domain->pgtable[i + 5] = ZERO_LV2LINK;
		domain->pgtable[i + 6] = ZERO_LV2LINK;
		domain->pgtable[i + 7] = ZERO_LV2LINK;
	}

	pgtable_flush(domain->pgtable, domain->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end   = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_pgtable:
	kfree(domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		if (__sysmmu_disable(data))
			data->master = NULL;
		list_del_init(&data->domain_node);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,
				phys_to_virt(lv2table_base(domain->pgtable + i)));

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}

static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;
	int ret = -ENODEV;

	if (!has_sysmmu(dev))
		return -ENODEV;

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_sync(data->sysmmu);
		ret = __sysmmu_enable(data, pagetable, domain);
		if (ret >= 0) {
			data->master = dev;

			spin_lock_irqsave(&domain->lock, flags);
			list_add_tail(&data->domain_node, &domain->clients);
			spin_unlock_irqrestore(&domain->lock, flags);
		}
	}

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
			__func__, &pagetable);
		return ret;
	}

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
		__func__, &pagetable, (ret == 0) ? "" : ", again");

	return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				       struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	bool found = false;

	if (!has_sysmmu(dev))
		return;

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		if (data->master == dev) {
			if (__sysmmu_disable(data)) {
				data->master = NULL;
				list_del_init(&data->domain_node);
			}
			pm_runtime_put(data->sysmmu);
			found = true;
		}
	}
	spin_unlock_irqrestore(&domain->lock, flags);

	if (found)
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
					__func__, &pagetable);
	else
		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying to map on %#08x, which is already mapped with a 1MiB page",
			iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		*sent = mk_lv1ent_page(virt_to_phys(pent));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
		pgtable_flush(sent, sent + 1);

		/*
		 * If a pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * the FLPD cache may cache the address of zero_l2_table.
		 * This function replaces zero_l2_table with a new L2 page
		 * table to write valid mappings.
		 * Accessing the valid area may then cause a page fault,
		 * since the FLPD cache may still cache zero_l2_table for
		 * that area instead of the new L2 page table that holds
		 * its mapping information.
		 * Thus any replacement of zero_l2_table with another valid
		 * L2 page table must involve an FLPD cache invalidation on
		 * System MMU v3.3.
		 * The FLPD cache invalidation is performed with a TLB
		 * invalidation by VPN without blocking. It is safe to
		 * invalidate the TLB without blocking because the target
		 * address of the TLB invalidation is not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying to map 1MiB@%#08x, which is already mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying to map 1MiB@%#08x, which is already mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	*sent = mk_lv1ent_sect(paddr);

	pgtable_flush(sent, sent + 1);

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a
		 * FLPD entry by speculative prefetch of an SLPD which has
		 * no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		*pent = mk_lv2ent_spage(paddr);
		pgtable_flush(pent, pent + 1);
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;

		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance by caching more page table entries during a page table walk.
 * However, the logic has a bug: while caching faulty page table entries,
 * System MMU reports a page fault if the cached fault entry is hit, even
 * though the fault entry has been updated to a valid entry after it was
 * cached.
 * To prevent caching faulty page table entries which may be updated to
 * valid entries later, the virtual memory manager must apply the
 * workaround described below.
 *
 * Any two consecutive I/O virtual address regions must have a hole of at
 * least 128KiB between them to prevent misbehavior of System MMU 3.x
 * (workaround for h/w bug).
 *
 * More precisely, any start address of an I/O virtual region must be
 * aligned with the following sizes for System MMU v3.1 and v3.2:
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs stricter workarounds:
 * - Any two consecutive I/O virtual regions must have a hole of size
 *   larger than or equal to 128KiB.
 * - The start address of an I/O virtual region must be aligned by 128KiB.
 */
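
/*
 * Illustrative sketch (not part of this driver): an IOVA allocator that
 * follows the v3.3 rules above could round every region start up to
 * 128KiB and reserve a 128KiB guard hole behind each region, e.g.:
 *
 *	start = ALIGN(hint, SZ_128K);
 *	end   = start + ALIGN(size, SPAGE_SIZE) + SZ_128K;
 *
 * so that no two regions are ever closer than 128KiB.
 */
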
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
		unsigned long l_iova, phys_addr_t paddr, size_t size,
		int prot)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size,
				       &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		*ent = ZERO_LV2LINK;
		pgtable_flush(ent, ent + 1);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(sent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		*ent = 0;
		size = SPAGE_SIZE;
		pgtable_flush(ent, ent + 1);
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	pgtable_flush(ent, ent + SPAGES_PER_LPAGE);

	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}

static int exynos_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (!has_sysmmu(dev))
		return -ENODEV;

	group = iommu_group_get(dev);

	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group)) {
			dev_err(dev, "Failed to allocate IOMMU group\n");
			return PTR_ERR(group);
		}
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	return ret;
}

static void exynos_iommu_remove_device(struct device *dev)
{
	if (!has_sysmmu(dev))
		return;

	iommu_group_remove_device(dev);
}

static const struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};
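
/*
 * Illustrative consumer sketch (hypothetical device pointer 'dev'): once
 * these ops are registered for the platform bus below, a master driver
 * can manage its own I/O address space through the generic IOMMU API:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *	iommu_attach_device(dom, dev);
 *	iommu_map(dom, iova, paddr, SZ_64K, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(dom, iova, SZ_64K);
 *	iommu_detach_device(dom, dev);
 *	iommu_domain_free(dom);
 */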

static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
			__func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
subsys_initcall(exynos_iommu_init);