/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_page(sent) ((*(sent) & 3) == 1)
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) ((iova) & 0xFFFFF)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) ((iova) & 0xFFFF)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) ((iova) & 0xFFF)

#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES 256

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

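/*
 * Worked example of the layout above (illustration only, with a
 * hypothetical IOVA of 0x12345678):
 *
 *	lv1ent_offset(0x12345678) = 0x12345678 >> 20             = 0x123
 *	lv2ent_offset(0x12345678) = (0x12345678 & 0xFF000) >> 12 = 0x45
 *
 * Translation reads pgtable[0x123]; if its two low bits are 1 (a page
 * table pointer), the level-2 table it points to is indexed at entry
 * 0x45. The low bits encode the entry type: in lv1, 2 is a 1MiB
 * section and 1 is a page table pointer; in lv2, 1 is a 64KiB large
 * page and 2 is a 4KiB small page, as the mk_*() macros above set.
 */
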
#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

#define REG_MMU_VERSION 0x034

#define REG_PB0_SADDR 0x04C
#define REG_PB0_EADDR 0x050
#define REG_PB1_SADDR 0x054
#define REG_PB1_EADDR 0x058

static struct kmem_cache *lv2table_kmem_cache;

static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
{
	return pgtable + lv1ent_offset(iova);
}

static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
{
	return (unsigned long *)phys_to_virt(
			lv2table_base(sent)) + lv2ent_offset(iova);
}

enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION, /* 7 */
	SYSMMU_FAULT_UNKNOWN,
	SYSMMU_FAULTS_NUM
};

/*
 * @itype: type of fault.
 * @pgtable_base: the physical address of the page table base. This is 0 if
 *                @itype is SYSMMU_BUSERROR.
 * @fault_addr: the device (virtual) address that the System MMU tried to
 *              translate. This is 0 if @itype is SYSMMU_BUSERROR.
 */
typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
			phys_addr_t pgtable_base, unsigned long fault_addr);

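/*
 * A minimal sketch of a client fault handler matching the typedef above
 * (illustration only; the function name and message are hypothetical,
 * not part of this driver). Returning 0 marks the fault as handled, so
 * exynos_sysmmu_irq() clears the interrupt and unblocks the System MMU;
 * any other value leaves the fault unhandled.
 */
static int __maybe_unused example_sysmmu_fault_handler(
		enum exynos_sysmmu_inttype itype,
		phys_addr_t pgtable_base, unsigned long fault_addr)
{
	pr_warn("System MMU fault %d at %#lx (page table base %pa)\n",
		itype, fault_addr, &pgtable_base);
	return 0; /* handled: clear the interrupt and unblock */
}
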
static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	REG_PAGE_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_DEFAULT_SLAVE_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT",
	"UNKNOWN FAULT"
};

struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.node */
	unsigned long *pgtable; /* lv1 page table, 16KB */
	short *lv2entcnt; /* free lv2 entry counter for each section */
	spinlock_t lock; /* lock for this structure */
	spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
};

struct sysmmu_drvdata {
	struct list_head node; /* entry of exynos_iommu_domain.clients */
	struct device *sysmmu;	/* System MMU's device descriptor */
	struct device *dev;	/* Owner of system MMU */
	char *dbgname;
	void __iomem *sfrbase;
	struct clk *clk;
	int activations;
	rwlock_t lock;
	struct iommu_domain *domain;
	sysmmu_fault_handler_t fault_handler;
	phys_addr_t pgtable;
};

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU was not active previously
	   and it needs to be initialized */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU needs to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}

static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(void __iomem *sfrbase)
{
	int i = 120;

	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(sfrbase);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
				unsigned long iova, unsigned int num_inv)
{
	unsigned int i;

	for (i = 0; i < num_inv; i++) {
		__raw_writel((iova & SPAGE_MASK) | 1,
				sfrbase + REG_MMU_FLUSH_ENTRY);
		iova += SPAGE_SIZE;
	}
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase,
				unsigned long pgd)
{
	__raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}

static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
				unsigned long size, int idx)
{
	__raw_writel(base, sfrbase + REG_PB0_SADDR + idx * 8);
	__raw_writel(size - 1 + base, sfrbase + REG_PB0_EADDR + idx * 8);
}

static void __set_fault_handler(struct sysmmu_drvdata *data,
					sysmmu_fault_handler_t handler)
{
	unsigned long flags;

	write_lock_irqsave(&data->lock, flags);
	data->fault_handler = handler;
	write_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_set_fault_handler(struct device *dev,
					sysmmu_fault_handler_t handler)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

	__set_fault_handler(data, handler);
}

static int default_fault_handler(enum exynos_sysmmu_inttype itype,
			phys_addr_t pgtable_base, unsigned long fault_addr)
{
	unsigned long *ent;

	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
		itype = SYSMMU_FAULT_UNKNOWN;

	pr_err("%s occurred at 0x%lx (Page table base: %pa)\n",
		sysmmu_fault_name[itype], fault_addr, &pgtable_base);

	ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: 0x%lx\n", *ent);

	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_err("\t Lv2 entry: 0x%lx\n", *ent);
	}

	pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");

	BUG();

	return 0;
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* The System MMU is in a blocked state when an interrupt occurs. */
	struct sysmmu_drvdata *data = dev_id;
	enum exynos_sysmmu_inttype itype;
	unsigned long addr = -1;
	int ret = -ENOSYS;

	read_lock(&data->lock);

	WARN_ON(!is_sysmmu_active(data));

	itype = (enum exynos_sysmmu_inttype)
		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
		itype = SYSMMU_FAULT_UNKNOWN;
	else
		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

	if (data->domain)
		ret = report_iommu_fault(data->domain, data->dev, addr, itype);

	if ((ret == -ENOSYS) && data->fault_handler) {
		unsigned long base = data->pgtable;
		if (itype != SYSMMU_FAULT_UNKNOWN)
			base = __raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
		ret = data->fault_handler(itype, base, addr);
	}

	if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
		__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);
	else
		dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
				data->dbgname, sysmmu_fault_name[itype]);

	if (itype != SYSMMU_FAULT_UNKNOWN)
		sysmmu_unblock(data->sfrbase);

	read_unlock(&data->lock);

	return IRQ_HANDLED;
}

static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;
	bool disabled = false;

	write_lock_irqsave(&data->lock, flags);

	if (!set_sysmmu_inactive(data))
		goto finish;

	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);

	if (!IS_ERR(data->clk))
		clk_disable(data->clk);

	disabled = true;
	data->pgtable = 0;
	data->domain = NULL;
finish:
	write_unlock_irqrestore(&data->lock, flags);

	if (disabled)
		dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname);
	else
		dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n",
					data->dbgname, data->activations);

	return disabled;
}

/* __exynos_sysmmu_enable: Enables System MMU
 *
 * returns -error if an error occurred and System MMU is not enabled,
 * 0 if the System MMU has just been enabled and 1 if System MMU was already
 * enabled before.
 */
static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
			unsigned long pgtable, struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	write_lock_irqsave(&data->lock, flags);

	if (!set_sysmmu_active(data)) {
		if (WARN_ON(pgtable != data->pgtable)) {
			ret = -EBUSY;
			set_sysmmu_inactive(data);
		} else {
			ret = 1;
		}

		dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname);
		goto finish;
	}

	if (!IS_ERR(data->clk))
		clk_enable(data->clk);

	data->pgtable = pgtable;

	__sysmmu_set_ptbase(data->sfrbase, pgtable);
	if ((readl(data->sfrbase + REG_MMU_VERSION) >> 28) == 3) {
		/* System MMU version is 3.x */
		__raw_writel((1 << 12) | (2 << 28),
				data->sfrbase + REG_MMU_CFG);
		__sysmmu_set_prefbuf(data->sfrbase, 0, -1, 0);
		__sysmmu_set_prefbuf(data->sfrbase, 0, -1, 1);
	}

	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	data->domain = domain;

	dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
finish:
	write_unlock_irqrestore(&data->lock, flags);

	return ret;
}

int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	int ret;

	BUG_ON(!memblock_is_memory(pgtable));

	ret = pm_runtime_get_sync(data->sysmmu);
	if (ret < 0) {
		dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname);
		return ret;
	}

	ret = __exynos_sysmmu_enable(data, pgtable, NULL);
	if (WARN_ON(ret < 0)) {
		pm_runtime_put(data->sysmmu);
		dev_err(data->sysmmu,
			"(%s) Already enabled with page table %#x\n",
			data->dbgname, data->pgtable);
	} else {
		data->dev = dev;
	}

	return ret;
}

static bool exynos_sysmmu_disable(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	bool disabled;

	disabled = __exynos_sysmmu_disable(data);
	pm_runtime_put(data->sysmmu);

	return disabled;
}

static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova,
					size_t size)
{
	unsigned long flags;
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

	read_lock_irqsave(&data->lock, flags);

	if (is_sysmmu_active(data)) {
		unsigned int maj;
		unsigned int num_inv = 1;

		maj = __raw_readl(data->sfrbase + REG_MMU_VERSION);
		/*
		 * L2TLB invalidation required per page:
		 *   4KB page: 1 invalidation
		 *  64KB page: 16 invalidations
		 *   1MB page: 64 invalidations
		 * because the TLB is 8-way set-associative with 64 sets.
		 * A 1MB page can be cached in any of the sets, while a
		 * 64KB page can be in one of 16 consecutive sets.
		 */
		if ((maj >> 28) == 2) /* major version number */
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate_entry(
				data->sfrbase, iova, num_inv);
			sysmmu_unblock(data->sfrbase);
		}
	} else {
		dev_dbg(data->sysmmu,
			"(%s) Disabled. Skipping TLB invalidation.\n",
			data->dbgname);
	}

	read_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
	unsigned long flags;
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

	read_lock_irqsave(&data->lock, flags);

	if (is_sysmmu_active(data)) {
		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate(data->sfrbase);
			sysmmu_unblock(data->sfrbase);
		}
	} else {
		dev_dbg(data->sysmmu,
			"(%s) Disabled. Skipping TLB invalidation.\n",
			data->dbgname);
	}

	read_unlock_irqrestore(&data->lock, flags);
}

static int exynos_sysmmu_probe(struct platform_device *pdev)
{
	int ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		dev_dbg(dev, "Not enough memory\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_dbg(dev, "Unable to find IOMEM region\n");
		ret = -ENOENT;
		goto err_init;
	}

	data->sfrbase = ioremap(res->start, resource_size(res));
	if (!data->sfrbase) {
		dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n", res->start);
		ret = -ENOENT;
		goto err_init;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret <= 0) {
		dev_dbg(dev, "Unable to find IRQ resource\n");
		if (!ret)
			ret = -ENOENT;
		goto err_res;
	}

	ret = request_irq(ret, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_dbg(dev, "Unable to register interrupt handler\n");
		goto err_res;
	}

	if (dev_get_platdata(dev)) {
		data->clk = clk_get(dev, "sysmmu");
		if (IS_ERR(data->clk))
			dev_dbg(dev, "No clock descriptor registered\n");
	}

	data->sysmmu = dev;
	rwlock_init(&data->lock);
	INIT_LIST_HEAD(&data->node);

	__set_fault_handler(data, &default_fault_handler);

	platform_set_drvdata(pdev, data);

	if (dev->parent)
		pm_runtime_enable(dev);

	dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
	return 0;
err_res:
	iounmap(data->sfrbase);
err_init:
	kfree(data);
err_alloc:
	dev_err(dev, "Failed to initialize\n");
	return ret;
}

static struct platform_driver exynos_sysmmu_driver = {
	.probe		= exynos_sysmmu_probe,
	.driver		= {
		.owner		= THIS_MODULE,
		.name		= "exynos-sysmmu",
	}
};

static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
				virt_to_phys(vaend));
}

static int exynos_iommu_domain_init(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pgtable = (unsigned long *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO, 2);
	if (!priv->pgtable)
		goto err_pgtable;

	priv->lv2entcnt = (short *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO, 1);
	if (!priv->lv2entcnt)
		goto err_counter;

	pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->pgtablelock);
	INIT_LIST_HEAD(&priv->clients);

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = ~0UL;
	domain->geometry.force_aperture = true;

	domain->priv = priv;
	return 0;

err_counter:
	free_pages((unsigned long)priv->pgtable, 2);
err_pgtable:
	kfree(priv);
	return -ENOMEM;
}

static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct sysmmu_drvdata *data;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&priv->clients));

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(data, &priv->clients, node) {
		while (!exynos_sysmmu_disable(data->dev))
			; /* until System MMU is actually disabled */
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(priv->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,
				phys_to_virt(lv2table_base(priv->pgtable + i)));

	free_pages((unsigned long)priv->pgtable, 2);
	free_pages((unsigned long)priv->lv2entcnt, 1);
	kfree(domain->priv);
	domain->priv = NULL;
}

static int exynos_iommu_attach_device(struct iommu_domain *domain,
				struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	struct exynos_iommu_domain *priv = domain->priv;
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;
	int ret;

	ret = pm_runtime_get_sync(data->sysmmu);
	if (ret < 0)
		return ret;

	ret = 0;

	spin_lock_irqsave(&priv->lock, flags);

	ret = __exynos_sysmmu_enable(data, pagetable, domain);

	if (ret == 0) {
		/* 'data->node' must not already appear in priv->clients */
		BUG_ON(!list_empty(&data->node));
		data->dev = dev;
		list_add_tail(&data->node, &priv->clients);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
			__func__, &pagetable);
		pm_runtime_put(data->sysmmu);
		return ret;
	}

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
		__func__, &pagetable, (ret == 0) ? "" : ", again");

	return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *domain,
				struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	struct exynos_iommu_domain *priv = domain->priv;
	struct list_head *pos;
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each(pos, &priv->clients) {
		if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
			found = true;
			break;
		}
	}

	if (!found)
		goto finish;

	if (__exynos_sysmmu_disable(data)) {
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
			__func__, &pagetable);
		list_del_init(&data->node);

	} else {
		dev_dbg(dev, "%s: Detaching IOMMU with pgtable %pa delayed\n",
			__func__, &pagetable);
	}

finish:
	spin_unlock_irqrestore(&priv->lock, flags);

	if (found)
		pm_runtime_put(data->sysmmu);
}

static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
					short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08lx mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		unsigned long *pent;

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		*sent = mk_lv1ent_page(virt_to_phys(pent));
		*pgcounter = NUM_LV2ENTRIES;
		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
		pgtable_flush(sent, sent + 1);
	}

	return page_entry(sent, iova);
}

static int lv1set_section(unsigned long *sent, unsigned long iova,
			phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08lx that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08lx that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	*sent = mk_lv1ent_sect(paddr);

	pgtable_flush(sent, sent + 1);

	return 0;
}

static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
								short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (!lv2ent_fault(pent)) {
			WARN(1, "Trying mapping on 4KiB where mapping exists");
			return -EADDRINUSE;
		}

		*pent = mk_lv2ent_spage(paddr);
		pgtable_flush(pent, pent + 1);
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;

		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (!lv2ent_fault(pent)) {
				WARN(1,
				"Trying mapping on 64KiB where mapping exists");

				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);

				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct exynos_iommu_domain *priv = domain->priv;
	unsigned long *entry;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(entry, iova, paddr,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		unsigned long *pent;

		pent = alloc_lv2entry(entry, iova,
					&priv->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n",
							__func__, iova, size);

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return ret;
}

static size_t exynos_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct sysmmu_drvdata *data;
	unsigned long flags;
	unsigned long *ent;
	size_t err_pgsize;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	ent = section_entry(priv->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (size < SECT_SIZE) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		*ent = 0;
		pgtable_flush(ent, ent + 1);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(ent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		*ent = 0;
		size = SPAGE_SIZE;
		priv->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (size < LPAGE_SIZE) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);

	size = LPAGE_SIZE;
	priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	spin_lock_irqsave(&priv->lock, flags);
	list_for_each_entry(data, &priv->clients, node)
		sysmmu_tlb_invalidate_entry(data->dev, iova, size);
	spin_unlock_irqrestore(&priv->lock, flags);

	return size;
err:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	WARN(1,
		"%s: Failed: size(%#x) @ %#08lx is smaller than page size %#x\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
					dma_addr_t iova)
{
	struct exynos_iommu_domain *priv = domain->priv;
	unsigned long *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return phys;
}

static struct iommu_ops exynos_iommu_ops = {
	.domain_init = &exynos_iommu_domain_init,
	.domain_destroy = &exynos_iommu_domain_destroy,
	.attach_dev = &exynos_iommu_attach_device,
	.detach_dev = &exynos_iommu_detach_device,
	.map = &exynos_iommu_map,
	.unmap = &exynos_iommu_unmap,
	.iova_to_phys = &exynos_iommu_iova_to_phys,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};

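/*
 * A minimal usage sketch of these callbacks through the generic IOMMU
 * API of this kernel generation (illustration only; "dev", "paddr" and
 * the error handling are hypothetical). iommu_attach_device() ends up
 * in exynos_iommu_attach_device(), and iommu_map()/iommu_unmap() end up
 * in exynos_iommu_map()/exynos_iommu_unmap() above:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (!domain)
 *		return -ENOMEM;
 *	ret = iommu_attach_device(domain, dev);
 *	if (!ret) {
 *		ret = iommu_map(domain, 0x20000000, paddr, LPAGE_SIZE,
 *				IOMMU_READ | IOMMU_WRITE);
 *		...
 *		iommu_unmap(domain, 0x20000000, LPAGE_SIZE);
 *		iommu_detach_device(domain, dev);
 *	}
 *	iommu_domain_free(domain);
 */
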
static int __init exynos_iommu_init(void)
{
	int ret;

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
		LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
								__func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
subsys_initcall(exynos_iommu_init);