/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/dma-iommu.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
{
	return iova & (size - 1);
}

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}
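
/*
 * Example: with the constants above, IOVA 0x12345678 decomposes as
 * lv1ent_offset() = 0x12345678 >> 20          = 0x123 (lv1 index),
 * lv2ent_offset() = (0x12345678 >> 12) & 0xFF = 0x45  (lv2 index),
 * spage_offs()    = 0x12345678 & 0xFFF        = 0x678 (offset in a 4KiB page).
 */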

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define CFG_LRU		0x1
#define CFG_QOS(n)	((n & 0xF) << 7)
#define CFG_MASK	0x0150FFFF /* Selecting bit 0-15, 20, 22 and 24 */
#define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */

#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_FLUSH		0x00C
#define REG_MMU_FLUSH_ENTRY	0x010
#define REG_PT_BASE_ADDR	0x014
#define REG_INT_STATUS		0x018
#define REG_INT_CLEAR		0x01C

#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

#define REG_MMU_VERSION		0x034

#define MMU_MAJ_VER(val)	((val) >> 7)
#define MMU_MIN_VER(val)	((val) & 0x7F)
#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))
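
/*
 * Example: a REG_MMU_VERSION read of 0x30600000 gives
 * MMU_RAW_VER() = (0x30600000 >> 21) & 0x7FF = 0x183, so
 * MMU_MAJ_VER() = 3 and MMU_MIN_VER() = 3, i.e. MAKE_MMU_VER(3, 3).
 */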

#define REG_PB0_SADDR		0x04C
#define REG_PB0_EADDR		0x050
#define REG_PB1_SADDR		0x054
#define REG_PB1_EADDR		0x058

#define has_sysmmu(dev)		(dev->archdata.iommu != NULL)

static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION, /* 7 */
	SYSMMU_FAULT_UNKNOWN,
	SYSMMU_FAULTS_NUM
};

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	REG_PAGE_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_DEFAULT_SLAVE_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT",
	"UNKNOWN FAULT"
};

/*
 * This structure is attached to dev.archdata.iommu of the master device
 * on device add. It contains a list of the SYSMMU controllers defined by
 * the device tree, which are bound to the given master device. It is
 * usually referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
};

/*
 * This structure is an exynos-specific generalization of struct iommu_domain.
 * It contains a list of the SYSMMU controllers of all master devices that
 * have been attached to this domain, and the page tables of the IO address
 * space it defines. It is usually referenced by the 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients;	/* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;		/* lv1 page table, 16KB */
	short *lv2entcnt;		/* free lv2 entry counter for each section */
	spinlock_t lock;		/* lock for modifying list of clients */
	spinlock_t pgtablelock;		/* lock for modifying page table @ pgtable */
	struct iommu_domain domain;	/* generic domain data structure */
};

/*
 * This structure holds all data of a single SYSMMU controller. This includes
 * hw resources like registers and clocks, pointers and list nodes to connect
 * it to all other structures, internal state and parameters read from the
 * device tree. It is usually referenced by the 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *clk_master;		/* master's device clock */
	int activations;		/* number of calls to sysmmu_enable */
	spinlock_t lock;		/* lock for modifying state */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU was not active previously
	   and it needs to be initialized */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU needs to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}

static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(void __iomem *sfrbase)
{
	int i = 120;

	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(sfrbase);
		return false;
	}

	return true;
}
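
/*
 * sysmmu_block()/sysmmu_unblock() bracket register updates that must not
 * race with in-flight translations. The pattern, as used by
 * sysmmu_tlb_invalidate_entry() below:
 *
 *	if (sysmmu_block(sfrbase)) {
 *		__sysmmu_tlb_invalidate_entry(sfrbase, iova, num_inv);
 *		sysmmu_unblock(sfrbase);
 *	}
 */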

static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
				sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	for (i = 0; i < num_inv; i++) {
		__raw_writel((iova & SPAGE_MASK) | 1,
				sfrbase + REG_MMU_FLUSH_ENTRY);
		iova += SPAGE_SIZE;
	}
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase,
				phys_addr_t pgd)
{
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}

static void show_fault_information(const char *name,
		enum exynos_sysmmu_inttype itype,
		phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
		itype = SYSMMU_FAULT_UNKNOWN;

	pr_err("%s occurred at %#x by %s(Page table base: %pa)\n",
		sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);

	ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: %#x\n", *ent);

	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_err("\t Lv2 entry: %#x\n", *ent);
	}
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when the interrupt occurs */
	struct sysmmu_drvdata *data = dev_id;
	enum exynos_sysmmu_inttype itype;
	sysmmu_iova_t addr = -1;
	int ret = -ENOSYS;

	WARN_ON(!is_sysmmu_active(data));

	spin_lock(&data->lock);

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	itype = (enum exynos_sysmmu_inttype)
		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
		itype = SYSMMU_FAULT_UNKNOWN;
	else
		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

	if (itype == SYSMMU_FAULT_UNKNOWN) {
		pr_err("%s: Fault was not caused by System MMU '%s'!\n",
			__func__, dev_name(data->sysmmu));
		pr_err("%s: Please check if IRQ is correctly configured.\n",
			__func__);
		BUG();
	} else {
		unsigned int base =
				__raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
		show_fault_information(dev_name(data->sysmmu),
					itype, base, addr);
		if (data->domain)
			ret = report_iommu_fault(&data->domain->domain,
					data->master, addr, itype);
	}

	/* fault was not recovered by the fault handler */
	BUG_ON(ret != 0);

	__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

	sysmmu_unblock(data->sfrbase);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	__raw_writel(0, data->sfrbase + REG_MMU_CFG);

	clk_disable(data->clk);
	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
	bool disabled;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);

	disabled = set_sysmmu_inactive(data);

	if (disabled) {
		data->pgtable = 0;
		data->domain = NULL;

		__sysmmu_disable_nocount(data);

		dev_dbg(data->sysmmu, "Disabled\n");
	} else {
		dev_dbg(data->sysmmu, "%d times left to disable\n",
					data->activations);
	}

	spin_unlock_irqrestore(&data->lock, flags);

	return disabled;
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg = CFG_LRU | CFG_QOS(15);
	unsigned int ver;

	ver = MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
	if (MMU_MAJ_VER(ver) == 3) {
		if (MMU_MIN_VER(ver) >= 2) {
			cfg |= CFG_FLPDCACHE;
			if (MMU_MIN_VER(ver) == 3) {
				cfg |= CFG_ACGEN;
				cfg &= ~CFG_LRU;
			} else {
				cfg |= CFG_SYSSEL;
			}
		}
	}

	__raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
	data->version = ver;
}

static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);
	clk_enable(data->clk);

	__raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

	__sysmmu_init_config(data);

	__sysmmu_set_ptbase(data->sfrbase, data->pgtable);

	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
			   struct exynos_iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (set_sysmmu_active(data)) {
		data->pgtable = pgtable;
		data->domain = domain;

		__sysmmu_enable_nocount(data);

		dev_dbg(data->sysmmu, "Enabled\n");
	} else {
		ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

		dev_dbg(data->sysmmu, "already enabled\n");
	}

	if (WARN_ON(ret < 0))
		set_sysmmu_inactive(data); /* decrement count */

	spin_unlock_irqrestore(&data->lock, flags);

	return ret;
}

static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					      sysmmu_iova_t iova)
{
	if (data->version == MAKE_MMU_VER(3, 3))
		__raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data))
		__sysmmu_tlb_invalidate_flpdcache(data, iova);
	spin_unlock_irqrestore(&data->lock, flags);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		unsigned int num_inv = 1;

		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is an 8-way set-associative TLB
		 * with 64 sets.
		 * A 1MB page can be cached in any of the sets;
		 * a 64KB page in one of 16 consecutive sets.
		 */
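		/*
		 * Example: unmapping one 64KB large page gives
		 * size / PAGE_SIZE = 16 on a 4KB-page kernel, so the
		 * v2 path below issues min(16, 64) = 16 FLUSH_ENTRY
		 * writes, one per 4KB step of the large page.
		 */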
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate_entry(
				data->sfrbase, iova, num_inv);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(data->master,
			"disabled. Skipping TLB invalidation @ %#x\n", iova);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (IS_ERR(data->clk)) {
		dev_err(dev, "Failed to get clock!\n");
		return PTR_ERR(data->clk);
	} else {
		ret = clk_prepare(data->clk);
		if (ret) {
			dev_err(dev, "Failed to prepare clk\n");
			return ret;
		}
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (!IS_ERR(data->clk_master)) {
		ret = clk_prepare(data->clk_master);
		if (ret) {
			clk_unprepare(data->clk);
			dev_err(dev, "Failed to prepare master's clk\n");
			return ret;
		}
	}

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	platform_set_drvdata(pdev, data);

	pm_runtime_enable(dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);

	dev_dbg(dev, "suspend\n");
	if (is_sysmmu_active(data)) {
		__sysmmu_disable_nocount(data);
		pm_runtime_put(dev);
	}
	return 0;
}

static int exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);

	dev_dbg(dev, "resume\n");
	if (is_sysmmu_active(data)) {
		pm_runtime_get_sync(dev);
		__sysmmu_enable_nocount(data);
	}
	return 0;
}
#endif

static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume)
};

static const struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
		.pm		= &sysmmu_pm_ops,
	}
};

static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
				virt_to_phys(vaend));
}
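
/*
 * The System MMU's page table walker fetches descriptors directly from
 * memory and does not snoop the CPU caches, so every page table update
 * must be cleaned out of the inner (dmac_flush_range) and outer
 * (outer_flush_range) caches before the walker can observe it.
 */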

static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	int i;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA) {
		if (iommu_get_dma_cookie(&domain->domain) != 0)
			goto err_pgtable;
	} else if (type != IOMMU_DOMAIN_UNMANAGED) {
		goto err_pgtable;
	}

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_dma_cookie;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
		domain->pgtable[i + 0] = ZERO_LV2LINK;
		domain->pgtable[i + 1] = ZERO_LV2LINK;
		domain->pgtable[i + 2] = ZERO_LV2LINK;
		domain->pgtable[i + 3] = ZERO_LV2LINK;
		domain->pgtable[i + 4] = ZERO_LV2LINK;
		domain->pgtable[i + 5] = ZERO_LV2LINK;
		domain->pgtable[i + 6] = ZERO_LV2LINK;
		domain->pgtable[i + 7] = ZERO_LV2LINK;
	}

	pgtable_flush(domain->pgtable, domain->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end   = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_dma_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&domain->domain);
err_pgtable:
	kfree(domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		if (__sysmmu_disable(data))
			data->master = NULL;
		list_del_init(&data->domain_node);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	if (iommu_domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(iommu_domain);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,
				phys_to_virt(lv2table_base(domain->pgtable + i)));

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}

static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;
	int ret = -ENODEV;

	if (!has_sysmmu(dev))
		return -ENODEV;

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_sync(data->sysmmu);
		ret = __sysmmu_enable(data, pagetable, domain);
		if (ret >= 0) {
			data->master = dev;

			spin_lock_irqsave(&domain->lock, flags);
			list_add_tail(&data->domain_node, &domain->clients);
			spin_unlock_irqrestore(&domain->lock, flags);
		}
	}

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
			__func__, &pagetable);
		return ret;
	}

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
		__func__, &pagetable, (ret == 0) ? "" : ", again");

	return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				       struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	bool found = false;

	if (!has_sysmmu(dev))
		return;

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		if (data->master == dev) {
			if (__sysmmu_disable(data)) {
				data->master = NULL;
				list_del_init(&data->domain_node);
			}
			pm_runtime_put(data->sysmmu);
			found = true;
		}
	}
	spin_unlock_irqrestore(&domain->lock, flags);

	if (found)
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
					__func__, &pagetable);
	else
		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying to map on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		*sent = mk_lv1ent_page(virt_to_phys(pent));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
		pgtable_flush(sent, sent + 1);

		/*
		 * If a pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * the FLPD cache may cache the address of zero_l2_table.
		 * This function replaces the zero_l2_table with a new L2
		 * page table to write valid mappings.
		 * Accessing the valid area may cause a page fault since the
		 * FLPD cache may still cache zero_l2_table for the valid
		 * area instead of the new L2 page table that holds the
		 * mapping information of the valid area.
		 * Thus any replacement of zero_l2_table with another valid
		 * L2 page table must involve an FLPD cache invalidation on
		 * System MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate the TLB
		 * without blocking because the target address of the TLB
		 * invalidation is not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying to map 1MiB@%#08x that is already mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying to map 1MiB@%#08x that is already mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	*sent = mk_lv1ent_sect(paddr);

	pgtable_flush(sent, sent + 1);

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing the FLPD cache in System MMU v3.3, which may
		 * cache an FLPD entry by speculative prefetch of an SLPD
		 * that has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		*pent = mk_lv2ent_spage(paddr);
		pgtable_flush(pent, pent + 1);
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;

		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance by caching more page table entries during a page table walk.
 * However, the logic has a bug: while faulty page table entries are cached,
 * System MMU reports a page fault if a cached fault entry is hit, even
 * though the fault entry has since been updated to a valid entry.
 * To prevent caching faulty page table entries which may be updated to valid
 * entries later, the virtual memory manager must apply the workaround
 * described below.
 *
 * Any two consecutive I/O virtual address regions must have a hole of at
 * least 128KiB to prevent misbehavior of System MMU 3.x (workaround for
 * h/w bug).
 *
 * Precisely, any start address of an I/O virtual region must be aligned to
 * the following sizes for System MMU v3.1 and v3.2.
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it needs
 * more workarounds.
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - Start address of an I/O virtual region must be aligned by 128KiB.
 */
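
/*
 * For example, under the v3.3 rules above, a region mapped at
 * [0x40000000, 0x40100000) must be followed by a hole of at least
 * 128KiB: the next region may start at 0x40120000 (which is also
 * 128KiB-aligned), but not at 0x40110000.
 */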
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size,
				       &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		*ent = ZERO_LV2LINK;
		pgtable_flush(ent, ent + 1);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(ent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		*ent = 0;
		size = SPAGE_SIZE;
		pgtable_flush(ent, ent + 1);
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	pgtable_flush(ent, ent + SPAGES_PER_LPAGE);

	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}

static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}

static int exynos_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;

	if (!has_sysmmu(dev))
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	return 0;
}

static void exynos_iommu_remove_device(struct device *dev)
{
	if (!has_sysmmu(dev))
		return;

	iommu_group_remove_device(dev);
}

static int exynos_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *spec)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct sysmmu_drvdata *data;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data)
		return -ENODEV;

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner)
			return -ENOMEM;

		INIT_LIST_HEAD(&owner->controllers);
		dev->archdata.iommu = owner;
	}

	list_add_tail(&data->owner_node, &owner->controllers);
	return 0;
}

static struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.device_group = get_device_iommu_group,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
};

static bool init_done;

static int __init exynos_iommu_init(void)
{
	int ret;

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
								__func__);
		goto err_set_iommu;
	}

	init_done = true;

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}

static int __init exynos_iommu_of_setup(struct device_node *np)
{
	struct platform_device *pdev;

	if (!init_done)
		exynos_iommu_init();

	pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	of_iommu_set_ops(np, &exynos_iommu_ops);
	return 0;
}

IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu",
		 exynos_iommu_of_setup);