/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 * http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/dma-iommu.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
                           ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
                          ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

#ifdef CONFIG_BIG_ENDIAN
#warning "revisit driver if we can enable big-endian ptes"
#endif

/*
 * v1.x - v3.x SYSMMU supports 32-bit physical and 32-bit virtual address
 * spaces. v5.0 introduced support for a 36-bit physical address space by
 * shifting all page entry values by 4 bits.
 * All SYSMMU controllers in the system support address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized on the first SYSMMU probe to the
 * proper value (0 or 4).
 */
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4
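
/*
 * Example of the v5 encoding: with PG_ENT_SHIFT == 4, sect_to_phys() (see
 * below) left-shifts each 32-bit entry by 4, so a raw PTE value of
 * 0x10000000 corresponds to physical address 0x100000000 (bit 32 set),
 * which the v1.x-v3.x encoding (shift 0) cannot express.
 */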

static const sysmmu_pte_t *LV1_PROT;
static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
        ((0 << 15) | (0 << 10)), /* no access */
        ((1 << 15) | (1 << 10)), /* IOMMU_READ only */
        ((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
        ((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
        (0 << 4), /* no access */
        (1 << 4), /* IOMMU_READ only */
        (2 << 4), /* IOMMU_WRITE only */
        (3 << 4), /* IOMMU_READ | IOMMU_WRITE */
};

static const sysmmu_pte_t *LV2_PROT;
static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
        ((0 << 9) | (0 << 4)), /* no access */
        ((1 << 9) | (1 << 4)), /* IOMMU_READ only */
        ((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
        ((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
        (0 << 2), /* no access */
        (1 << 2), /* IOMMU_READ only */
        (2 << 2), /* IOMMU_WRITE only */
        (3 << 2), /* IOMMU_READ | IOMMU_WRITE */
};

#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)
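
/*
 * The LV1_PROT/LV2_PROT tables above are indexed directly by the generic
 * IOMMU prot flags: IOMMU_READ is bit 0 and IOMMU_WRITE is bit 1, so
 * (prot & SYSMMU_SUPPORTED_PROT_BITS) yields an index in the range 0-3.
 * For example, IOMMU_READ | IOMMU_WRITE == 3 selects SYSMMU_V5_LV2_PROT[3],
 * i.e. the value (3 << 2), on a v5 controller.
 */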

#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
        return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
        return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}
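
/*
 * Worked example: for iova 0x12345678, lv1ent_offset() returns
 * 0x12345678 >> 20 = 0x123 and lv2ent_offset() returns
 * (0x12345678 >> 12) & 0xff = 0x45; the remaining low bits are the
 * in-page offset (spage_offs() == 0x678 for a 4KiB small page).
 */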

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)
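
/*
 * Example encoding on a v1.x-v3.x controller (PG_ENT_SHIFT == 0):
 * mk_lv2ent_spage(0x40001000, IOMMU_READ | IOMMU_WRITE) yields
 * 0x40001000 | SYSMMU_LV2_PROT[3] | 2 == 0x40001012, i.e. the physical
 * page address, the read/write protection bits and the "small page" type
 * bits packed into one 32-bit entry (prot is assumed to be pre-masked
 * with SYSMMU_SUPPORTED_PROT_BITS, as exynos_iommu_map() does).
 */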

#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define CFG_LRU 0x1
#define CFG_EAP (1 << 2)
#define CFG_QOS(n) ((n & 0xF) << 7)
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */

/* common registers */
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_VERSION 0x034

#define MMU_MAJ_VER(val) ((val) >> 7)
#define MMU_MIN_VER(val) ((val) & 0x7F)
#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))
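
/*
 * Example: a REG_MMU_VERSION value of 0x30600000 carries 0x183 in its top
 * 11 bits, so MMU_RAW_VER() returns 0x183 == MAKE_MMU_VER(3, 3), which
 * MMU_MAJ_VER()/MMU_MIN_VER() decode as hardware version 3.3.
 */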

/* v1.x - v3.x registers */
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

/* v5.x registers */
#define REG_V5_PT_BASE_PFN 0x00C
#define REG_V5_MMU_FLUSH_ALL 0x010
#define REG_V5_MMU_FLUSH_ENTRY 0x014
#define REG_V5_INT_STATUS 0x060
#define REG_V5_INT_CLEAR 0x064
#define REG_V5_FAULT_AR_VA 0x070
#define REG_V5_FAULT_AW_VA 0x080

#define has_sysmmu(dev) (dev->archdata.iommu != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
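
/*
 * zero_lv2_table is a single shared lv2 table, allocated at init time (not
 * shown in this section) and kept full of fault entries; every unused lv1
 * slot points to it via ZERO_LV2LINK so that a speculative page table walk
 * always reads a harmless fault entry instead of garbage. See the FLPD
 * cache comments in alloc_lv2entry() below.
 */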

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
        return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
        return (sysmmu_pte_t *)phys_to_virt(
                                lv2table_base(sent)) + lv2ent_offset(iova);
}

/*
 * IOMMU fault information
 */
struct sysmmu_fault_info {
        unsigned int bit;        /* bit number in STATUS register */
        unsigned short addr_reg; /* register to read VA fault address */
        const char *name;        /* human readable fault name */
        unsigned int type;       /* fault type for report_iommu_fault */
};

static const struct sysmmu_fault_info sysmmu_faults[] = {
        { 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
        { 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
        { 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
        { 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
        { 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
        { 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
        { 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
        { 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
        { 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
        { 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
        { 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
        { 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
        { 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
        { 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
        { 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
        { 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
        { 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
        { 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
};

/*
 * This structure is attached to dev->archdata.iommu of the master device
 * on device add. It contains the list of SYSMMU controllers defined by
 * the device tree, which are bound to the given master device. It is
 * usually referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
        struct list_head controllers;  /* list of sysmmu_drvdata.owner_node */
        struct iommu_domain *domain;   /* domain this device is attached to */
};

/*
 * This structure is an Exynos-specific generalization of struct
 * iommu_domain. It contains the list of SYSMMU controllers from all master
 * devices that have been attached to this domain, and the page tables of
 * the IO address space it defines. It is usually referenced by the
 * 'domain' pointer.
 */
struct exynos_iommu_domain {
        struct list_head clients; /* list of sysmmu_drvdata.domain_node */
        sysmmu_pte_t *pgtable;    /* lv1 page table, 16KB */
        short *lv2entcnt;         /* free lv2 entry counter for each section */
        spinlock_t lock;          /* lock for modifying list of clients */
        spinlock_t pgtablelock;   /* lock for modifying page table @ pgtable */
        struct iommu_domain domain; /* generic domain data structure */
};

/*
 * This structure holds all data of a single SYSMMU controller: hw
 * resources like registers and clocks, pointers and list nodes to connect
 * it to all other structures, internal state and parameters read from the
 * device tree. It is usually referenced by the 'data' pointer.
 */
struct sysmmu_drvdata {
        struct device *sysmmu;    /* SYSMMU controller device */
        struct device *master;    /* master device (owner) */
        void __iomem *sfrbase;    /* our registers */
        struct clk *clk;          /* SYSMMU's clock */
        struct clk *aclk;         /* SYSMMU's aclk clock */
        struct clk *pclk;         /* SYSMMU's pclk clock */
        struct clk *clk_master;   /* master's device clock */
        int activations;          /* number of calls to sysmmu_enable */
        spinlock_t lock;          /* lock for modifying state */
        struct exynos_iommu_domain *domain; /* domain we belong to */
        struct list_head domain_node; /* node for domain clients list */
        struct list_head owner_node;  /* node for owner controllers list */
        phys_addr_t pgtable;      /* assigned page table structure */
        unsigned int version;     /* our version */
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct exynos_iommu_domain, domain);
}

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
        /* return true if the System MMU was not active previously
           and it needs to be initialized */
        return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
        /* return true if the System MMU needs to be disabled */
        BUG_ON(data->activations < 1);
        return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
        return data->activations > 0;
}

static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
        writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(struct sysmmu_drvdata *data)
{
        int i = 120;

        writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
        while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
                --i;

        if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
                sysmmu_unblock(data);
                return false;
        }

        return true;
}
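
/*
 * sysmmu_block() stops address translation by writing CTRL_BLOCK and then
 * polls bit 0 of REG_MMU_STATUS (up to 120 times) until the hardware
 * confirms the blocked state; on timeout it re-enables translation and
 * returns false. Callers bracket TLB maintenance with a
 * sysmmu_block()/sysmmu_unblock() pair, as sysmmu_tlb_invalidate_entry()
 * below does.
 */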

static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
        if (MMU_MAJ_VER(data->version) < 5)
                writel(0x1, data->sfrbase + REG_MMU_FLUSH);
        else
                writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
}

static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
                                sysmmu_iova_t iova, unsigned int num_inv)
{
        unsigned int i;

        for (i = 0; i < num_inv; i++) {
                if (MMU_MAJ_VER(data->version) < 5)
                        writel((iova & SPAGE_MASK) | 1,
                               data->sfrbase + REG_MMU_FLUSH_ENTRY);
                else
                        writel((iova & SPAGE_MASK) | 1,
                               data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
                iova += SPAGE_SIZE;
        }
}

static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
        if (MMU_MAJ_VER(data->version) < 5)
                writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
        else
                writel(pgd >> PAGE_SHIFT,
                       data->sfrbase + REG_V5_PT_BASE_PFN);

        __sysmmu_tlb_invalidate(data);
}

static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
{
        BUG_ON(clk_prepare_enable(data->clk_master));
        BUG_ON(clk_prepare_enable(data->clk));
        BUG_ON(clk_prepare_enable(data->pclk));
        BUG_ON(clk_prepare_enable(data->aclk));
}

static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
{
        clk_disable_unprepare(data->aclk);
        clk_disable_unprepare(data->pclk);
        clk_disable_unprepare(data->clk);
        clk_disable_unprepare(data->clk_master);
}
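
/*
 * Note the ordering: clocks are enabled starting with the master's gate
 * clock and disabled in exactly the reverse order. Clocks that were not
 * provided in the device tree are left NULL by the probe function below,
 * which the common clock framework treats as a no-op.
 */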

static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
        u32 ver;

        __sysmmu_enable_clocks(data);

        ver = readl(data->sfrbase + REG_MMU_VERSION);

        /* controllers on some SoCs don't report proper version */
        if (ver == 0x80000001u)
                data->version = MAKE_MMU_VER(1, 0);
        else
                data->version = MMU_RAW_VER(ver);

        dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
                MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

        __sysmmu_disable_clocks(data);
}

static void show_fault_information(struct sysmmu_drvdata *data,
                                   const struct sysmmu_fault_info *finfo,
                                   sysmmu_iova_t fault_addr)
{
        sysmmu_pte_t *ent;

        dev_err(data->sysmmu, "%s FAULT occurred at %#x (page table base: %pa)\n",
                finfo->name, fault_addr, &data->pgtable);
        ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
        dev_err(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
        if (lv1ent_page(ent)) {
                ent = page_entry(ent, fault_addr);
                dev_err(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
        }
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
        /* SYSMMU is in the blocked state when an interrupt occurs */
        struct sysmmu_drvdata *data = dev_id;
        const struct sysmmu_fault_info *finfo;
        unsigned int i, n, itype;
        sysmmu_iova_t fault_addr = -1;
        unsigned short reg_status, reg_clear;
        int ret = -ENOSYS;

        WARN_ON(!is_sysmmu_active(data));

        if (MMU_MAJ_VER(data->version) < 5) {
                reg_status = REG_INT_STATUS;
                reg_clear = REG_INT_CLEAR;
                finfo = sysmmu_faults;
                n = ARRAY_SIZE(sysmmu_faults);
        } else {
                reg_status = REG_V5_INT_STATUS;
                reg_clear = REG_V5_INT_CLEAR;
                finfo = sysmmu_v5_faults;
                n = ARRAY_SIZE(sysmmu_v5_faults);
        }

        spin_lock(&data->lock);

        clk_enable(data->clk_master);

        itype = __ffs(readl(data->sfrbase + reg_status));
        for (i = 0; i < n; i++, finfo++)
                if (finfo->bit == itype)
                        break;
        /* unknown/unsupported fault */
        BUG_ON(i == n);

        /* print debug message */
        fault_addr = readl(data->sfrbase + finfo->addr_reg);
        show_fault_information(data, finfo, fault_addr);

        if (data->domain)
                ret = report_iommu_fault(&data->domain->domain,
                                        data->master, fault_addr, finfo->type);
        /* fault is not recovered by fault handler */
        BUG_ON(ret != 0);

        writel(1 << itype, data->sfrbase + reg_clear);

        sysmmu_unblock(data);

        clk_disable(data->clk_master);

        spin_unlock(&data->lock);

        return IRQ_HANDLED;
}

static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
        clk_enable(data->clk_master);

        writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
        writel(0, data->sfrbase + REG_MMU_CFG);

        __sysmmu_disable_clocks(data);
}

static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
        bool disabled;
        unsigned long flags;

        spin_lock_irqsave(&data->lock, flags);

        disabled = set_sysmmu_inactive(data);

        if (disabled) {
                data->pgtable = 0;
                data->domain = NULL;

                __sysmmu_disable_nocount(data);

                dev_dbg(data->sysmmu, "Disabled\n");
        } else {
                dev_dbg(data->sysmmu, "%d times left to disable\n",
                                        data->activations);
        }

        spin_unlock_irqrestore(&data->lock, flags);

        return disabled;
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
        unsigned int cfg;

        if (data->version <= MAKE_MMU_VER(3, 1))
                cfg = CFG_LRU | CFG_QOS(15);
        else if (data->version <= MAKE_MMU_VER(3, 2))
                cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
        else
                cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

        cfg |= CFG_EAP; /* enable access protection bits check */

        writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
        __sysmmu_enable_clocks(data);

        writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

        __sysmmu_init_config(data);

        __sysmmu_set_ptbase(data, data->pgtable);

        writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

        /*
         * The SYSMMU driver keeps the master's clock enabled only for a
         * short time while accessing the registers. For address translation
         * during a DMA transaction it relies on the client driver to keep
         * the clock enabled.
         */
        clk_disable(data->clk_master);
}

static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
                           struct exynos_iommu_domain *domain)
{
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&data->lock, flags);
        if (set_sysmmu_active(data)) {
                data->pgtable = pgtable;
                data->domain = domain;

                __sysmmu_enable_nocount(data);

                dev_dbg(data->sysmmu, "Enabled\n");
        } else {
                ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

                dev_dbg(data->sysmmu, "already enabled\n");
        }

        if (WARN_ON(ret < 0))
                set_sysmmu_inactive(data); /* decrement count */

        spin_unlock_irqrestore(&data->lock, flags);

        return ret;
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
                                            sysmmu_iova_t iova)
{
        unsigned long flags;

        spin_lock_irqsave(&data->lock, flags);
        if (is_sysmmu_active(data) && data->version >= MAKE_MMU_VER(3, 3)) {
                clk_enable(data->clk_master);
                __sysmmu_tlb_invalidate_entry(data, iova, 1);
                clk_disable(data->clk_master);
        }
        spin_unlock_irqrestore(&data->lock, flags);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
                                        sysmmu_iova_t iova, size_t size)
{
        unsigned long flags;

        spin_lock_irqsave(&data->lock, flags);
        if (is_sysmmu_active(data)) {
                unsigned int num_inv = 1;

                clk_enable(data->clk_master);

                /*
                 * L2TLB invalidation required
                 * 4KB page: 1 invalidation
                 * 64KB page: 16 invalidations
                 * 1MB page: 64 invalidations
                 * because the TLB is 8-way set-associative with 64 sets.
                 * A 1MB page can be cached in any of the sets.
                 * A 64KB page can be in one of 16 consecutive sets.
                 */
                if (MMU_MAJ_VER(data->version) == 2)
                        num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

                if (sysmmu_block(data)) {
                        __sysmmu_tlb_invalidate_entry(data, iova, num_inv);
                        sysmmu_unblock(data);
                }
                clk_disable(data->clk_master);
        }
        spin_unlock_irqrestore(&data->lock, flags);
}
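
/*
 * Worked example for the v2 path above: unmapping one 64KiB large page
 * with a 4KiB PAGE_SIZE gives num_inv = min(0x10000 / 0x1000, 64) = 16,
 * matching the "16 invalidations" case in the comment; anything of 256KiB
 * or more is capped at 64, enough to wipe every set.
 */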

static struct iommu_ops exynos_iommu_ops;

static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
        int irq, ret;
        struct device *dev = &pdev->dev;
        struct sysmmu_drvdata *data;
        struct resource *res;

        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        data->sfrbase = devm_ioremap_resource(dev, res);
        if (IS_ERR(data->sfrbase))
                return PTR_ERR(data->sfrbase);

        irq = platform_get_irq(pdev, 0);
        if (irq <= 0) {
                dev_err(dev, "Unable to find IRQ resource\n");
                return irq;
        }

        ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
                                dev_name(dev), data);
        if (ret) {
                dev_err(dev, "Unable to register handler of irq %d\n", irq);
                return ret;
        }

        data->clk = devm_clk_get(dev, "sysmmu");
        if (PTR_ERR(data->clk) == -ENOENT)
                data->clk = NULL;
        else if (IS_ERR(data->clk))
                return PTR_ERR(data->clk);

        data->aclk = devm_clk_get(dev, "aclk");
        if (PTR_ERR(data->aclk) == -ENOENT)
                data->aclk = NULL;
        else if (IS_ERR(data->aclk))
                return PTR_ERR(data->aclk);

        data->pclk = devm_clk_get(dev, "pclk");
        if (PTR_ERR(data->pclk) == -ENOENT)
                data->pclk = NULL;
        else if (IS_ERR(data->pclk))
                return PTR_ERR(data->pclk);

        if (!data->clk && (!data->aclk || !data->pclk)) {
                dev_err(dev, "Failed to get device clock(s)!\n");
                return -ENOSYS;
        }

        data->clk_master = devm_clk_get(dev, "master");
        if (PTR_ERR(data->clk_master) == -ENOENT)
                data->clk_master = NULL;
        else if (IS_ERR(data->clk_master))
                return PTR_ERR(data->clk_master);

        data->sysmmu = dev;
        spin_lock_init(&data->lock);

        platform_set_drvdata(pdev, data);

        __sysmmu_get_version(data);
        if (PG_ENT_SHIFT < 0) {
                if (MMU_MAJ_VER(data->version) < 5) {
                        PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
                        LV1_PROT = SYSMMU_LV1_PROT;
                        LV2_PROT = SYSMMU_LV2_PROT;
                } else {
                        PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
                        LV1_PROT = SYSMMU_V5_LV1_PROT;
                        LV2_PROT = SYSMMU_V5_LV2_PROT;
                }
        }

        pm_runtime_enable(dev);

        of_iommu_set_ops(dev->of_node, &exynos_iommu_ops);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int exynos_sysmmu_suspend(struct device *dev)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev);

        dev_dbg(dev, "suspend\n");
        if (is_sysmmu_active(data)) {
                __sysmmu_disable_nocount(data);
                pm_runtime_put(dev);
        }
        return 0;
}

static int exynos_sysmmu_resume(struct device *dev)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev);

        dev_dbg(dev, "resume\n");
        if (is_sysmmu_active(data)) {
                pm_runtime_get_sync(dev);
                __sysmmu_enable_nocount(data);
        }
        return 0;
}
#endif

static const struct dev_pm_ops sysmmu_pm_ops = {
        SET_LATE_SYSTEM_SLEEP_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume)
};

static const struct of_device_id sysmmu_of_match[] __initconst = {
        { .compatible = "samsung,exynos-sysmmu", },
        { },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
        .probe = exynos_sysmmu_probe,
        .driver = {
                .name           = "exynos-sysmmu",
                .of_match_table = sysmmu_of_match,
                .pm             = &sysmmu_pm_ops,
                .suppress_bind_attrs = true,
        }
};

static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
        dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
                                DMA_TO_DEVICE);
        *ent = cpu_to_le32(val);
        dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
                                   DMA_TO_DEVICE);
}
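
/*
 * The page tables live in ordinary cacheable kernel memory that is mapped
 * to the SYSMMU with dma_map_single(..., DMA_TO_DEVICE), so every PTE
 * update is bracketed by a sync_for_cpu/sync_for_device pair to make the
 * write visible to the hardware page table walker. The driver relies on
 * DMA addresses being equal to physical addresses for these tables (see
 * the BUG_ON in exynos_iommu_domain_alloc() below).
 */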

static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
        struct exynos_iommu_domain *domain;
        dma_addr_t handle;
        int i;

        /* Check if correct PTE offsets are initialized */
        BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain)
                return NULL;

        if (type == IOMMU_DOMAIN_DMA) {
                if (iommu_get_dma_cookie(&domain->domain) != 0)
                        goto err_pgtable;
        } else if (type != IOMMU_DOMAIN_UNMANAGED) {
                goto err_pgtable;
        }

        domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
        if (!domain->pgtable)
                goto err_dma_cookie;

        domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
        if (!domain->lv2entcnt)
                goto err_counter;

        /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
        for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
                domain->pgtable[i + 0] = ZERO_LV2LINK;
                domain->pgtable[i + 1] = ZERO_LV2LINK;
                domain->pgtable[i + 2] = ZERO_LV2LINK;
                domain->pgtable[i + 3] = ZERO_LV2LINK;
                domain->pgtable[i + 4] = ZERO_LV2LINK;
                domain->pgtable[i + 5] = ZERO_LV2LINK;
                domain->pgtable[i + 6] = ZERO_LV2LINK;
                domain->pgtable[i + 7] = ZERO_LV2LINK;
        }

        handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
                                DMA_TO_DEVICE);
        /* For mapping page table entries we rely on dma == phys */
        BUG_ON(handle != virt_to_phys(domain->pgtable));

        spin_lock_init(&domain->lock);
        spin_lock_init(&domain->pgtablelock);
        INIT_LIST_HEAD(&domain->clients);

        domain->domain.geometry.aperture_start = 0;
        domain->domain.geometry.aperture_end   = ~0UL;
        domain->domain.geometry.force_aperture = true;

        return &domain->domain;

err_counter:
        free_pages((unsigned long)domain->pgtable, 2);
err_dma_cookie:
        if (type == IOMMU_DOMAIN_DMA)
                iommu_put_dma_cookie(&domain->domain);
err_pgtable:
        kfree(domain);
        return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        struct sysmmu_drvdata *data, *next;
        unsigned long flags;
        int i;

        WARN_ON(!list_empty(&domain->clients));

        spin_lock_irqsave(&domain->lock, flags);

        list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
                if (__sysmmu_disable(data))
                        data->master = NULL;
                list_del_init(&data->domain_node);
        }

        spin_unlock_irqrestore(&domain->lock, flags);

        if (iommu_domain->type == IOMMU_DOMAIN_DMA)
                iommu_put_dma_cookie(iommu_domain);

        dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
                         DMA_TO_DEVICE);

        for (i = 0; i < NUM_LV1ENTRIES; i++)
                if (lv1ent_page(domain->pgtable + i)) {
                        phys_addr_t base = lv2table_base(domain->pgtable + i);

                        dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
                                         DMA_TO_DEVICE);
                        kmem_cache_free(lv2table_kmem_cache,
                                        phys_to_virt(base));
                }

        free_pages((unsigned long)domain->pgtable, 2);
        free_pages((unsigned long)domain->lv2entcnt, 1);
        kfree(domain);
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
                                       struct device *dev)
{
        struct exynos_iommu_owner *owner = dev->archdata.iommu;
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        phys_addr_t pagetable = virt_to_phys(domain->pgtable);
        struct sysmmu_drvdata *data, *next;
        unsigned long flags;
        bool found = false;

        if (!has_sysmmu(dev) || owner->domain != iommu_domain)
                return;

        spin_lock_irqsave(&domain->lock, flags);
        list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
                if (data->master == dev) {
                        if (__sysmmu_disable(data)) {
                                data->master = NULL;
                                list_del_init(&data->domain_node);
                        }
                        pm_runtime_put(data->sysmmu);
                        found = true;
                }
        }
        spin_unlock_irqrestore(&domain->lock, flags);

        owner->domain = NULL;

        if (found)
                dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
                                        __func__, &pagetable);
        else
                dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}

static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
                                      struct device *dev)
{
        struct exynos_iommu_owner *owner = dev->archdata.iommu;
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        struct sysmmu_drvdata *data;
        phys_addr_t pagetable = virt_to_phys(domain->pgtable);
        unsigned long flags;
        int ret = -ENODEV;

        if (!has_sysmmu(dev))
                return -ENODEV;

        if (owner->domain)
                exynos_iommu_detach_device(owner->domain, dev);

        list_for_each_entry(data, &owner->controllers, owner_node) {
                pm_runtime_get_sync(data->sysmmu);
                ret = __sysmmu_enable(data, pagetable, domain);
                if (ret >= 0) {
                        data->master = dev;

                        spin_lock_irqsave(&domain->lock, flags);
                        list_add_tail(&data->domain_node, &domain->clients);
                        spin_unlock_irqrestore(&domain->lock, flags);
                }
        }

        if (ret < 0) {
                dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
                        __func__, &pagetable);
                return ret;
        }

        owner->domain = iommu_domain;
        dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
                __func__, &pagetable, (ret == 0) ? "" : ", again");

        return ret;
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
                sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
        if (lv1ent_section(sent)) {
                WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
                return ERR_PTR(-EADDRINUSE);
        }

        if (lv1ent_fault(sent)) {
                sysmmu_pte_t *pent;
                bool need_flush_flpd_cache = lv1ent_zero(sent);

                pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
                BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
                if (!pent)
                        return ERR_PTR(-ENOMEM);

                update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
                kmemleak_ignore(pent);
                *pgcounter = NUM_LV2ENTRIES;
                dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE);

                /*
                 * If the pre-fetched SLPD is the faulty SLPD in zero_l2_table,
                 * the FLPD cache may cache the address of zero_l2_table. This
                 * function replaces the zero_l2_table with a new L2 page table
                 * to write valid mappings.
                 * Accessing the valid area may cause a page fault since the
                 * FLPD cache may still cache zero_l2_table for the valid area
                 * instead of the new L2 page table that has the mapping
                 * information of the valid area.
                 * Thus any replacement of zero_l2_table with another valid L2
                 * page table must involve FLPD cache invalidation for System
                 * MMU v3.3.
                 * FLPD cache invalidation is performed with TLB invalidation
                 * by VPN without blocking. It is safe to invalidate the TLB
                 * without blocking because the target address of the TLB
                 * invalidation is not currently mapped.
                 */
                if (need_flush_flpd_cache) {
                        struct sysmmu_drvdata *data;

                        spin_lock(&domain->lock);
                        list_for_each_entry(data, &domain->clients, domain_node)
                                sysmmu_tlb_invalidate_flpdcache(data, iova);
                        spin_unlock(&domain->lock);
                }
        }

        return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
                          sysmmu_pte_t *sent, sysmmu_iova_t iova,
                          phys_addr_t paddr, int prot, short *pgcnt)
{
        if (lv1ent_section(sent)) {
                WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
                        iova);
                return -EADDRINUSE;
        }

        if (lv1ent_page(sent)) {
                if (*pgcnt != NUM_LV2ENTRIES) {
                        WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
                                iova);
                        return -EADDRINUSE;
                }

                kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
                *pgcnt = 0;
        }

        update_pte(sent, mk_lv1ent_sect(paddr, prot));

        spin_lock(&domain->lock);
        if (lv1ent_page_zero(sent)) {
                struct sysmmu_drvdata *data;
                /*
                 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
                 * entry by speculative prefetch of SLPD which has no mapping.
                 */
                list_for_each_entry(data, &domain->clients, domain_node)
                        sysmmu_tlb_invalidate_flpdcache(data, iova);
        }
        spin_unlock(&domain->lock);

        return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
                       int prot, short *pgcnt)
{
        if (size == SPAGE_SIZE) {
                if (WARN_ON(!lv2ent_fault(pent)))
                        return -EADDRINUSE;

                update_pte(pent, mk_lv2ent_spage(paddr, prot));
                *pgcnt -= 1;
        } else { /* size == LPAGE_SIZE */
                int i;
                dma_addr_t pent_base = virt_to_phys(pent);

                dma_sync_single_for_cpu(dma_dev, pent_base,
                                        sizeof(*pent) * SPAGES_PER_LPAGE,
                                        DMA_TO_DEVICE);
                for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
                        if (WARN_ON(!lv2ent_fault(pent))) {
                                if (i > 0)
                                        memset(pent - i, 0, sizeof(*pent) * i);
                                return -EADDRINUSE;
                        }

                        *pent = mk_lv2ent_lpage(paddr, prot);
                }
                dma_sync_single_for_device(dma_dev, pent_base,
                                           sizeof(*pent) * SPAGES_PER_LPAGE,
                                           DMA_TO_DEVICE);
                *pgcnt -= SPAGES_PER_LPAGE;
        }

        return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance by caching more page table entries during a page table walk.
 * However, the logic has a bug: while caching faulty page table entries,
 * System MMU reports a page fault if the cached fault entry is hit, even
 * though the fault entry has been updated to a valid entry after it was
 * cached.
 * To prevent caching faulty page table entries which may be updated to
 * valid entries later, the virtual memory manager should take care of the
 * workaround for this problem. The workaround is described below.
 *
 * Any two consecutive I/O virtual address regions must have a hole of
 * 128KiB at maximum to prevent misbehavior of System MMU 3.x (workaround
 * for h/w bug).
 *
 * Precisely, the start address of any I/O virtual region must be aligned
 * to the following sizes for System MMU v3.1 and v3.2.
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs more workarounds:
 * - Any two consecutive I/O virtual regions must have a hole of size
 *   larger than or equal to 128KiB.
 * - The start address of an I/O virtual region must be aligned to 128KiB.
 */
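/*
 * For example, an IOVA allocator that obeys the v3.3 rules above could
 * round every region start up to a 128KiB (0x20000) boundary and keep at
 * least a 128KiB guard hole before the next region, so that no cached
 * faulty entry can overlap a region that is mapped later.
 */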
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);
	prot &= SYSMMU_SUPPORTED_PROT_BITS;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr, prot,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size, prot,
					  &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
		       __func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}

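/*
 * Illustration only: how a consumer might exercise the map/unmap paths
 * above through the generic IOMMU API. The device pointer and addresses
 * are hypothetical; the IOMMU core picks entry sizes (1MiB section, 64KiB
 * large page or 4KiB small page) from pgsize_bitmap declared further down.
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (dom && !iommu_attach_device(dom, dev)) {
 *		// 1MiB-aligned iova/paddr, so a single section entry is used
 *		iommu_map(dom, 0x20000000, 0x40000000, SZ_1M,
 *			  IOMMU_READ | IOMMU_WRITE);
 *		...
 *		iommu_unmap(dom, 0x20000000, SZ_1M);
 *	}
 */
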
static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/*
		 * Link the unmapped section to the shared zero lv2 table
		 * instead of marking it as a plain fault: workaround for the
		 * h/w bug in System MMU v3.3 described above.
		 */
		update_pte(ent, ZERO_LV2LINK);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(ent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		update_pte(ent, 0);
		size = SPAGE_SIZE;
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
				sizeof(*ent) * SPAGES_PER_LPAGE,
				DMA_TO_DEVICE);
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
				   sizeof(*ent) * SPAGES_PER_LPAGE,
				   DMA_TO_DEVICE);
	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
	       __func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}

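/*
 * For illustration, a worked translation of the (hypothetical) iova
 * 0x12345678 when it is backed by 4KiB small pages, derived from the
 * SECT_ORDER/SPAGE_ORDER definitions at the top of this file:
 *
 *	lv1 index   = 0x12345678 >> SECT_ORDER           = 0x123
 *	lv2 index   = (0x12345678 >> SPAGE_ORDER) & 0xff = 0x45
 *	page offset = 0x12345678 & ~SPAGE_MASK           = 0x678
 *
 * phys is then the frame address from the lv2 entry plus the 0x678 offset;
 * for a 1MiB section only the lv1 lookup is needed and the offset is the
 * low 20 bits of the iova.
 */
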
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}

static int exynos_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;

	if (!has_sysmmu(dev))
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	return 0;
}

static void exynos_iommu_remove_device(struct device *dev)
{
	if (!has_sysmmu(dev))
		return;

	iommu_group_remove_device(dev);
}

static int exynos_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *spec)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct sysmmu_drvdata *data;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data)
		return -ENODEV;

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner)
			return -ENOMEM;

		INIT_LIST_HEAD(&owner->controllers);
		dev->archdata.iommu = owner;
	}

	list_add_tail(&data->owner_node, &owner->controllers);
	return 0;
}

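/*
 * Illustration only: of_xlate() above is invoked once for each phandle in a
 * master's "iommus" property. A minimal device tree fragment wiring one
 * master to one System MMU instance could look like the sketch below; node
 * names, addresses, interrupt and clock specifiers are placeholders, not
 * taken from any real board file:
 *
 *	sysmmu_gsc0: sysmmu@13e80000 {
 *		compatible = "samsung,exynos-sysmmu";
 *		reg = <0x13e80000 0x1000>;
 *		interrupts = <2 0>;
 *		clock-names = "sysmmu", "master";
 *		clocks = <&clock 262>, <&clock 256>;
 *		#iommu-cells = <0>;
 *	};
 *
 *	gsc_0: gsc@13e00000 {
 *		...
 *		iommus = <&sysmmu_gsc0>;
 *	};
 */
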
static struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.device_group = get_device_iommu_group,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
};

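/*
 * For illustration: pgsize_bitmap above advertises the three supported
 * entry sizes, i.e. 0x100000 | 0x10000 | 0x1000 = 0x111000 (1MiB, 64KiB,
 * 4KiB). The IOMMU core splits requests accordingly before calling
 * ->map()/->unmap(): a hypothetical 1MiB + 64KiB mapping at a 1MiB-aligned
 * iova would arrive as one SECT_SIZE call followed by one LPAGE_SIZE call.
 */
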
static bool init_done;

static int __init exynos_iommu_init(void)
{
	int ret;

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
			__func__);
		goto err_set_iommu;
	}

	init_done = true;

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}

static int __init exynos_iommu_of_setup(struct device_node *np)
{
	struct platform_device *pdev;

	if (!init_done)
		exynos_iommu_init();

	pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
	if (!pdev)
		return -ENODEV;

	/*
	 * Use the first registered sysmmu device for performing
	 * dma mapping operations on iommu page tables (cpu cache flush).
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	return 0;
}

IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu",
		 exynos_iommu_of_setup);