/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 * http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/dma-iommu.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
                           ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
                          ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

/*
 * v1.x - v3.x SYSMMU supports 32-bit physical and 32-bit virtual address
 * spaces. v5.0 introduced support for a 36-bit physical address space by
 * shifting all page entry values right by 4 bits.
 * All SYSMMU controllers in the system support address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized on the first SYSMMU probe to the
 * proper value (0 or 4).
 */
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4
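
/*
 * Worked example (illustration only): with PG_ENT_SHIFT == 4 (v5.0), a
 * 36-bit physical address such as 0x420100000 is stored in a 32-bit page
 * table entry as 0x420100000 >> 4 == 0x42010000 and recovered by
 * sect_to_phys() as ((phys_addr_t)0x42010000) << 4. With PG_ENT_SHIFT == 0
 * (v1.x - v3.x) the entries hold the address unshifted.
 */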

static const sysmmu_pte_t *LV1_PROT;
static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
        ((0 << 15) | (0 << 10)), /* no access */
        ((1 << 15) | (1 << 10)), /* IOMMU_READ only */
        ((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
        ((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
        (0 << 4), /* no access */
        (1 << 4), /* IOMMU_READ only */
        (2 << 4), /* IOMMU_WRITE only */
        (3 << 4), /* IOMMU_READ | IOMMU_WRITE */
};

static const sysmmu_pte_t *LV2_PROT;
static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
        ((0 << 9) | (0 << 4)), /* no access */
        ((1 << 9) | (1 << 4)), /* IOMMU_READ only */
        ((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
        ((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
        (0 << 2), /* no access */
        (1 << 2), /* IOMMU_READ only */
        (2 << 2), /* IOMMU_WRITE only */
        (3 << 2), /* IOMMU_READ | IOMMU_WRITE */
};

#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)
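
/*
 * Note: the protection tables above are indexed directly by the masked
 * IOMMU prot value. In <linux/iommu.h> IOMMU_READ is bit 0 and IOMMU_WRITE
 * is bit 1, so (prot & SYSMMU_SUPPORTED_PROT_BITS) yields an index 0..3
 * matching the four entries of each LV1/LV2 table.
 */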

#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
        return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
        return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}
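
/*
 * Worked example (illustration only): for IOVA 0x12345678,
 *   lv1ent_offset() == 0x12345678 >> 20          == 0x123 (section index),
 *   lv2ent_offset() == (0x12345678 >> 12) & 0xff == 0x45 (4KiB page index),
 *   spage_offs()    == 0x12345678 & 0xfff        == 0x678 (page offset).
 */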

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)

#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define CFG_LRU 0x1
#define CFG_EAP (1 << 2)
#define CFG_QOS(n) (((n) & 0xF) << 7)
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */

/* common registers */
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_VERSION 0x034

#define MMU_MAJ_VER(val) ((val) >> 7)
#define MMU_MIN_VER(val) ((val) & 0x7F)
#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))
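
/*
 * Worked example (illustration only): the version field packs the major
 * version in bits 10:7 and the minor version in bits 6:0, so
 * MAKE_MMU_VER(3, 3) == (3 << 7) | 3 == 0x183, MMU_MAJ_VER(0x183) == 3 and
 * MMU_MIN_VER(0x183) == 3.
 */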

/* v1.x - v3.x registers */
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

/* v5.x registers */
#define REG_V5_PT_BASE_PFN 0x00C
#define REG_V5_MMU_FLUSH_ALL 0x010
#define REG_V5_MMU_FLUSH_ENTRY 0x014
#define REG_V5_MMU_FLUSH_RANGE 0x018
#define REG_V5_MMU_FLUSH_START 0x020
#define REG_V5_MMU_FLUSH_END 0x024
#define REG_V5_INT_STATUS 0x060
#define REG_V5_INT_CLEAR 0x064
#define REG_V5_FAULT_AR_VA 0x070
#define REG_V5_FAULT_AW_VA 0x080

#define has_sysmmu(dev) ((dev)->archdata.iommu != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
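
/*
 * Unmapped lv1 entries do not use a plain fault value; they all link to a
 * single shared, all-zero lv2 table via ZERO_LV2LINK. This is part of the
 * System MMU v3.3 FLPD-cache workaround described in alloc_lv2entry()
 * below.
 */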

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
        return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
        return (sysmmu_pte_t *)phys_to_virt(
                                lv2table_base(sent)) + lv2ent_offset(iova);
}

/*
 * IOMMU fault information, decoded from the fault status and VA registers
 */
struct sysmmu_fault_info {
        unsigned int bit;        /* bit number in STATUS register */
        unsigned short addr_reg; /* register to read VA fault address */
        const char *name;        /* human readable fault name */
        unsigned int type;       /* fault type for report_iommu_fault */
};

static const struct sysmmu_fault_info sysmmu_faults[] = {
        { 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
        { 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
        { 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
        { 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
        { 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
        { 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
        { 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
        { 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
        { 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
        { 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
        { 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
        { 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
        { 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
        { 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
        { 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
        { 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
        { 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
        { 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
};
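
/*
 * Example (illustration only): in exynos_sysmmu_irq() below, a v1.x - v3.x
 * STATUS value of 0x4 gives __ffs(0x4) == 2, which selects the
 * "AW MULTI-HIT" entry of sysmmu_faults[], so the faulting VA is read from
 * REG_AW_FAULT_ADDR and reported as a write fault.
 */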

/*
 * This structure is attached to dev.archdata.iommu of the master device
 * on device add. It contains a list of the SYSMMU controllers defined by
 * the device tree that are bound to the given master device. It is usually
 * referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
        struct list_head controllers;   /* list of sysmmu_drvdata.owner_node */
        struct iommu_domain *domain;    /* domain this device is attached to */
        struct mutex rpm_lock;          /* for runtime pm of all sysmmus */
};

/*
 * This structure is the Exynos-specific generalization of struct
 * iommu_domain. It contains a list of the SYSMMU controllers from all
 * master devices that have been attached to this domain, and the page
 * tables of the I/O address space defined by it. It is usually referenced
 * by the 'domain' pointer.
 */
struct exynos_iommu_domain {
        struct list_head clients;       /* list of sysmmu_drvdata.domain_node */
        sysmmu_pte_t *pgtable;          /* lv1 page table, 16KB */
        short *lv2entcnt;               /* free lv2 entry counter for each section */
        spinlock_t lock;                /* lock for modifying list of clients */
        spinlock_t pgtablelock;         /* lock for modifying page table @ pgtable */
        struct iommu_domain domain;     /* generic domain data structure */
};

/*
 * This structure holds all data of a single SYSMMU controller. This
 * includes HW resources like registers and clocks, pointers and list nodes
 * to connect it to all other structures, internal state and parameters
 * read from the device tree. It is usually referenced by the 'data'
 * pointer.
 */
struct sysmmu_drvdata {
        struct device *sysmmu;          /* SYSMMU controller device */
        struct device *master;          /* master device (owner) */
        struct device_link *link;       /* runtime PM link to master */
        void __iomem *sfrbase;          /* our registers */
        struct clk *clk;                /* SYSMMU's clock */
        struct clk *aclk;               /* SYSMMU's aclk clock */
        struct clk *pclk;               /* SYSMMU's pclk clock */
        struct clk *clk_master;         /* master's device clock */
        spinlock_t lock;                /* lock for modifying state */
        bool active;                    /* current status */
        struct exynos_iommu_domain *domain; /* domain we belong to */
        struct list_head domain_node;   /* node for domain clients list */
        struct list_head owner_node;    /* node for owner controllers list */
        phys_addr_t pgtable;            /* assigned page table structure */
        unsigned int version;           /* our version */

        struct iommu_device iommu;      /* IOMMU core handle */
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct exynos_iommu_domain, domain);
}

static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
        writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(struct sysmmu_drvdata *data)
{
        int i = 120;

        writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
        while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
                --i;

        if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
                sysmmu_unblock(data);
                return false;
        }

        return true;
}
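
/*
 * Blocking note: CTRL_BLOCK stalls address translation so the TLB can be
 * flushed or the MMU reconfigured consistently. Bit 0 of REG_MMU_STATUS is
 * polled with a bound of 120 reads so a wedged MMU cannot hang the CPU;
 * if blocking never takes effect, translation is re-enabled and false is
 * returned.
 */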

static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
        if (MMU_MAJ_VER(data->version) < 5)
                writel(0x1, data->sfrbase + REG_MMU_FLUSH);
        else
                writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
}

static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
                                sysmmu_iova_t iova, unsigned int num_inv)
{
        unsigned int i;

        if (MMU_MAJ_VER(data->version) < 5) {
                for (i = 0; i < num_inv; i++) {
                        writel((iova & SPAGE_MASK) | 1,
                               data->sfrbase + REG_MMU_FLUSH_ENTRY);
                        iova += SPAGE_SIZE;
                }
        } else {
                if (num_inv == 1) {
                        writel((iova & SPAGE_MASK) | 1,
                               data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
                } else {
                        writel((iova & SPAGE_MASK),
                               data->sfrbase + REG_V5_MMU_FLUSH_START);
                        writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
                               data->sfrbase + REG_V5_MMU_FLUSH_END);
                        writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE);
                }
        }
}
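
/*
 * Example (illustration only): invalidating 16 pages at IOVA 0x20000000
 * issues sixteen REG_MMU_FLUSH_ENTRY writes on v1.x - v3.x (one per 4KiB
 * page), while v5.x programs FLUSH_START = 0x20000000 and
 * FLUSH_END = 0x2000f000 and triggers a single range flush.
 */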

static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
        if (MMU_MAJ_VER(data->version) < 5)
                writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
        else
                writel(pgd >> PAGE_SHIFT,
                       data->sfrbase + REG_V5_PT_BASE_PFN);

        __sysmmu_tlb_invalidate(data);
}

static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
{
        BUG_ON(clk_prepare_enable(data->clk_master));
        BUG_ON(clk_prepare_enable(data->clk));
        BUG_ON(clk_prepare_enable(data->pclk));
        BUG_ON(clk_prepare_enable(data->aclk));
}

static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
{
        clk_disable_unprepare(data->aclk);
        clk_disable_unprepare(data->pclk);
        clk_disable_unprepare(data->clk);
        clk_disable_unprepare(data->clk_master);
}

static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
        u32 ver;

        __sysmmu_enable_clocks(data);

        ver = readl(data->sfrbase + REG_MMU_VERSION);

        /* controllers on some SoCs don't report proper version */
        if (ver == 0x80000001u)
                data->version = MAKE_MMU_VER(1, 0);
        else
                data->version = MMU_RAW_VER(ver);

        dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
                MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

        __sysmmu_disable_clocks(data);
}

static void show_fault_information(struct sysmmu_drvdata *data,
                                   const struct sysmmu_fault_info *finfo,
                                   sysmmu_iova_t fault_addr)
{
        sysmmu_pte_t *ent;

        dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
                dev_name(data->master), finfo->name, fault_addr);
        dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
        ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
        dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
        if (lv1ent_page(ent)) {
                ent = page_entry(ent, fault_addr);
                dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
        }
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
        /* the SYSMMU is in the blocked state when an interrupt has occurred */
        struct sysmmu_drvdata *data = dev_id;
        const struct sysmmu_fault_info *finfo;
        unsigned int i, n, itype;
        sysmmu_iova_t fault_addr = -1;
        unsigned short reg_status, reg_clear;
        int ret = -ENOSYS;

        WARN_ON(!data->active);

        if (MMU_MAJ_VER(data->version) < 5) {
                reg_status = REG_INT_STATUS;
                reg_clear = REG_INT_CLEAR;
                finfo = sysmmu_faults;
                n = ARRAY_SIZE(sysmmu_faults);
        } else {
                reg_status = REG_V5_INT_STATUS;
                reg_clear = REG_V5_INT_CLEAR;
                finfo = sysmmu_v5_faults;
                n = ARRAY_SIZE(sysmmu_v5_faults);
        }

        spin_lock(&data->lock);

        clk_enable(data->clk_master);

        itype = __ffs(readl(data->sfrbase + reg_status));
        for (i = 0; i < n; i++, finfo++)
                if (finfo->bit == itype)
                        break;
        /* unknown/unsupported fault */
        BUG_ON(i == n);

        /* print debug message */
        fault_addr = readl(data->sfrbase + finfo->addr_reg);
        show_fault_information(data, finfo, fault_addr);

        if (data->domain)
                ret = report_iommu_fault(&data->domain->domain,
                                        data->master, fault_addr, finfo->type);
        /* fault is not recovered by fault handler */
        BUG_ON(ret != 0);

        writel(1 << itype, data->sfrbase + reg_clear);

        sysmmu_unblock(data);

        clk_disable(data->clk_master);

        spin_unlock(&data->lock);

        return IRQ_HANDLED;
}

static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
        unsigned long flags;

        clk_enable(data->clk_master);

        spin_lock_irqsave(&data->lock, flags);
        writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
        writel(0, data->sfrbase + REG_MMU_CFG);
        data->active = false;
        spin_unlock_irqrestore(&data->lock, flags);

        __sysmmu_disable_clocks(data);
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
        unsigned int cfg;

        if (data->version <= MAKE_MMU_VER(3, 1))
                cfg = CFG_LRU | CFG_QOS(15);
        else if (data->version <= MAKE_MMU_VER(3, 2))
                cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
        else
                cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

        cfg |= CFG_EAP; /* enable access protection bits check */

        writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
        unsigned long flags;

        __sysmmu_enable_clocks(data);

        spin_lock_irqsave(&data->lock, flags);
        writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
        __sysmmu_init_config(data);
        __sysmmu_set_ptbase(data, data->pgtable);
        writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
        data->active = true;
        spin_unlock_irqrestore(&data->lock, flags);

        /*
         * The SYSMMU driver keeps the master's clock enabled only for a
         * short time, while accessing the registers. For performing address
         * translation during a DMA transaction it relies on the client
         * driver to enable it.
         */
        clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
                                            sysmmu_iova_t iova)
{
        unsigned long flags;

        spin_lock_irqsave(&data->lock, flags);
        if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
                clk_enable(data->clk_master);
                if (sysmmu_block(data)) {
                        if (data->version >= MAKE_MMU_VER(5, 0))
                                __sysmmu_tlb_invalidate(data);
                        else
                                __sysmmu_tlb_invalidate_entry(data, iova, 1);
                        sysmmu_unblock(data);
                }
                clk_disable(data->clk_master);
        }
        spin_unlock_irqrestore(&data->lock, flags);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
                                        sysmmu_iova_t iova, size_t size)
{
        unsigned long flags;

        spin_lock_irqsave(&data->lock, flags);
        if (data->active) {
                unsigned int num_inv = 1;

                clk_enable(data->clk_master);

                /*
                 * L2TLB invalidation required:
                 * 4KB page: 1 invalidation
                 * 64KB page: 16 invalidations
                 * 1MB page: 64 invalidations
                 * because the TLB is 8-way set-associative with 64 sets.
                 * A 1MB page can be cached in any of the 64 sets, and a
                 * 64KB page in one of 16 consecutive sets.
                 */
                if (MMU_MAJ_VER(data->version) == 2)
                        num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

                if (sysmmu_block(data)) {
                        __sysmmu_tlb_invalidate_entry(data, iova, num_inv);
                        sysmmu_unblock(data);
                }
                clk_disable(data->clk_master);
        }
        spin_unlock_irqrestore(&data->lock, flags);
}
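
/*
 * Worked example (illustration only): on a v2 controller, invalidating a
 * 1MiB section gives size / PAGE_SIZE == 256, which min_t() caps at 64
 * FLUSH_ENTRY writes, one per TLB set. All other versions keep
 * num_inv == 1 here and flush a single entry.
 */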

static const struct iommu_ops exynos_iommu_ops;

static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
        int irq, ret;
        struct device *dev = &pdev->dev;
        struct sysmmu_drvdata *data;
        struct resource *res;

        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        data->sfrbase = devm_ioremap_resource(dev, res);
        if (IS_ERR(data->sfrbase))
                return PTR_ERR(data->sfrbase);

        irq = platform_get_irq(pdev, 0);
        if (irq <= 0) {
                dev_err(dev, "Unable to find IRQ resource\n");
                return irq;
        }

        ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
                               dev_name(dev), data);
        if (ret) {
                dev_err(dev, "Unable to register handler of irq %d\n", irq);
                return ret;
        }

        data->clk = devm_clk_get(dev, "sysmmu");
        if (PTR_ERR(data->clk) == -ENOENT)
                data->clk = NULL;
        else if (IS_ERR(data->clk))
                return PTR_ERR(data->clk);

        data->aclk = devm_clk_get(dev, "aclk");
        if (PTR_ERR(data->aclk) == -ENOENT)
                data->aclk = NULL;
        else if (IS_ERR(data->aclk))
                return PTR_ERR(data->aclk);

        data->pclk = devm_clk_get(dev, "pclk");
        if (PTR_ERR(data->pclk) == -ENOENT)
                data->pclk = NULL;
        else if (IS_ERR(data->pclk))
                return PTR_ERR(data->pclk);

        if (!data->clk && (!data->aclk || !data->pclk)) {
                dev_err(dev, "Failed to get device clock(s)!\n");
                return -ENOSYS;
        }
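
        /*
         * Clock topology note: older (v1.x - v3.x) controllers typically
         * expose a single "sysmmu" gate clock, while v5.x controllers use
         * the "aclk"/"pclk" pair. Each clock is optional (-ENOENT simply
         * leaves it NULL), but the check above requires that at least one
         * complete combination is present.
         */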

        data->clk_master = devm_clk_get(dev, "master");
        if (PTR_ERR(data->clk_master) == -ENOENT)
                data->clk_master = NULL;
        else if (IS_ERR(data->clk_master))
                return PTR_ERR(data->clk_master);

        data->sysmmu = dev;
        spin_lock_init(&data->lock);

        ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
                                     dev_name(data->sysmmu));
        if (ret)
                return ret;

        iommu_device_set_ops(&data->iommu, &exynos_iommu_ops);
        iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode);

        ret = iommu_device_register(&data->iommu);
        if (ret)
                return ret;

        platform_set_drvdata(pdev, data);

        __sysmmu_get_version(data);
        if (PG_ENT_SHIFT < 0) {
                if (MMU_MAJ_VER(data->version) < 5) {
                        PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
                        LV1_PROT = SYSMMU_LV1_PROT;
                        LV2_PROT = SYSMMU_LV2_PROT;
                } else {
                        PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
                        LV1_PROT = SYSMMU_V5_LV1_PROT;
                        LV2_PROT = SYSMMU_V5_LV2_PROT;
                }
        }

        /*
         * use the first registered sysmmu device for performing
         * dma mapping operations on iommu page tables (cpu cache flush)
         */
        if (!dma_dev)
                dma_dev = &pdev->dev;

        pm_runtime_enable(dev);

        return 0;
}

static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev);
        struct device *master = data->master;

        if (master) {
                struct exynos_iommu_owner *owner = master->archdata.iommu;

                mutex_lock(&owner->rpm_lock);
                if (data->domain) {
                        dev_dbg(data->sysmmu, "saving state\n");
                        __sysmmu_disable(data);
                }
                mutex_unlock(&owner->rpm_lock);
        }
        return 0;
}

static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev);
        struct device *master = data->master;

        if (master) {
                struct exynos_iommu_owner *owner = master->archdata.iommu;

                mutex_lock(&owner->rpm_lock);
                if (data->domain) {
                        dev_dbg(data->sysmmu, "restoring state\n");
                        __sysmmu_enable(data);
                }
                mutex_unlock(&owner->rpm_lock);
        }
        return 0;
}

static const struct dev_pm_ops sysmmu_pm_ops = {
        SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
};

static const struct of_device_id sysmmu_of_match[] = {
        { .compatible = "samsung,exynos-sysmmu", },
        { },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
        .probe  = exynos_sysmmu_probe,
        .driver = {
                .name           = "exynos-sysmmu",
                .of_match_table = sysmmu_of_match,
                .pm             = &sysmmu_pm_ops,
                .suppress_bind_attrs = true,
        }
};

static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
        dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
                                DMA_TO_DEVICE);
        *ent = cpu_to_le32(val);
        dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
                                   DMA_TO_DEVICE);
}
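
/*
 * Cache maintenance note: the SYSMMU page table walker fetches entries
 * directly from memory, so each PTE update is bracketed by DMA sync calls;
 * on a non-coherent system the _for_device() call flushes the CPU cache
 * line holding the entry so the walker observes the new value.
 */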

static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
        struct exynos_iommu_domain *domain;
        dma_addr_t handle;
        int i;

        /* Check if correct PTE offsets are initialized */
        BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain)
                return NULL;

        if (type == IOMMU_DOMAIN_DMA) {
                if (iommu_get_dma_cookie(&domain->domain) != 0)
                        goto err_pgtable;
        } else if (type != IOMMU_DOMAIN_UNMANAGED) {
                goto err_pgtable;
        }

        domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
        if (!domain->pgtable)
                goto err_dma_cookie;

        domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
        if (!domain->lv2entcnt)
                goto err_counter;

        /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
        for (i = 0; i < NUM_LV1ENTRIES; i++)
                domain->pgtable[i] = ZERO_LV2LINK;

        handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
                                DMA_TO_DEVICE);
        /* For mapping page table entries we rely on dma == phys */
        BUG_ON(handle != virt_to_phys(domain->pgtable));
        if (dma_mapping_error(dma_dev, handle))
                goto err_lv2ent;

        spin_lock_init(&domain->lock);
        spin_lock_init(&domain->pgtablelock);
        INIT_LIST_HEAD(&domain->clients);

        domain->domain.geometry.aperture_start = 0;
        domain->domain.geometry.aperture_end   = ~0UL;
        domain->domain.geometry.force_aperture = true;

        return &domain->domain;

err_lv2ent:
        free_pages((unsigned long)domain->lv2entcnt, 1);
err_counter:
        free_pages((unsigned long)domain->pgtable, 2);
err_dma_cookie:
        if (type == IOMMU_DOMAIN_DMA)
                iommu_put_dma_cookie(&domain->domain);
err_pgtable:
        kfree(domain);
        return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        struct sysmmu_drvdata *data, *next;
        unsigned long flags;
        int i;

        WARN_ON(!list_empty(&domain->clients));

        spin_lock_irqsave(&domain->lock, flags);

        list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
                spin_lock(&data->lock);
                __sysmmu_disable(data);
                data->pgtable = 0;
                data->domain = NULL;
                list_del_init(&data->domain_node);
                spin_unlock(&data->lock);
        }

        spin_unlock_irqrestore(&domain->lock, flags);

        if (iommu_domain->type == IOMMU_DOMAIN_DMA)
                iommu_put_dma_cookie(iommu_domain);

        dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
                         DMA_TO_DEVICE);

        for (i = 0; i < NUM_LV1ENTRIES; i++)
                if (lv1ent_page(domain->pgtable + i)) {
                        phys_addr_t base = lv2table_base(domain->pgtable + i);

                        dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
                                         DMA_TO_DEVICE);
                        kmem_cache_free(lv2table_kmem_cache,
                                        phys_to_virt(base));
                }

        free_pages((unsigned long)domain->pgtable, 2);
        free_pages((unsigned long)domain->lv2entcnt, 1);
        kfree(domain);
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
                                       struct device *dev)
{
        struct exynos_iommu_owner *owner = dev->archdata.iommu;
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        phys_addr_t pagetable = virt_to_phys(domain->pgtable);
        struct sysmmu_drvdata *data, *next;
        unsigned long flags;

        if (!has_sysmmu(dev) || owner->domain != iommu_domain)
                return;

        mutex_lock(&owner->rpm_lock);

        list_for_each_entry(data, &owner->controllers, owner_node) {
                pm_runtime_get_noresume(data->sysmmu);
                if (pm_runtime_active(data->sysmmu))
                        __sysmmu_disable(data);
                pm_runtime_put(data->sysmmu);
        }

        spin_lock_irqsave(&domain->lock, flags);
        list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
                spin_lock(&data->lock);
                data->pgtable = 0;
                data->domain = NULL;
                list_del_init(&data->domain_node);
                spin_unlock(&data->lock);
        }
        owner->domain = NULL;
        spin_unlock_irqrestore(&domain->lock, flags);

        mutex_unlock(&owner->rpm_lock);

        dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
                &pagetable);
}

static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
                                      struct device *dev)
{
        struct exynos_iommu_owner *owner = dev->archdata.iommu;
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        struct sysmmu_drvdata *data;
        phys_addr_t pagetable = virt_to_phys(domain->pgtable);
        unsigned long flags;

        if (!has_sysmmu(dev))
                return -ENODEV;

        if (owner->domain)
                exynos_iommu_detach_device(owner->domain, dev);

        mutex_lock(&owner->rpm_lock);

        spin_lock_irqsave(&domain->lock, flags);
        list_for_each_entry(data, &owner->controllers, owner_node) {
                spin_lock(&data->lock);
                data->pgtable = pagetable;
                data->domain = domain;
                list_add_tail(&data->domain_node, &domain->clients);
                spin_unlock(&data->lock);
        }
        owner->domain = iommu_domain;
        spin_unlock_irqrestore(&domain->lock, flags);

        list_for_each_entry(data, &owner->controllers, owner_node) {
                pm_runtime_get_noresume(data->sysmmu);
                if (pm_runtime_active(data->sysmmu))
                        __sysmmu_enable(data);
                pm_runtime_put(data->sysmmu);
        }

        mutex_unlock(&owner->rpm_lock);

        dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
                &pagetable);

        return 0;
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
                sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
        if (lv1ent_section(sent)) {
                WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
                return ERR_PTR(-EADDRINUSE);
        }

        if (lv1ent_fault(sent)) {
                dma_addr_t handle;
                sysmmu_pte_t *pent;
                bool need_flush_flpd_cache = lv1ent_zero(sent);

                pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
                BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
                if (!pent)
                        return ERR_PTR(-ENOMEM);

                update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
                kmemleak_ignore(pent);
                *pgcounter = NUM_LV2ENTRIES;
                handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
                                        DMA_TO_DEVICE);
                if (dma_mapping_error(dma_dev, handle)) {
                        kmem_cache_free(lv2table_kmem_cache, pent);
                        return ERR_PTR(-EADDRINUSE);
                }

                /*
                 * If the pre-fetched SLPD is the faulty SLPD in
                 * zero_l2_table, the FLPD cache may cache the address of
                 * zero_l2_table. This function replaces the zero_l2_table
                 * with a new L2 page table to write valid mappings.
                 * Accessing the valid area may cause a page fault since the
                 * FLPD cache may still cache zero_l2_table for the valid
                 * area instead of the new L2 page table that has the
                 * mapping information of the valid area.
                 * Thus any replacement of zero_l2_table with another valid
                 * L2 page table must involve an FLPD cache invalidation on
                 * System MMU v3.3.
                 * The FLPD cache invalidation is performed with a TLB
                 * invalidation by VPN without blocking. It is safe to
                 * invalidate the TLB without blocking because the target
                 * address of the TLB invalidation is not currently mapped.
                 */
                if (need_flush_flpd_cache) {
                        struct sysmmu_drvdata *data;

                        spin_lock(&domain->lock);
                        list_for_each_entry(data, &domain->clients, domain_node)
                                sysmmu_tlb_invalidate_flpdcache(data, iova);
                        spin_unlock(&domain->lock);
                }
        }

        return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
                          sysmmu_pte_t *sent, sysmmu_iova_t iova,
                          phys_addr_t paddr, int prot, short *pgcnt)
{
        if (lv1ent_section(sent)) {
                WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
                     iova);
                return -EADDRINUSE;
        }

        if (lv1ent_page(sent)) {
                if (*pgcnt != NUM_LV2ENTRIES) {
                        WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
                             iova);
                        return -EADDRINUSE;
                }

                kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
                *pgcnt = 0;
        }

        update_pte(sent, mk_lv1ent_sect(paddr, prot));

        spin_lock(&domain->lock);
        if (lv1ent_page_zero(sent)) {
                struct sysmmu_drvdata *data;
                /*
                 * Flushing FLPD cache in System MMU v3.3 that may cache a
                 * FLPD entry by speculative prefetch of an SLPD which has
                 * no mapping.
                 */
                list_for_each_entry(data, &domain->clients, domain_node)
                        sysmmu_tlb_invalidate_flpdcache(data, iova);
        }
        spin_unlock(&domain->lock);

        return 0;
}
1017
static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       int prot, short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		update_pte(pent, mk_lv2ent_spage(paddr, prot));
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		dma_addr_t pent_base = virt_to_phys(pent);

		dma_sync_single_for_cpu(dma_dev, pent_base,
					sizeof(*pent) * SPAGES_PER_LPAGE,
					DMA_TO_DEVICE);
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr, prot);
		}
		dma_sync_single_for_device(dma_dev, pent_base,
					   sizeof(*pent) * SPAGES_PER_LPAGE,
					   DMA_TO_DEVICE);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}
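
/*
 * Illustrative sketch, not part of the driver: how the per-table free-entry
 * counter handled above evolves. The numbers follow from the page sizes
 * defined earlier: NUM_LV2ENTRIES == SECT_SIZE / SPAGE_SIZE == 256 and
 * SPAGES_PER_LPAGE == LPAGE_SIZE / SPAGE_SIZE == 16. The function name is
 * hypothetical.
 */
static short __maybe_unused lv2entcnt_sketch(void)
{
	short cnt = 256;	/* fresh table: all NUM_LV2ENTRIES slots free */

	cnt -= 1;		/* lv2set_page() mapped one 4KiB small page */
	cnt -= 16;		/* lv2set_page() mapped one 64KiB large page */

	/*
	 * exynos_iommu_unmap() adds the same amounts back; once cnt reaches
	 * NUM_LV2ENTRIES again, lv1set_section() may free the whole table.
	 */
	return cnt;		/* 239 slots still free */
}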

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic that improves address translation
 * performance by caching extra page table entries during a page table walk.
 * This logic has a bug, however: a fault entry may be cached as well, and
 * System MMU then reports a page fault whenever the cached fault entry is
 * hit, even after the entry in memory has been updated to a valid one.
 * To keep fault entries that may later become valid out of this cache, the
 * virtual memory manager must apply the following workarounds.
 *
 * Any two consecutive I/O virtual address regions must be separated by a
 * hole of at least 128KiB to prevent misbehavior of System MMU 3.x
 * (workaround for the h/w bug).
 *
 * Precisely, the start address of every I/O virtual region must be aligned
 * to the following sizes for System MMU v3.1 and v3.2:
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs more workarounds:
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - The start address of an I/O virtual region must be aligned to 128KiB.
 */
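
/*
 * Minimal sketch, not part of the driver: one way an IOVA allocator could
 * honour the v3.3 constraints above. The macro and helper names are
 * illustrative assumptions, not an API of this file.
 */
#define EXYNOS_IOVA_GAP_SKETCH	0x20000	/* 128KiB */

static sysmmu_iova_t __maybe_unused next_region_start(sysmmu_iova_t prev_end)
{
	/* leave a hole of at least 128KiB after the previous region... */
	sysmmu_iova_t start = prev_end + EXYNOS_IOVA_GAP_SKETCH;

	/* ...and round the new start up to a 128KiB boundary */
	return ALIGN(start, EXYNOS_IOVA_GAP_SKETCH);
}
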
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);
	prot &= SYSMMU_SUPPORTED_PROT_BITS;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr, prot,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size, prot,
					  &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}
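
/*
 * Usage sketch, assuming a device already attached to an exynos-iommu
 * domain: the generic IOMMU core picks one of the three page sizes
 * advertised in pgsize_bitmap below, so a 1MiB-aligned request takes the
 * lv1set_section() path while smaller ones go through lv2set_page().
 * "my_domain", "my_iova" and "my_phys" are placeholder names.
 */
static int __maybe_unused map_one_section(struct iommu_domain *my_domain,
					  unsigned long my_iova,
					  phys_addr_t my_phys)
{
	/* both addresses must be SECT_SIZE (1MiB) aligned for this path */
	return iommu_map(my_domain, my_iova, my_phys, SECT_SIZE,
			 IOMMU_READ | IOMMU_WRITE);
}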

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/*
		 * Link the shared zero lv2 table instead of writing a plain
		 * fault entry (workaround for the h/w bug in System MMU v3.3
		 * described above).
		 */
		update_pte(ent, ZERO_LV2LINK);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(ent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		update_pte(ent, 0);
		size = SPAGE_SIZE;
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
				sizeof(*ent) * SPAGES_PER_LPAGE,
				DMA_TO_DEVICE);
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
				   sizeof(*ent) * SPAGES_PER_LPAGE,
				   DMA_TO_DEVICE);
	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}
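
/*
 * Worked sketch, not part of the driver, assuming the v1.x - v3.x layout
 * (PG_ENT_SHIFT == 0): how an IOVA splits into the indices and offsets used
 * by the lookup above. For example, iova 0x12345678 has lv1 index 0x123,
 * lv2 index 0x45 and a 4KiB page offset of 0x678.
 */
static sysmmu_iova_t __maybe_unused decompose_iova_sketch(sysmmu_iova_t iova)
{
	unsigned int lv1_idx = iova >> SECT_ORDER;		   /* 0x123 */
	unsigned int lv2_idx = (iova & ~SECT_MASK) >> SPAGE_ORDER; /* 0x45 */
	unsigned int offs = iova & ~SPAGE_MASK;			   /* 0x678 */

	/* a small-page translation is then spage_phys(pte) + offs */
	return ((sysmmu_iova_t)lv1_idx << SECT_ORDER) |
	       (lv2_idx << SPAGE_ORDER) | offs;	/* recombines to the iova */
}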

static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}

static int exynos_iommu_add_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;
	struct iommu_group *group;

	if (!has_sysmmu(dev))
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		/*
		 * SYSMMU will be runtime activated via device link
		 * (dependency) to its master device, so there are no
		 * direct calls to pm_runtime_get/put in this driver.
		 */
		data->link = device_link_add(dev, data->sysmmu,
					     DL_FLAG_PM_RUNTIME);
	}
	iommu_group_put(group);

	return 0;
}

static void exynos_iommu_remove_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;

	if (!has_sysmmu(dev))
		return;

	if (owner->domain) {
		struct iommu_group *group = iommu_group_get(dev);

		if (group) {
			WARN_ON(owner->domain !=
				iommu_group_default_domain(group));
			exynos_iommu_detach_device(owner->domain, dev);
			iommu_group_put(group);
		}
	}
	iommu_group_remove_device(dev);

	list_for_each_entry(data, &owner->controllers, owner_node)
		device_link_del(data->link);
}

static int exynos_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *spec)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct sysmmu_drvdata *data, *entry;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data)
		return -ENODEV;

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner)
			return -ENOMEM;

		INIT_LIST_HEAD(&owner->controllers);
		mutex_init(&owner->rpm_lock);
		dev->archdata.iommu = owner;
	}

	/* nothing to do if this controller is already on the master's list */
	list_for_each_entry(entry, &owner->controllers, owner_node)
		if (entry == data)
			return 0;

	list_add_tail(&data->owner_node, &owner->controllers);
	data->master = dev;

	return 0;
}
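
/*
 * Binding sketch, assuming the documented "samsung,exynos-sysmmu" devicetree
 * binding: of_xlate() above runs once for every phandle a master lists in
 * its "iommus" property. Node names and addresses are illustrative:
 *
 *	sysmmu_fimd: sysmmu@14640000 {
 *		compatible = "samsung,exynos-sysmmu";
 *		#iommu-cells = <0>;
 *	};
 *
 *	fimd@14400000 {
 *		iommus = <&sysmmu_fimd>;
 *	};
 */
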
1337
Arvind Yadav0b9a3692017-08-28 17:42:05 +05301338static const struct iommu_ops exynos_iommu_ops = {
Joerg Roedele1fd1ea2015-03-26 13:43:11 +01001339 .domain_alloc = exynos_iommu_domain_alloc,
1340 .domain_free = exynos_iommu_domain_free,
Bjorn Helgaasba5fa6f2014-05-08 14:49:14 -06001341 .attach_dev = exynos_iommu_attach_device,
1342 .detach_dev = exynos_iommu_detach_device,
1343 .map = exynos_iommu_map,
1344 .unmap = exynos_iommu_unmap,
Olav Haugan315786e2014-10-25 09:55:16 -07001345 .map_sg = default_iommu_map_sg,
Bjorn Helgaasba5fa6f2014-05-08 14:49:14 -06001346 .iova_to_phys = exynos_iommu_iova_to_phys,
Marek Szyprowski6c2ae7e2016-02-18 15:12:48 +01001347 .device_group = get_device_iommu_group,
Bjorn Helgaasba5fa6f2014-05-08 14:49:14 -06001348 .add_device = exynos_iommu_add_device,
1349 .remove_device = exynos_iommu_remove_device,
KyongHo Cho2a965362012-05-12 05:56:09 +09001350 .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
Marek Szyprowskiaa759fd2015-05-19 15:20:37 +02001351 .of_xlate = exynos_iommu_of_xlate,
KyongHo Cho2a965362012-05-12 05:56:09 +09001352};

static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
			__func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
core_initcall(exynos_iommu_init);

IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu");