/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/dma-iommu.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent)	((*(sent) == ZERO_LV2LINK) || \
				 ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent)	(*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent)	((*(sent) & 3) == 1)
#define lv1ent_page(sent)	((*(sent) != ZERO_LV2LINK) && \
				 ((*(sent) & 3) == 1))
#define lv1ent_section(sent)	((*(sent) & 3) == 2)

#define lv2ent_fault(pent)	((*(pent) & 3) == 0)
#define lv2ent_small(pent)	((*(pent) & 2) == 2)
#define lv2ent_large(pent)	((*(pent) & 3) == 1)
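
/*
 * Sketch of the entry encoding implied by the predicates above (derived
 * from these macros alone, not from a hardware manual): bits [1:0] of a
 * lv1 entry select 1MiB section (2), link to a lv2 table (1) or fault
 * (0 or 3); bits [1:0] of a lv2 entry select 64KiB large page (1), 4KiB
 * small page (bit 1 set) or fault (0). ZERO_LV2LINK (defined below) is a
 * lv1 link to a lv2 table full of fault entries, so it is treated as a
 * fault as well.
 */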

/*
 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address
 * spaces. v5.0 introduced support for a 36bit physical address space by
 * shifting all page entry values by 4 bits.
 * All SYSMMU controllers in the system support address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized to the proper value (0 or 4)
 * on the first SYSMMU probe.
 */
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT		0
#define SYSMMU_V5_PG_ENT_SHIFT		4
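
/*
 * Worked example (illustrative values): on a v5 SYSMMU with
 * PG_ENT_SHIFT == 4, a 1MiB section at the 36bit physical address
 * 0x8_0010_0000 is encoded by mk_lv1ent_sect() below as
 * (0x800100000 >> 4) | 2 == 0x80010002, and section_phys() recovers the
 * base: (0x80010002 << 4) & SECT_MASK == 0x800100000. With
 * PG_ENT_SHIFT == 0 (v1.x - v3.x) entries hold the address directly.
 */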

#define sect_to_phys(ent)	(((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent)	(sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova)	(iova & (SECT_SIZE - 1))
#define lpage_phys(pent)	(sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova)	(iova & (LPAGE_SIZE - 1))
#define spage_phys(pent)	(sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova)	(iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}
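
/*
 * Worked example (illustrative): for iova == 0x12345678,
 * lv1ent_offset() == 0x12345678 >> 20 == 0x123 and
 * lv2ent_offset() == (0x12345678 >> 12) & 0xFF == 0x45, i.e. the IOVA
 * selects lv1 slot 0x123 and lv2 slot 0x45, with
 * section_offs() == 0x45678 and spage_offs() == 0x678.
 */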

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa) ((pa >> PG_ENT_SHIFT) | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_spage(pa) ((pa >> PG_ENT_SHIFT) | 2)

#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define CFG_LRU		0x1
#define CFG_QOS(n)	((n & 0xF) << 7)
#define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */

/* common registers */
#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_VERSION		0x034

#define MMU_MAJ_VER(val)	((val) >> 7)
#define MMU_MIN_VER(val)	((val) & 0x7F)
#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))
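
/*
 * Worked example (illustrative): MAKE_MMU_VER(3, 3) == (3 << 7) | 3
 * == 0x183, and decoding gives MMU_MAJ_VER(0x183) == 3 and
 * MMU_MIN_VER(0x183) == 3 - the "version 3.3" that the FLPD cache
 * workarounds below check for.
 */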

/* v1.x - v3.x registers */
#define REG_MMU_FLUSH		0x00C
#define REG_MMU_FLUSH_ENTRY	0x010
#define REG_PT_BASE_ADDR	0x014
#define REG_INT_STATUS		0x018
#define REG_INT_CLEAR		0x01C

#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

/* v5.x registers */
#define REG_V5_PT_BASE_PFN	0x00C
#define REG_V5_MMU_FLUSH_ALL	0x010
#define REG_V5_MMU_FLUSH_ENTRY	0x014
#define REG_V5_INT_STATUS	0x060
#define REG_V5_INT_CLEAR	0x064
#define REG_V5_FAULT_AR_VA	0x070
#define REG_V5_FAULT_AW_VA	0x080

#define has_sysmmu(dev)		(dev->archdata.iommu != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

/*
 * IOMMU fault information register
 */
struct sysmmu_fault_info {
	unsigned int bit;	/* bit number in STATUS register */
	unsigned short addr_reg; /* register to read VA fault address */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault */
};

static const struct sysmmu_fault_info sysmmu_faults[] = {
	{ 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
	{ 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
	{ 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
	{ 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
	{ 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
	{ 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
	{ 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
	{ 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
	{ 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
	{ 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
};

/*
 * This structure is attached to dev.archdata.iommu of the master device
 * on device add. It contains a list of SYSMMU controllers defined by the
 * device tree, which are bound to the given master device. It is usually
 * referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
	struct iommu_domain *domain;	/* domain this device is attached to */
};

/*
 * This structure is an Exynos-specific generalization of struct iommu_domain.
 * It contains a list of SYSMMU controllers from all master devices that have
 * been attached to this domain, and the page tables of the I/O address space
 * defined by it. It is usually referenced by the 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

/*
 * This structure holds all the data of a single SYSMMU controller. This
 * includes hw resources like registers and clocks, pointers and list nodes
 * to connect it to all other structures, internal state and parameters read
 * from the device tree. It is usually referenced by the 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *aclk;		/* SYSMMU's aclk clock */
	struct clk *pclk;		/* SYSMMU's pclk clock */
	struct clk *clk_master;		/* master's device clock */
	int activations;		/* number of calls to sysmmu_enable */
	spinlock_t lock;		/* lock for modifying state */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/*
	 * Return true if the System MMU was not active previously and it
	 * needs to be initialized.
	 */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU needs to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}
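
/*
 * Usage sketch of the activation counter (illustrative, derived from the
 * helpers above): the first __sysmmu_enable() call makes
 * set_sysmmu_active() return true and programs the hardware; further
 * calls only bump the counter, and the hardware is really disabled only
 * when set_sysmmu_inactive() drops the counter back to zero.
 */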
268
Marek Szyprowski02cdc362016-02-18 15:12:52 +0100269static void sysmmu_unblock(struct sysmmu_drvdata *data)
KyongHo Cho2a965362012-05-12 05:56:09 +0900270{
Marek Szyprowski84bd0422016-02-29 13:42:57 +0100271 writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
KyongHo Cho2a965362012-05-12 05:56:09 +0900272}
273
Marek Szyprowski02cdc362016-02-18 15:12:52 +0100274static bool sysmmu_block(struct sysmmu_drvdata *data)
KyongHo Cho2a965362012-05-12 05:56:09 +0900275{
276 int i = 120;
277
Marek Szyprowski84bd0422016-02-29 13:42:57 +0100278 writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
279 while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
KyongHo Cho2a965362012-05-12 05:56:09 +0900280 --i;
281
Marek Szyprowski84bd0422016-02-29 13:42:57 +0100282 if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
Marek Szyprowski02cdc362016-02-18 15:12:52 +0100283 sysmmu_unblock(data);
KyongHo Cho2a965362012-05-12 05:56:09 +0900284 return false;
285 }
286
287 return true;
288}
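
/*
 * Note on the block/unblock protocol (derived from the callers below,
 * e.g. sysmmu_tlb_invalidate_entry()): translation-affecting register
 * updates are done while translation is stopped, roughly:
 *
 *	if (sysmmu_block(data)) {
 *		__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
 *		sysmmu_unblock(data);
 *	}
 *
 * sysmmu_block() polls REG_MMU_STATUS up to 120 times and, if the
 * hardware never reports the blocked state, re-enables translation and
 * returns false.
 */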

static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(0x1, data->sfrbase + REG_MMU_FLUSH);
	else
		writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
}

static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
				sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	for (i = 0; i < num_inv; i++) {
		if (MMU_MAJ_VER(data->version) < 5)
			writel((iova & SPAGE_MASK) | 1,
			       data->sfrbase + REG_MMU_FLUSH_ENTRY);
		else
			writel((iova & SPAGE_MASK) | 1,
			       data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
		iova += SPAGE_SIZE;
	}
}

static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
	else
		writel(pgd >> PAGE_SHIFT,
		       data->sfrbase + REG_V5_PT_BASE_PFN);

	__sysmmu_tlb_invalidate(data);
}

static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
{
	BUG_ON(clk_prepare_enable(data->clk_master));
	BUG_ON(clk_prepare_enable(data->clk));
	BUG_ON(clk_prepare_enable(data->pclk));
	BUG_ON(clk_prepare_enable(data->aclk));
}

static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
{
	clk_disable_unprepare(data->aclk);
	clk_disable_unprepare(data->pclk);
	clk_disable_unprepare(data->clk);
	clk_disable_unprepare(data->clk_master);
}

static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
	u32 ver;

	__sysmmu_enable_clocks(data);

	ver = readl(data->sfrbase + REG_MMU_VERSION);

	/* controllers on some SoCs don't report proper version */
	if (ver == 0x80000001u)
		data->version = MAKE_MMU_VER(1, 0);
	else
		data->version = MMU_RAW_VER(ver);

	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

	__sysmmu_disable_clocks(data);
}

static void show_fault_information(struct sysmmu_drvdata *data,
				   const struct sysmmu_fault_info *finfo,
				   sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	dev_err(data->sysmmu, "%s FAULT occurred at %#x (page table base: %pa)\n",
		finfo->name, fault_addr, &data->pgtable);
	ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
	dev_err(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		dev_err(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
	}
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when an interrupt has occurred. */
	struct sysmmu_drvdata *data = dev_id;
	const struct sysmmu_fault_info *finfo;
	unsigned int i, n, itype;
	sysmmu_iova_t fault_addr = -1;
	unsigned short reg_status, reg_clear;
	int ret = -ENOSYS;

	WARN_ON(!is_sysmmu_active(data));

	if (MMU_MAJ_VER(data->version) < 5) {
		reg_status = REG_INT_STATUS;
		reg_clear = REG_INT_CLEAR;
		finfo = sysmmu_faults;
		n = ARRAY_SIZE(sysmmu_faults);
	} else {
		reg_status = REG_V5_INT_STATUS;
		reg_clear = REG_V5_INT_CLEAR;
		finfo = sysmmu_v5_faults;
		n = ARRAY_SIZE(sysmmu_v5_faults);
	}

	spin_lock(&data->lock);

	clk_enable(data->clk_master);

	itype = __ffs(readl(data->sfrbase + reg_status));
	for (i = 0; i < n; i++, finfo++)
		if (finfo->bit == itype)
			break;
	/* unknown/unsupported fault */
	BUG_ON(i == n);

	/* print debug message */
	fault_addr = readl(data->sfrbase + finfo->addr_reg);
	show_fault_information(data, finfo, fault_addr);

	if (data->domain)
		ret = report_iommu_fault(&data->domain->domain,
					 data->master, fault_addr, finfo->type);
	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	writel(1 << itype, data->sfrbase + reg_clear);

	sysmmu_unblock(data);

	clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
	clk_enable(data->clk_master);

	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	writel(0, data->sfrbase + REG_MMU_CFG);

	__sysmmu_disable_clocks(data);
}

static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
	bool disabled;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);

	disabled = set_sysmmu_inactive(data);

	if (disabled) {
		data->pgtable = 0;
		data->domain = NULL;

		__sysmmu_disable_nocount(data);

		dev_dbg(data->sysmmu, "Disabled\n");
	} else {
		dev_dbg(data->sysmmu, "%d times left to disable\n",
			data->activations);
	}

	spin_unlock_irqrestore(&data->lock, flags);

	return disabled;
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg;

	if (data->version <= MAKE_MMU_VER(3, 1))
		cfg = CFG_LRU | CFG_QOS(15);
	else if (data->version <= MAKE_MMU_VER(3, 2))
		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
	else
		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

	writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
	__sysmmu_enable_clocks(data);

	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

	__sysmmu_init_config(data);

	__sysmmu_set_ptbase(data, data->pgtable);

	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	/*
	 * SYSMMU driver keeps master's clock enabled only for the short
	 * time, while accessing the registers. For performing address
	 * translation during DMA transaction it relies on the client
	 * driver to enable it.
	 */
	clk_disable(data->clk_master);
}

static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
			   struct exynos_iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (set_sysmmu_active(data)) {
		data->pgtable = pgtable;
		data->domain = domain;

		__sysmmu_enable_nocount(data);

		dev_dbg(data->sysmmu, "Enabled\n");
	} else {
		ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

		dev_dbg(data->sysmmu, "already enabled\n");
	}

	if (WARN_ON(ret < 0))
		set_sysmmu_inactive(data); /* decrement count */

	spin_unlock_irqrestore(&data->lock, flags);

	return ret;
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data) && data->version >= MAKE_MMU_VER(3, 3)) {
		clk_enable(data->clk_master);
		__sysmmu_tlb_invalidate_entry(data, iova, 1);
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is set-associative TLB
		 * with 8-way and 64 sets.
		 * 1MB page can be cached in one of all sets.
		 * 64KB page can be one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	} else {
		dev_dbg(data->master,
			"disabled. Skipping TLB invalidation @ %#x\n", iova);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
			       dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (PTR_ERR(data->clk) == -ENOENT)
		data->clk = NULL;
	else if (IS_ERR(data->clk))
		return PTR_ERR(data->clk);

	data->aclk = devm_clk_get(dev, "aclk");
	if (PTR_ERR(data->aclk) == -ENOENT)
		data->aclk = NULL;
	else if (IS_ERR(data->aclk))
		return PTR_ERR(data->aclk);

	data->pclk = devm_clk_get(dev, "pclk");
	if (PTR_ERR(data->pclk) == -ENOENT)
		data->pclk = NULL;
	else if (IS_ERR(data->pclk))
		return PTR_ERR(data->pclk);

	if (!data->clk && (!data->aclk || !data->pclk)) {
		dev_err(dev, "Failed to get device clock(s)!\n");
		return -ENOSYS;
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (PTR_ERR(data->clk_master) == -ENOENT)
		data->clk_master = NULL;
	else if (IS_ERR(data->clk_master))
		return PTR_ERR(data->clk_master);

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	platform_set_drvdata(pdev, data);

	__sysmmu_get_version(data);
	if (PG_ENT_SHIFT < 0) {
		if (MMU_MAJ_VER(data->version) < 5)
			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
		else
			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
	}

	pm_runtime_enable(dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);

	dev_dbg(dev, "suspend\n");
	if (is_sysmmu_active(data)) {
		__sysmmu_disable_nocount(data);
		pm_runtime_put(dev);
	}
	return 0;
}

static int exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);

	dev_dbg(dev, "resume\n");
	if (is_sysmmu_active(data)) {
		pm_runtime_get_sync(dev);
		__sysmmu_enable_nocount(data);
	}
	return 0;
}
#endif

static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume)
};

static const struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
		.pm		= &sysmmu_pm_ops,
		.suppress_bind_attrs = true,
	}
};

static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = val;
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}
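
/*
 * Explanatory note: the page tables live in normal (possibly cached)
 * kernel memory that was mapped to dma_dev with dma_map_single(), so each
 * PTE store above is bracketed by dma_sync_single_for_cpu()/_for_device()
 * to keep the tables coherent with the SYSMMU's page table walker. The
 * driver additionally relies on DMA addresses being equal to physical
 * ones, see the BUG_ON() in exynos_iommu_domain_alloc() below.
 */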

static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	/* Check if correct PTE offsets are initialized */
	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA) {
		if (iommu_get_dma_cookie(&domain->domain) != 0)
			goto err_pgtable;
	} else if (type != IOMMU_DOMAIN_UNMANAGED) {
		goto err_pgtable;
	}

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_dma_cookie;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
		domain->pgtable[i + 0] = ZERO_LV2LINK;
		domain->pgtable[i + 1] = ZERO_LV2LINK;
		domain->pgtable[i + 2] = ZERO_LV2LINK;
		domain->pgtable[i + 3] = ZERO_LV2LINK;
		domain->pgtable[i + 4] = ZERO_LV2LINK;
		domain->pgtable[i + 5] = ZERO_LV2LINK;
		domain->pgtable[i + 6] = ZERO_LV2LINK;
		domain->pgtable[i + 7] = ZERO_LV2LINK;
	}

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end   = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_dma_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&domain->domain);
err_pgtable:
	kfree(domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		if (__sysmmu_disable(data))
			data->master = NULL;
		list_del_init(&data->domain_node);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	if (iommu_domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(iommu_domain);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				       struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	bool found = false;

	if (!has_sysmmu(dev) || owner->domain != iommu_domain)
		return;

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		if (data->master == dev) {
			if (__sysmmu_disable(data)) {
				data->master = NULL;
				list_del_init(&data->domain_node);
			}
			pm_runtime_put(data->sysmmu);
			found = true;
		}
	}
	spin_unlock_irqrestore(&domain->lock, flags);

	owner->domain = NULL;

	if (found)
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
			__func__, &pagetable);
	else
		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}

static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;
	int ret = -ENODEV;

	if (!has_sysmmu(dev))
		return -ENODEV;

	if (owner->domain)
		exynos_iommu_detach_device(owner->domain, dev);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_sync(data->sysmmu);
		ret = __sysmmu_enable(data, pagetable, domain);
		if (ret >= 0) {
			data->master = dev;

			spin_lock_irqsave(&domain->lock, flags);
			list_add_tail(&data->domain_node, &domain->clients);
			spin_unlock_irqrestore(&domain->lock, flags);
		}
	}

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
			__func__, &pagetable);
		return ret;
	}

	owner->domain = iommu_domain;
	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
		__func__, &pagetable, (ret == 0) ? "" : ", again");

	return ret;
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE);

		/*
		 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may cause page fault since FLPD
		 * cache may still cache zero_l2_table for the valid area
		 * instead of new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with other valid L2
		 * page table must involve FLPD cache invalidation for System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate TLB without
		 * blocking because the target address of TLB invalidation is
		 * not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	update_pte(sent, mk_lv1ent_sect(paddr));

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		update_pte(pent, mk_lv2ent_spage(paddr));
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		dma_addr_t pent_base = virt_to_phys(pent);

		dma_sync_single_for_cpu(dma_dev, pent_base,
					sizeof(*pent) * SPAGES_PER_LPAGE,
					DMA_TO_DEVICE);
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		dma_sync_single_for_device(dma_dev, pent_base,
					   sizeof(*pent) * SPAGES_PER_LPAGE,
					   DMA_TO_DEVICE);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic that improves address translation
 * performance by caching more page table entries during a page table walk.
 * However, the logic has a bug: once a faulty page table entry is cached,
 * System MMU keeps reporting a page fault when the cached fault entry is
 * hit, even after the entry has been updated to a valid one.
 * To prevent caching faulty page table entries which may be updated to
 * valid entries later, the virtual memory manager must apply the
 * workarounds described below.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, any start address of I/O virtual region must be aligned with
 * the following sizes for System MMU v3.1 and v3.2.
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs more workarounds:
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - Start address of an I/O virtual region must be aligned by 128KiB.
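 *
 * Illustrative example (numbers chosen here, not from the original text):
 * for v3.3, a region at [0x10000000, 0x10080000) followed by a region
 * starting at 0x100C0000 satisfies both rules above: both start addresses
 * are 128KiB-aligned and the hole between the regions is 256KiB.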
 */
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size,
					  &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
                                 unsigned long l_iova, size_t size)
{
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
        sysmmu_pte_t *ent;
        size_t err_pgsize;
        unsigned long flags;

        BUG_ON(domain->pgtable == NULL);

        spin_lock_irqsave(&domain->pgtablelock, flags);

        ent = section_entry(domain->pgtable, iova);

        if (lv1ent_section(ent)) {
                if (WARN_ON(size < SECT_SIZE)) {
                        err_pgsize = SECT_SIZE;
                        goto err;
                }

                /* workaround for h/w bug in System MMU v3.3 */
                update_pte(ent, ZERO_LV2LINK);
                size = SECT_SIZE;
                goto done;
        }

        if (unlikely(lv1ent_fault(ent))) {
                if (size > SECT_SIZE)
                        size = SECT_SIZE;
                goto done;
        }

        /* lv1ent_page(ent) == true here */

        ent = page_entry(ent, iova);

        if (unlikely(lv2ent_fault(ent))) {
                size = SPAGE_SIZE;
                goto done;
        }

        if (lv2ent_small(ent)) {
                update_pte(ent, 0);
                size = SPAGE_SIZE;
                domain->lv2entcnt[lv1ent_offset(iova)] += 1;
                goto done;
        }

        /* lv2ent_large(ent) == true here */
        if (WARN_ON(size < LPAGE_SIZE)) {
                err_pgsize = LPAGE_SIZE;
                goto err;
        }

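        /*
         * A 64KiB large page occupies SPAGES_PER_LPAGE (16) consecutive
         * lv2 entries. Pull the whole run into the CPU cache, clear it,
         * then write it back so that the SYSMMU page table walker sees
         * the cleared entries.
         */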
        dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
                                sizeof(*ent) * SPAGES_PER_LPAGE,
                                DMA_TO_DEVICE);
        memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
        dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
                                   sizeof(*ent) * SPAGES_PER_LPAGE,
                                   DMA_TO_DEVICE);
        size = LPAGE_SIZE;
        domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
        spin_unlock_irqrestore(&domain->pgtablelock, flags);

        exynos_iommu_tlb_invalidate_entry(domain, iova, size);

        return size;
err:
        spin_unlock_irqrestore(&domain->pgtablelock, flags);

        pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
                __func__, size, iova, err_pgsize);

        return 0;
}

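/*
 * Translate @iova to a physical address by walking the two-level page
 * table in software. Returns 0 if no valid mapping covers @iova.
 */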
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
                                             dma_addr_t iova)
{
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        sysmmu_pte_t *entry;
        unsigned long flags;
        phys_addr_t phys = 0;

        spin_lock_irqsave(&domain->pgtablelock, flags);

        entry = section_entry(domain->pgtable, iova);

        if (lv1ent_section(entry)) {
                phys = section_phys(entry) + section_offs(iova);
        } else if (lv1ent_page(entry)) {
                entry = page_entry(entry, iova);

                if (lv2ent_large(entry))
                        phys = lpage_phys(entry) + lpage_offs(iova);
                else if (lv2ent_small(entry))
                        phys = spage_phys(entry) + spage_offs(iova);
        }

        spin_unlock_irqrestore(&domain->pgtablelock, flags);

        return phys;
}

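/*
 * Place each client device in its own IOMMU group: reuse the group the
 * device already belongs to, otherwise allocate a fresh one.
 */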
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
        struct iommu_group *group;

        group = iommu_group_get(dev);
        if (!group)
                group = iommu_group_alloc();

        return group;
}

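/*
 * Register a client device with the IOMMU core; devices that have no
 * SYSMMU attached are rejected with -ENODEV.
 */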
static int exynos_iommu_add_device(struct device *dev)
{
        struct iommu_group *group;

        if (!has_sysmmu(dev))
                return -ENODEV;

        group = iommu_group_get_for_dev(dev);

        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);

        return 0;
}

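/* Detach a client device from its IOMMU group on removal. */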
static void exynos_iommu_remove_device(struct device *dev)
{
        if (!has_sysmmu(dev))
                return;

        iommu_group_remove_device(dev);
}

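/*
 * Called by the OF core for each "iommus" specifier in a client device
 * node: bind the referenced SYSMMU controller to the client by adding its
 * driver data to the owner's list of controllers, allocating the owner
 * structure on first use.
 */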
static int exynos_iommu_of_xlate(struct device *dev,
                                 struct of_phandle_args *spec)
{
        struct exynos_iommu_owner *owner = dev->archdata.iommu;
        struct platform_device *sysmmu = of_find_device_by_node(spec->np);
        struct sysmmu_drvdata *data;

        if (!sysmmu)
                return -ENODEV;

        data = platform_get_drvdata(sysmmu);
        if (!data)
                return -ENODEV;

        if (!owner) {
                owner = kzalloc(sizeof(*owner), GFP_KERNEL);
                if (!owner)
                        return -ENOMEM;

                INIT_LIST_HEAD(&owner->controllers);
                dev->archdata.iommu = owner;
        }

        list_add_tail(&data->owner_node, &owner->controllers);
        return 0;
}

static struct iommu_ops exynos_iommu_ops = {
        .domain_alloc = exynos_iommu_domain_alloc,
        .domain_free = exynos_iommu_domain_free,
        .attach_dev = exynos_iommu_attach_device,
        .detach_dev = exynos_iommu_detach_device,
        .map = exynos_iommu_map,
        .unmap = exynos_iommu_unmap,
        .map_sg = default_iommu_map_sg,
        .iova_to_phys = exynos_iommu_iova_to_phys,
        .device_group = get_device_iommu_group,
        .add_device = exynos_iommu_add_device,
        .remove_device = exynos_iommu_remove_device,
        .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
        .of_xlate = exynos_iommu_of_xlate,
};
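
/*
 * A minimal sketch of how a client driver reaches the callbacks above via
 * the generic IOMMU API (illustration only, not code from this driver):
 *
 *      struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *      iommu_attach_device(dom, dev);          -> exynos_iommu_attach_device()
 *      iommu_map(dom, iova, paddr, SZ_4K,
 *                IOMMU_READ | IOMMU_WRITE);    -> exynos_iommu_map()
 *      iommu_unmap(dom, iova, SZ_4K);          -> exynos_iommu_unmap()
 *      iommu_detach_device(dom, dev);          -> exynos_iommu_detach_device()
 *      iommu_domain_free(dom);                 -> exynos_iommu_domain_free()
 */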

static bool init_done;

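/*
 * Module initialization: create the kmem cache for lv2 page tables,
 * register the platform driver, allocate the shared zero lv2 table and
 * finally hook exynos_iommu_ops into the platform bus. Each failure path
 * unwinds exactly the steps that have already succeeded.
 */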
static int __init exynos_iommu_init(void)
{
        int ret;

        lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
                                LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
        if (!lv2table_kmem_cache) {
                pr_err("%s: Failed to create kmem cache\n", __func__);
                return -ENOMEM;
        }

        ret = platform_driver_register(&exynos_sysmmu_driver);
        if (ret) {
                pr_err("%s: Failed to register driver\n", __func__);
                goto err_reg_driver;
        }

        zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
        if (zero_lv2_table == NULL) {
                pr_err("%s: Failed to allocate zero level2 page table\n",
                        __func__);
                ret = -ENOMEM;
                goto err_zero_lv2;
        }

        ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
        if (ret) {
                pr_err("%s: Failed to register exynos-iommu driver.\n",
                        __func__);
                goto err_set_iommu;
        }

        init_done = true;

        return 0;
err_set_iommu:
        kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
        platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
        kmem_cache_destroy(lv2table_kmem_cache);
        return ret;
}

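/*
 * Early setup hook, run by the OF core for every device tree node matching
 * "samsung,exynos-sysmmu" (see IOMMU_OF_DECLARE() below): initialize the
 * driver if that has not happened yet, create the platform device for the
 * controller and advertise exynos_iommu_ops for the node.
 */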
static int __init exynos_iommu_of_setup(struct device_node *np)
{
        struct platform_device *pdev;

        if (!init_done) {
                int ret = exynos_iommu_init();

                if (ret)
                        return ret;
        }

        /* of_platform_device_create() returns NULL on failure, not ERR_PTR */
        pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
        if (!pdev)
                return -ENODEV;

        /*
         * use the first registered sysmmu device for performing
         * dma mapping operations on iommu page tables (cpu cache flush)
         */
        if (!dma_dev)
                dma_dev = &pdev->dev;

        of_iommu_set_ops(np, &exynos_iommu_ops);
        return 0;
}

IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu",
                 exynos_iommu_of_setup);