blob: df66a3a31c5af53c12ed0a45f5f6dfa53baa4483 [file] [log] [blame]
Steve Mucklef132c6c2012-06-06 18:30:57 -07001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070011 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/errno.h>
18#include <linux/io.h>
19#include <linux/interrupt.h>
20#include <linux/list.h>
21#include <linux/spinlock.h>
22#include <linux/slab.h>
23#include <linux/iommu.h>
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080024#include <linux/clk.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070025#include <linux/scatterlist.h>
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070026
27#include <asm/cacheflush.h>
28#include <asm/sizes.h>
29
30#include <mach/iommu_hw-8xxx.h>
31#include <mach/iommu.h>
32
/* Read a CP15 coprocessor register into 'reg' via an inline mrc instruction.
 * Used below to sample the CPU's TEX-remap registers so the IOMMU contexts
 * can be programmed with matching memory attributes.
 */
#define MRC(reg, processor, op1, crn, crm, op2) \
__asm__ __volatile__ ( \
" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
: "=r" (reg))

/* PRRR: Primary Region Remap Register (CP15 c10, c2, 0) */
#define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
/* NMRR: Normal Memory Remap Register (CP15 c10, c2, 1) */
#define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)
40
/* Sharability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NON_SH		0x0	/* non-shared */
#define MSM_IOMMU_ATTR_SH		0x4	/* shared */

/* Cacheability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NONCACHED	0x0	/* non-cacheable */
#define MSM_IOMMU_ATTR_CACHED_WB_WA	0x1	/* write-back, write-allocate */
#define MSM_IOMMU_ATTR_CACHED_WB_NWA	0x2	/* write-back, no write-allocate */
#define MSM_IOMMU_ATTR_CACHED_WT	0x3	/* write-through */
51
/*
 * Flush a range of page-table entries from the CPU data cache so the
 * IOMMU's hardware table walker sees them.  When the domain redirects
 * its table walks through the (coherent) L2 slave port, no flush is
 * needed and this is a no-op.
 */
static inline void clean_pte(unsigned long *start, unsigned long *end,
			     int redirect)
{
	if (redirect)
		return;

	dmac_flush_range(start, end);
}
58
/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

/* TEX class index per MSM_IOMMU_ATTR_* cacheability value; filled in
 * elsewhere (not visible in this chunk) and read by __get_pgprot(). */
static int msm_iommu_tex_class[4];

/* Serializes all domain/page-table/TLB operations in this driver. */
DEFINE_MUTEX(msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070065
/* Per-domain private data. */
struct msm_priv {
	unsigned long *pgtable;		/* first-level table, SZ_16K, 4096 entries */
	int redirect;			/* non-zero: page-table walks go via L2 slave port */
	struct list_head list_attached;	/* attached msm_iommu_ctx_drvdata contexts */
};
71
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080072static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
73{
74 int ret;
75
Steve Mucklef132c6c2012-06-06 18:30:57 -070076 ret = clk_prepare_enable(drvdata->pclk);
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080077 if (ret)
78 goto fail;
79
80 if (drvdata->clk) {
Steve Mucklef132c6c2012-06-06 18:30:57 -070081 ret = clk_prepare_enable(drvdata->clk);
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080082 if (ret)
Steve Mucklef132c6c2012-06-06 18:30:57 -070083 clk_disable_unprepare(drvdata->pclk);
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080084 }
85fail:
86 return ret;
87}
88
/* Undo __enable_clocks(): stop the optional core clock first, then the
 * mandatory pclk (reverse of the enable order). */
static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->clk)
		clk_disable_unprepare(drvdata->clk);
	clk_disable_unprepare(drvdata->pclk);
}
95
96static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
97{
98 struct msm_priv *priv = domain->priv;
99 struct msm_iommu_drvdata *iommu_drvdata;
100 struct msm_iommu_ctx_drvdata *ctx_drvdata;
101 int ret = 0;
102 int asid;
103
104 list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
105 if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
106 BUG();
107
108 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
109 if (!iommu_drvdata)
110 BUG();
111
112 ret = __enable_clocks(iommu_drvdata);
113 if (ret)
114 goto fail;
115
116 asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
117 ctx_drvdata->num);
118
119 SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
120 asid | (va & TLBIVA_VA));
121 mb();
122 __disable_clocks(iommu_drvdata);
123 }
124fail:
125 return ret;
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800126}
127
Stepan Moskovchenko33069732010-11-12 19:30:00 -0800128static int __flush_iotlb(struct iommu_domain *domain)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700129{
130 struct msm_priv *priv = domain->priv;
131 struct msm_iommu_drvdata *iommu_drvdata;
132 struct msm_iommu_ctx_drvdata *ctx_drvdata;
Stepan Moskovchenko33069732010-11-12 19:30:00 -0800133 int ret = 0;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700134 int asid;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700135
136 list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
137 if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
138 BUG();
139
140 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700141 if (!iommu_drvdata)
142 BUG();
Stepan Moskovchenko33069732010-11-12 19:30:00 -0800143
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800144 ret = __enable_clocks(iommu_drvdata);
145 if (ret)
146 goto fail;
147
Steve Mucklef132c6c2012-06-06 18:30:57 -0700148 asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
149 ctx_drvdata->num);
150
151 SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
152 mb();
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800153 __disable_clocks(iommu_drvdata);
154 }
155fail:
Stepan Moskovchenko33069732010-11-12 19:30:00 -0800156 return ret;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700157}
158
/* Zero every programmable register of context bank @ctx so it starts
 * from a clean, translation-disabled state before (re)programming or
 * after detach.  Purely MMIO writes; caller must hold the IOMMU clocks
 * enabled. */
static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	mb();	/* make sure all writes have landed before returning */
}
182
/* Fully program context bank @ctx of the IOMMU at @base to translate
 * through the first-level table at physical address @pgtable, then
 * enable translation.  @ncb is the total number of context banks on
 * this IOMMU (scanned for ASID reuse), @redirect selects L2-coherent
 * table walks and @ttbr_split sets the TTBR0/TTBR1 VA split.
 * Caller must hold the IOMMU clocks enabled and msm_iommu_lock. */
static void __program_context(void __iomem *base, int ctx, int ncb,
			      phys_addr_t pgtable, int redirect,
			      int ttbr_split)
{
	unsigned int prrr, nmrr;
	int i, j, found;
	__reset_context(base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	/* Both TTBRs point at the same table; TTBR1 only used if split */
	SET_TTBCR(base, ctx, ttbr_split);
	SET_TTBR0_PA(base, ctx, (pgtable >> TTBR0_PA_SHIFT));
	if (ttbr_split)
		SET_TTBR1_PA(base, ctx, (pgtable >> TTBR1_PA_SHIFT));

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes to mirror the CPU's, so CPU and IOMMU
	 * interpret page-table attribute bits identically */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	if (redirect) {
		SET_TTBR0_SH(base, ctx, 1);
		SET_TTBR1_SH(base, ctx, 1);

		SET_TTBR0_NOS(base, ctx, 1);
		SET_TTBR1_NOS(base, ctx, 1);

		SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR0_IRGNL(base, ctx, 1);

		SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
		SET_TTBR1_IRGNL(base, ctx, 1);

		SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
		SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
	}

	/* Find if this page table is used elsewhere, and re-use ASID */
	found = 0;
	for (i = 0; i < ncb; i++)
		if (GET_TTBR0_PA(base, i) == (pgtable >> TTBR0_PA_SHIFT) &&
		    i != ctx) {
			SET_CONTEXTIDR_ASID(base, ctx, \
					    GET_CONTEXTIDR_ASID(base, i));
			found = 1;
			break;
		}

	/* If page table is new, find an unused ASID.  Candidate ASIDs are
	 * 0..ncb-1; pick the first one no other context bank is using. */
	if (!found) {
		for (i = 0; i < ncb; i++) {
			found = 0;
			for (j = 0; j < ncb; j++) {
				if (GET_CONTEXTIDR_ASID(base, j) == i &&
				    j != ctx)
					found = 1;
			}

			if (!found) {
				SET_CONTEXTIDR_ASID(base, ctx, i);
				break;
			}
		}
		/* ncb contexts can always share ncb ASIDs, so this cannot
		 * fail unless the hardware state is inconsistent */
		BUG_ON(found);
	}

	/* Enable the MMU */
	SET_M(base, ctx, 1);
	mb();	/* ensure the enable is visible before returning */
}
279
Steve Mucklef132c6c2012-06-06 18:30:57 -0700280static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700281{
282 struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
283
284 if (!priv)
285 goto fail_nomem;
286
287 INIT_LIST_HEAD(&priv->list_attached);
288 priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
289 get_order(SZ_16K));
290
291 if (!priv->pgtable)
292 goto fail_nomem;
293
Steve Mucklef132c6c2012-06-06 18:30:57 -0700294#ifdef CONFIG_IOMMU_PGTABLES_L2
295 priv->redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
296#endif
297
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700298 memset(priv->pgtable, 0, SZ_16K);
299 domain->priv = priv;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700300
301 clean_pte(priv->pgtable, priv->pgtable + NUM_FL_PTE, priv->redirect);
302
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700303 return 0;
304
305fail_nomem:
306 kfree(priv);
307 return -ENOMEM;
308}
309
/* iommu_ops->domain_destroy: free every second-level table still
 * referenced by the first-level table, then the first-level table
 * itself and the private data.  Tolerates a NULL domain->priv. */
static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long *fl_table;
	int i;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv) {
		fl_table = priv->pgtable;

		/* Entries with type bits == FL_TYPE_TABLE point to a
		 * second-level page; recover its VA and free it */
		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
				free_page((unsigned long) __va(((fl_table[i]) &
								FL_BASE_MASK)));

		free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
		priv->pgtable = NULL;
	}

	kfree(priv);
	mutex_unlock(&msm_iommu_lock);
}
335
/* iommu_ops->attach_dev: bind a context-bank device @dev to @domain.
 * Programs the context bank with the domain's page table and records
 * the context on the domain's attached list.
 *
 * Returns 0 on success, -EINVAL for missing/NULL driver data, -EBUSY
 * if the context is already attached somewhere, or a clock error. */
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;

	mutex_lock(&msm_iommu_lock);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	/* dev is the context device; its parent is the IOMMU device */
	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
		ret = -EINVAL;
		goto fail;
	}

	/* Already attached to some domain? */
	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	/* Already attached to THIS domain? (defensive; implied by above) */
	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	__program_context(iommu_drvdata->base, ctx_dev->num, iommu_drvdata->ncb,
			  __pa(priv->pgtable), priv->redirect,
			  iommu_drvdata->ttbr_split);

	__disable_clocks(iommu_drvdata);
	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);

	ctx_drvdata->attached_domain = domain;
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}
390
/* iommu_ops->detach_dev: unbind context device @dev from @domain.
 * Invalidates the context's ASID in the TLB, resets the context bank
 * and removes it from the domain's attached list.  Errors (bad args,
 * clock failure) are silently ignored since the API returns void. */
static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;

	mutex_lock(&msm_iommu_lock);
	priv = domain->priv;

	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	/* Flush this context's TLB entries before disabling translation */
	SET_TLBIASID(iommu_drvdata->base, ctx_dev->num,
		     GET_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_dev->num));

	__reset_context(iommu_drvdata->base, ctx_dev->num);
	__disable_clocks(iommu_drvdata);
	list_del_init(&ctx_drvdata->attached_elm);
	ctx_drvdata->attached_domain = NULL;
fail:
	mutex_unlock(&msm_iommu_lock);
}
427
428static int __get_pgprot(int prot, int len)
429{
430 unsigned int pgprot;
431 int tex;
432
433 if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
434 prot |= IOMMU_READ | IOMMU_WRITE;
435 WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
436 }
437
438 if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
439 prot |= IOMMU_READ;
440 WARN_ONCE(1, "Write-only iommu mappings unsupported; falling back to RW\n");
441 }
442
443 if (prot & IOMMU_CACHE)
444 tex = (pgprot_kernel >> 2) & 0x07;
445 else
446 tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];
447
448 if (tex < 0 || tex > NUM_TEX_CLASS - 1)
449 return 0;
450
451 if (len == SZ_16M || len == SZ_1M) {
452 pgprot = FL_SHARED;
453 pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
454 pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
455 pgprot |= tex & 0x04 ? FL_TEX0 : 0;
456 pgprot |= FL_AP0 | FL_AP1;
457 pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2;
458 } else {
459 pgprot = SL_SHARED;
460 pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
461 pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
462 pgprot |= tex & 0x04 ? SL_TEX0 : 0;
463 pgprot |= SL_AP0 | SL_AP1;
464 pgprot |= prot & IOMMU_WRITE ? 0 : SL_AP2;
465 }
466
467 return pgprot;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700468}
469
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600470static unsigned long *make_second_level(struct msm_priv *priv,
471 unsigned long *fl_pte)
472{
473 unsigned long *sl;
474 sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
475 get_order(SZ_4K));
476
477 if (!sl) {
478 pr_debug("Could not allocate second level table\n");
479 goto fail;
480 }
481 memset(sl, 0, SZ_4K);
482 clean_pte(sl, sl + NUM_SL_PTE, priv->redirect);
483
484 *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
485 FL_TYPE_TABLE);
486
487 clean_pte(fl_pte, fl_pte + 1, priv->redirect);
488fail:
489 return sl;
490}
491
492static int sl_4k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
493{
494 int ret = 0;
495
496 if (*sl_pte) {
497 ret = -EBUSY;
498 goto fail;
499 }
500
501 *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
502 | SL_TYPE_SMALL | pgprot;
503fail:
504 return ret;
505}
506
507static int sl_64k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
508{
509 int ret = 0;
510
511 int i;
512
513 for (i = 0; i < 16; i++)
514 if (*(sl_pte+i)) {
515 ret = -EBUSY;
516 goto fail;
517 }
518
519 for (i = 0; i < 16; i++)
520 *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
521 | SL_SHARED | SL_TYPE_LARGE | pgprot;
522
523fail:
524 return ret;
525}
526
527
528static inline int fl_1m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
529{
530 if (*fl_pte)
531 return -EBUSY;
532
533 *fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
534 | pgprot;
535
536 return 0;
537}
538
539
540static inline int fl_16m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
541{
542 int i;
543 int ret = 0;
544 for (i = 0; i < 16; i++)
545 if (*(fl_pte+i)) {
546 ret = -EBUSY;
547 goto fail;
548 }
549 for (i = 0; i < 16; i++)
550 *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
551 | FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
552fail:
553 return ret;
554}
555
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700556static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +0200557 phys_addr_t pa, size_t len, int prot)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700558{
559 struct msm_priv *priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700560 unsigned long *fl_table;
561 unsigned long *fl_pte;
562 unsigned long fl_offset;
563 unsigned long *sl_table;
564 unsigned long *sl_pte;
565 unsigned long sl_offset;
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800566 unsigned int pgprot;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700567 int ret = 0;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700568
Steve Mucklef132c6c2012-06-06 18:30:57 -0700569 mutex_lock(&msm_iommu_lock);
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800570
571 priv = domain->priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700572 if (!priv) {
573 ret = -EINVAL;
574 goto fail;
575 }
576
577 fl_table = priv->pgtable;
578
579 if (len != SZ_16M && len != SZ_1M &&
580 len != SZ_64K && len != SZ_4K) {
581 pr_debug("Bad size: %d\n", len);
582 ret = -EINVAL;
583 goto fail;
584 }
585
586 if (!fl_table) {
587 pr_debug("Null page table\n");
588 ret = -EINVAL;
589 goto fail;
590 }
591
Steve Mucklef132c6c2012-06-06 18:30:57 -0700592 pgprot = __get_pgprot(prot, len);
593
594 if (!pgprot) {
595 ret = -EINVAL;
596 goto fail;
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800597 }
598
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700599 fl_offset = FL_OFFSET(va); /* Upper 12 bits */
600 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
601
602 if (len == SZ_16M) {
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600603 ret = fl_16m(fl_pte, pa, pgprot);
604 if (ret)
605 goto fail;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700606 clean_pte(fl_pte, fl_pte + 16, priv->redirect);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700607 }
608
Steve Mucklef132c6c2012-06-06 18:30:57 -0700609 if (len == SZ_1M) {
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600610 ret = fl_1m(fl_pte, pa, pgprot);
611 if (ret)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700612 goto fail;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700613 clean_pte(fl_pte, fl_pte + 1, priv->redirect);
614 }
615
616 /* Need a 2nd level table */
617 if (len == SZ_4K || len == SZ_64K) {
618
619 if (*fl_pte == 0) {
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600620 if (make_second_level(priv, fl_pte) == NULL) {
Steve Mucklef132c6c2012-06-06 18:30:57 -0700621 ret = -ENOMEM;
622 goto fail;
623 }
Steve Mucklef132c6c2012-06-06 18:30:57 -0700624 }
625
626 if (!(*fl_pte & FL_TYPE_TABLE)) {
627 ret = -EBUSY;
628 goto fail;
629 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700630 }
631
632 sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
633 sl_offset = SL_OFFSET(va);
634 sl_pte = sl_table + sl_offset;
635
Steve Mucklef132c6c2012-06-06 18:30:57 -0700636 if (len == SZ_4K) {
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600637 ret = sl_4k(sl_pte, pa, pgprot);
638 if (ret)
Steve Mucklef132c6c2012-06-06 18:30:57 -0700639 goto fail;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700640
Steve Mucklef132c6c2012-06-06 18:30:57 -0700641 clean_pte(sl_pte, sl_pte + 1, priv->redirect);
642 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700643
644 if (len == SZ_64K) {
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600645 ret = sl_64k(sl_pte, pa, pgprot);
646 if (ret)
647 goto fail;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700648 clean_pte(sl_pte, sl_pte + 16, priv->redirect);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700649 }
650
Steve Mucklef132c6c2012-06-06 18:30:57 -0700651 ret = __flush_iotlb_va(domain, va);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700652fail:
Steve Mucklef132c6c2012-06-06 18:30:57 -0700653 mutex_unlock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700654 return ret;
655}
656
Ohad Ben-Cohen50090652011-11-10 11:32:25 +0200657static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
658 size_t len)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700659{
660 struct msm_priv *priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700661 unsigned long *fl_table;
662 unsigned long *fl_pte;
663 unsigned long fl_offset;
664 unsigned long *sl_table;
665 unsigned long *sl_pte;
666 unsigned long sl_offset;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700667 int i, ret = 0;
668
Steve Mucklef132c6c2012-06-06 18:30:57 -0700669 mutex_lock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700670
671 priv = domain->priv;
672
Joerg Roedel05df1f32012-01-26 18:25:37 +0100673 if (!priv)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700674 goto fail;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700675
676 fl_table = priv->pgtable;
677
678 if (len != SZ_16M && len != SZ_1M &&
679 len != SZ_64K && len != SZ_4K) {
680 pr_debug("Bad length: %d\n", len);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700681 goto fail;
682 }
683
684 if (!fl_table) {
685 pr_debug("Null page table\n");
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700686 goto fail;
687 }
688
689 fl_offset = FL_OFFSET(va); /* Upper 12 bits */
690 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
691
692 if (*fl_pte == 0) {
693 pr_debug("First level PTE is 0\n");
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700694 goto fail;
695 }
696
697 /* Unmap supersection */
Steve Mucklef132c6c2012-06-06 18:30:57 -0700698 if (len == SZ_16M) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700699 for (i = 0; i < 16; i++)
700 *(fl_pte+i) = 0;
701
Steve Mucklef132c6c2012-06-06 18:30:57 -0700702 clean_pte(fl_pte, fl_pte + 16, priv->redirect);
703 }
704
705 if (len == SZ_1M) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700706 *fl_pte = 0;
707
Steve Mucklef132c6c2012-06-06 18:30:57 -0700708 clean_pte(fl_pte, fl_pte + 1, priv->redirect);
709 }
710
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700711 sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
712 sl_offset = SL_OFFSET(va);
713 sl_pte = sl_table + sl_offset;
714
715 if (len == SZ_64K) {
716 for (i = 0; i < 16; i++)
717 *(sl_pte+i) = 0;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700718
719 clean_pte(sl_pte, sl_pte + 16, priv->redirect);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700720 }
721
Steve Mucklef132c6c2012-06-06 18:30:57 -0700722 if (len == SZ_4K) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700723 *sl_pte = 0;
724
Steve Mucklef132c6c2012-06-06 18:30:57 -0700725 clean_pte(sl_pte, sl_pte + 1, priv->redirect);
726 }
727
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700728 if (len == SZ_4K || len == SZ_64K) {
729 int used = 0;
730
731 for (i = 0; i < NUM_SL_PTE; i++)
732 if (sl_table[i])
733 used = 1;
734 if (!used) {
735 free_page((unsigned long)sl_table);
736 *fl_pte = 0;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700737
738 clean_pte(fl_pte, fl_pte + 1, priv->redirect);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700739 }
740 }
741
Steve Mucklef132c6c2012-06-06 18:30:57 -0700742 ret = __flush_iotlb_va(domain, va);
Ohad Ben-Cohen9e285472011-09-02 13:32:34 -0400743
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700744fail:
Steve Mucklef132c6c2012-06-06 18:30:57 -0700745 mutex_unlock(&msm_iommu_lock);
Ohad Ben-Cohen50090652011-11-10 11:32:25 +0200746
747 /* the IOMMU API requires us to return how many bytes were unmapped */
748 len = ret ? 0 : len;
749 return len;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700750}
751
/*
 * Physical address of a scatterlist entry.  Prefer sg_dma_address()
 * so that carveout regions without a backing struct page can still be
 * mapped; fall back to sg_phys() when no DMA address is set.
 */
static unsigned int get_phys_addr(struct scatterlist *sg)
{
	unsigned int dma_addr = sg_dma_address(sg);

	return dma_addr ? dma_addr : sg_phys(sg);
}
764
Jordan Crouse8d8ee1a2012-07-09 13:27:07 -0600765static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
766 int align)
767{
768 return IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
769 && (len >= align);
770}
771
/* Map a scatterlist of physical chunks to a contiguous IOVA range
 * starting at @va, totalling @len bytes (must be 4K-aligned).  For
 * each position the largest page size whose alignment and remaining
 * sg-chunk length permit (16M, 1M, 64K, 4K) is chosen.  Ends with a
 * full per-ASID TLB flush.
 *
 * Returns 0 on success or a negative errno; on failure, pages mapped
 * so far are NOT rolled back (caller is expected to unmap the range).
 */
static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
			       struct scatterlist *sg, unsigned int len,
			       int prot)
{
	unsigned int pa;
	unsigned int offset = 0;	/* bytes of the IOVA range mapped so far */
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table = NULL;
	unsigned long sl_offset, sl_start;
	unsigned int chunk_size, chunk_offset = 0;	/* position within current sg entry */
	int ret = 0;
	struct msm_priv *priv;
	unsigned int pgprot4k, pgprot64k, pgprot1m, pgprot16m;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pgtable;

	/* Precompute attribute bits for every page size we might pick */
	pgprot4k = __get_pgprot(prot, SZ_4K);
	pgprot64k = __get_pgprot(prot, SZ_64K);
	pgprot1m = __get_pgprot(prot, SZ_1M);
	pgprot16m = __get_pgprot(prot, SZ_16M);

	if (!pgprot4k || !pgprot64k || !pgprot1m || !pgprot16m) {
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */
	pa = get_phys_addr(sg);

	while (offset < len) {
		chunk_size = SZ_4K;

		if (is_fully_aligned(va, pa, sg->length - chunk_offset,
				     SZ_16M))
			chunk_size = SZ_16M;
		else if (is_fully_aligned(va, pa, sg->length - chunk_offset,
					  SZ_1M))
			chunk_size = SZ_1M;
		/* 64k or 4k determined later */

		/* for 1M and 16M, only first level entries are required */
		if (chunk_size >= SZ_1M) {
			if (chunk_size == SZ_16M) {
				ret = fl_16m(fl_pte, pa, pgprot16m);
				if (ret)
					goto fail;
				clean_pte(fl_pte, fl_pte + 16, priv->redirect);
				fl_pte += 16;
			} else if (chunk_size == SZ_1M) {
				ret = fl_1m(fl_pte, pa, pgprot1m);
				if (ret)
					goto fail;
				clean_pte(fl_pte, fl_pte + 1, priv->redirect);
				fl_pte++;
			}

			offset += chunk_size;
			chunk_offset += chunk_size;
			va += chunk_size;
			pa += chunk_size;

			/* Current sg entry exhausted: advance to the next */
			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				pa = get_phys_addr(sg);
				if (pa == 0) {
					pr_debug("No dma address for sg %p\n",
						 sg);
					ret = -EINVAL;
					goto fail;
				}
			}
			continue;
		}
		/* for 4K or 64K, make sure there is a second level table */
		if (*fl_pte == 0) {
			if (!make_second_level(priv, fl_pte)) {
				ret = -ENOMEM;
				goto fail;
			}
		}
		if (!(*fl_pte & FL_TYPE_TABLE)) {
			ret = -EBUSY;
			goto fail;
		}
		sl_table = __va(((*fl_pte) & FL_BASE_MASK));
		sl_offset = SL_OFFSET(va);
		/* Keep track of initial position so we
		 * don't clean more than we have to
		 */
		sl_start = sl_offset;

		/* Build the 2nd level page table */
		while (offset < len && sl_offset < NUM_SL_PTE) {

			/* Map a large 64K page if the chunk is large enough and
			 * the pa and va are aligned
			 */

			if (is_fully_aligned(va, pa, sg->length - chunk_offset,
					     SZ_64K))
				chunk_size = SZ_64K;
			else
				chunk_size = SZ_4K;

			/* NOTE(review): sl_4k/sl_64k return values are
			 * ignored here (-EBUSY on occupied slots would be
			 * silently dropped) — presumably callers guarantee
			 * the range is unmapped; confirm. */
			if (chunk_size == SZ_4K) {
				sl_4k(&sl_table[sl_offset], pa, pgprot4k);
				sl_offset++;
			} else {
				BUG_ON(sl_offset + 16 > NUM_SL_PTE);
				sl_64k(&sl_table[sl_offset], pa, pgprot64k);
				sl_offset += 16;
			}


			offset += chunk_size;
			chunk_offset += chunk_size;
			va += chunk_size;
			pa += chunk_size;

			/* Current sg entry exhausted: advance to the next */
			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				pa = get_phys_addr(sg);
				if (pa == 0) {
					pr_debug("No dma address for sg %p\n",
						 sg);
					ret = -EINVAL;
					goto fail;
				}
			}
		}

		/* Flush only the slots written in this pass */
		clean_pte(sl_table + sl_start, sl_table + sl_offset,
			  priv->redirect);

		fl_pte++;
		sl_offset = 0;
	}
	__flush_iotlb(domain);
fail:
	mutex_unlock(&msm_iommu_lock);
	return ret;
}
924
925
/*
 * Unmap the virtual range [va, va + len) from a domain's page tables.
 *
 * Walks the first-level table one 1 MB slot at a time. For slots that
 * point at a second-level table, the covered 4 K PTEs are zeroed and the
 * second-level table itself is freed once it holds no live entries. For
 * section-mapped slots, the whole 1 MB first-level entry is cleared.
 * Finishes with a single TLB flush for the domain.
 *
 * len must be a multiple of SZ_4K (BUG otherwise). Always returns 0.
 * NOTE(review): a section slot is unmapped in full (1 MB) even if the
 * remaining len is smaller — callers are presumably expected to unmap on
 * the same granularity they mapped; verify against map_range users.
 */
static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	unsigned int offset = 0;	/* bytes unmapped so far */
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	int used, i;
	struct msm_priv *priv;

	mutex_lock(&msm_iommu_lock);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pgtable;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	while (offset < len) {
		if (*fl_pte & FL_TYPE_TABLE) {
			/* Second-level table: clear the 4K PTEs in range */
			sl_start = SL_OFFSET(va);
			sl_table = __va(((*fl_pte) & FL_BASE_MASK));
			sl_end = ((len - offset) / SZ_4K) + sl_start;

			/* Never run past this table; the outer loop moves
			 * on to the next first-level slot.
			 */
			if (sl_end > NUM_SL_PTE)
				sl_end = NUM_SL_PTE;

			memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
			clean_pte(sl_table + sl_start, sl_table + sl_end,
					priv->redirect);

			offset += (sl_end - sl_start) * SZ_4K;
			va += (sl_end - sl_start) * SZ_4K;

			/* Unmap and free the 2nd level table if all mappings
			 * in it were removed. This saves memory, but the table
			 * will need to be re-allocated the next time someone
			 * tries to map these VAs.
			 */
			used = 0;

			/* If we just unmapped the whole table, don't bother
			 * seeing if there are still used entries left.
			 */
			if (sl_end - sl_start != NUM_SL_PTE)
				for (i = 0; i < NUM_SL_PTE; i++)
					if (sl_table[i]) {
						used = 1;
						break;
					}
			if (!used) {
				free_page((unsigned long)sl_table);
				*fl_pte = 0;

				clean_pte(fl_pte, fl_pte + 1, priv->redirect);
			}

			sl_start = 0;
		} else {
			/* Section (or empty) slot: clear the whole 1 MB entry */
			*fl_pte = 0;
			clean_pte(fl_pte, fl_pte + 1, priv->redirect);
			va += SZ_1M;
			offset += SZ_1M;
			sl_start = 0;
		}
		fl_pte++;
	}

	__flush_iotlb(domain);
	mutex_unlock(&msm_iommu_lock);
	return 0;
}
1002
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001003static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
1004 unsigned long va)
1005{
1006 struct msm_priv *priv;
1007 struct msm_iommu_drvdata *iommu_drvdata;
1008 struct msm_iommu_ctx_drvdata *ctx_drvdata;
1009 unsigned int par;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001010 void __iomem *base;
1011 phys_addr_t ret = 0;
1012 int ctx;
1013
Steve Mucklef132c6c2012-06-06 18:30:57 -07001014 mutex_lock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001015
1016 priv = domain->priv;
1017 if (list_empty(&priv->list_attached))
1018 goto fail;
1019
1020 ctx_drvdata = list_entry(priv->list_attached.next,
1021 struct msm_iommu_ctx_drvdata, attached_elm);
1022 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
1023
1024 base = iommu_drvdata->base;
1025 ctx = ctx_drvdata->num;
1026
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -08001027 ret = __enable_clocks(iommu_drvdata);
1028 if (ret)
1029 goto fail;
1030
Stepan Moskovchenkob0e78082011-02-28 16:04:55 -08001031 SET_V2PPR(base, ctx, va & V2Pxx_VA);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001032
Steve Mucklef132c6c2012-06-06 18:30:57 -07001033 mb();
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001034 par = GET_PAR(base, ctx);
1035
1036 /* We are dealing with a supersection */
1037 if (GET_NOFAULT_SS(base, ctx))
1038 ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
1039 else /* Upper 20 bits from PAR, lower 12 from VA */
1040 ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
1041
Stepan Moskovchenko33069732010-11-12 19:30:00 -08001042 if (GET_FAULT(base, ctx))
1043 ret = 0;
1044
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -08001045 __disable_clocks(iommu_drvdata);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001046fail:
Steve Mucklef132c6c2012-06-06 18:30:57 -07001047 mutex_unlock(&msm_iommu_lock);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001048 return ret;
1049}
1050
/*
 * No optional IOMMU capabilities are advertised for any domain;
 * unconditionally report "not supported".
 */
static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}
1056
/*
 * Dump the fault-related registers of one translation context to the
 * kernel log (called from the fault handler when a fault goes
 * unhandled). The FSR bits are decoded into the short mnemonics shown
 * in the format string (TF = translation fault, AFF/APF = access
 * flag/permission fault, TLBMF = TLB miss fault, HTWDEEF/HTWSEEF =
 * hardware table walk errors, MHF = multi-hit, SS = supersection,
 * MULTI = multiple faults pending).
 */
static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
			(fsr & 0x02) ? "TF " : "",
			(fsr & 0x04) ? "AFF " : "",
			(fsr & 0x08) ? "APF " : "",
			(fsr & 0x10) ? "TLBMF " : "",
			(fsr & 0x20) ? "HTWDEEF " : "",
			(fsr & 0x40) ? "HTWSEEF " : "",
			(fsr & 0x80) ? "MHF " : "",
			(fsr & 0x10000) ? "SL " : "",
			(fsr & 0x40000000) ? "SS " : "",
			(fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}
1083
1084irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
1085{
Steve Mucklef132c6c2012-06-06 18:30:57 -07001086 struct msm_iommu_ctx_drvdata *ctx_drvdata = dev_id;
1087 struct msm_iommu_drvdata *drvdata;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001088 void __iomem *base;
Steve Mucklef132c6c2012-06-06 18:30:57 -07001089 unsigned int fsr, num;
1090 int ret;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001091
Steve Mucklef132c6c2012-06-06 18:30:57 -07001092 mutex_lock(&msm_iommu_lock);
1093 BUG_ON(!ctx_drvdata);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001094
Steve Mucklef132c6c2012-06-06 18:30:57 -07001095 drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
1096 BUG_ON(!drvdata);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001097
1098 base = drvdata->base;
Steve Mucklef132c6c2012-06-06 18:30:57 -07001099 num = ctx_drvdata->num;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001100
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -08001101 ret = __enable_clocks(drvdata);
1102 if (ret)
1103 goto fail;
1104
Steve Mucklef132c6c2012-06-06 18:30:57 -07001105 fsr = GET_FSR(base, num);
1106
1107 if (fsr) {
1108 if (!ctx_drvdata->attached_domain) {
1109 pr_err("Bad domain in interrupt handler\n");
1110 ret = -ENOSYS;
1111 } else
1112 ret = report_iommu_fault(ctx_drvdata->attached_domain,
1113 &ctx_drvdata->pdev->dev,
1114 GET_FAR(base, num), 0);
1115
1116 if (ret == -ENOSYS) {
1117 pr_err("Unexpected IOMMU page fault!\n");
1118 pr_err("name = %s\n", drvdata->name);
1119 pr_err("context = %s (%d)\n", ctx_drvdata->name, num);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001120 pr_err("Interesting registers:\n");
Steve Mucklef132c6c2012-06-06 18:30:57 -07001121 print_ctx_regs(base, num);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001122 }
Steve Mucklef132c6c2012-06-06 18:30:57 -07001123
1124 SET_FSR(base, num, fsr);
1125 SET_RESUME(base, num, 1);
1126
1127 ret = IRQ_HANDLED;
1128 } else
1129 ret = IRQ_NONE;
1130
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -08001131 __disable_clocks(drvdata);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001132fail:
Steve Mucklef132c6c2012-06-06 18:30:57 -07001133 mutex_unlock(&msm_iommu_lock);
1134 return ret;
1135}
1136
/*
 * Return the physical address of the domain's first-level page table,
 * suitable for programming into a context bank's TTBR.
 */
static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;
	return __pa(priv->pgtable);
}
1142
/*
 * IOMMU API callbacks for the MSM SMMU. map_range/unmap_range and
 * get_pt_base_addr are MSM-specific extensions alongside the core
 * iommu_ops operations.
 */
static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap,
	.get_pt_base_addr = msm_iommu_get_pt_base_addr,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,	/* supported mapping sizes */
};
1157
Stepan Moskovchenko100832c2010-11-15 18:20:08 -08001158static int __init get_tex_class(int icp, int ocp, int mt, int nos)
1159{
1160 int i = 0;
1161 unsigned int prrr = 0;
1162 unsigned int nmrr = 0;
1163 int c_icp, c_ocp, c_mt, c_nos;
1164
1165 RCP15_PRRR(prrr);
1166 RCP15_NMRR(nmrr);
1167
1168 for (i = 0; i < NUM_TEX_CLASS; i++) {
1169 c_nos = PRRR_NOS(prrr, i);
1170 c_mt = PRRR_MT(prrr, i);
1171 c_icp = NMRR_ICP(nmrr, i);
1172 c_ocp = NMRR_OCP(nmrr, i);
1173
1174 if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
1175 return i;
1176 }
1177
1178 return -ENODEV;
1179}
1180
1181static void __init setup_iommu_tex_classes(void)
1182{
1183 msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
1184 get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);
1185
1186 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
1187 get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);
1188
1189 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
1190 get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);
1191
1192 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
1193 get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
1194}
1195
Stepan Moskovchenko516cbc72010-11-12 19:29:53 -08001196static int __init msm_iommu_init(void)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001197{
Steve Mucklef132c6c2012-06-06 18:30:57 -07001198 if (!msm_soc_version_supports_iommu_v1())
1199 return -ENODEV;
1200
Stepan Moskovchenko100832c2010-11-15 18:20:08 -08001201 setup_iommu_tex_classes();
Joerg Roedel85eebbc2011-09-06 17:56:07 +02001202 bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07001203 return 0;
1204}
1205
/* Register early (subsys level) so the IOMMU is available before client
 * drivers probe.
 */
subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");