blob: e5fb44157c13f65951cde0a6f86c6ad256623d9d [file] [log] [blame]
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -08001/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070011 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/errno.h>
18#include <linux/io.h>
19#include <linux/interrupt.h>
20#include <linux/list.h>
21#include <linux/spinlock.h>
22#include <linux/slab.h>
23#include <linux/iommu.h>
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080024#include <linux/clk.h>
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -070025#include <linux/scatterlist.h>
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -070026
27#include <asm/cacheflush.h>
28#include <asm/sizes.h>
29
30#include <mach/iommu_hw-8xxx.h>
31#include <mach/iommu.h>
32
/* Read a CP15 coprocessor register into @reg (MRC instruction wrapper). */
#define MRC(reg, processor, op1, crn, crm, op2)			\
__asm__ __volatile__ (						\
"   mrc   "   #processor "," #op1 ", %0,"  #crn "," #crm "," #op2 "\n" \
: "=r" (reg))

/* PRRR: Primary Region Remap Register; NMRR: Normal Memory Remap Register */
#define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)
40
#ifndef CONFIG_IOMMU_PGTABLES_L2
/* The IOMMU walks page tables from memory: flush CPU-written PTEs out of
 * the data cache so the hardware walker observes them. */
static inline void clean_pte(unsigned long *start, unsigned long *end)
{
	dmac_flush_range(start, end);
}
#else
/* Page-table walks go through L2 (TTBRs programmed inner-cacheable in
 * __program_context()), so no explicit cache maintenance is required. */
static inline void clean_pte(unsigned long *start, unsigned long *end) { }
#endif
49
/* TEX class per MSM_IOMMU_ATTR_* index, probed at init from PRRR/NMRR */
static int msm_iommu_tex_class[4];

/* Serializes all driver state: page tables, attach lists, IOMMU registers */
DEFINE_SPINLOCK(msm_iommu_lock);

/* Per-domain private data */
struct msm_priv {
	unsigned long *pgtable;		/* 16 KB first-level table (kernel VA) */
	struct list_head list_attached;	/* attached msm_iommu_ctx_drvdata's */
};
58
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -080059static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
60{
61 int ret;
62
63 ret = clk_enable(drvdata->pclk);
64 if (ret)
65 goto fail;
66
67 if (drvdata->clk) {
68 ret = clk_enable(drvdata->clk);
69 if (ret)
70 clk_disable(drvdata->pclk);
71 }
72fail:
73 return ret;
74}
75
76static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
77{
78 if (drvdata->clk)
79 clk_disable(drvdata->clk);
80 clk_disable(drvdata->pclk);
81}
82
/*
 * Invalidate the TLB entry covering @va in every context attached to
 * @domain.  Caller holds msm_iommu_lock.  Returns 0, or the error from
 * __enable_clocks() (remaining contexts are then skipped).
 */
static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		if (!iommu_drvdata)
			BUG();

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		/* TLBIVA invalidates by VA+ASID: fetch this context's ASID */
		asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
					   ctx_drvdata->num);

		SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
			   asid | (va & TLBIVA_VA));
		mb();	/* post the invalidate before releasing the clocks */
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}
114
/*
 * Invalidate all TLB entries (by ASID) for every context attached to
 * @domain.  Caller holds msm_iommu_lock.  Returns 0 or the error from
 * __enable_clocks().
 */
static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
	int asid;

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		if (!iommu_drvdata)
			BUG();

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
					   ctx_drvdata->num);

		/* Drop every TLB entry tagged with this context's ASID */
		SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
		mb();	/* post the invalidate before releasing the clocks */
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}
145
/* Zero every programmable register of context bank @ctx, leaving it
 * disabled (SCTLR = 0) with no translation state. */
static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	mb();	/* make sure all writes have landed before reprogramming */
}
169
/*
 * Fully program context bank @ctx of an IOMMU with @ncb context banks to
 * translate through the first-level table at physical address @pgtable,
 * then enable it.  An ASID is shared with any other context already using
 * the same page table; otherwise the lowest unused ASID is claimed.
 */
static void __program_context(void __iomem *base, int ctx, int ncb,
			      phys_addr_t pgtable)
{
	unsigned int prrr, nmrr;
	int i, j, found;
	__reset_context(base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, 0);
	SET_TTBR0_PA(base, ctx, (pgtable >> TTBR0_PA_SHIFT));

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Mirror the CPU's TEX remap tables so attributes agree */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

#ifdef CONFIG_IOMMU_PGTABLES_L2
	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	SET_TTBR0_SH(base, ctx, 1);
	SET_TTBR1_SH(base, ctx, 1);

	SET_TTBR0_NOS(base, ctx, 1);
	SET_TTBR1_NOS(base, ctx, 1);

	SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR0_IRGNL(base, ctx, 1);

	SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR1_IRGNL(base, ctx, 1);

	SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
	SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
#endif

	/* Find if this page table is used elsewhere, and re-use ASID */
	found = 0;
	for (i = 0; i < ncb; i++)
		if (GET_TTBR0_PA(base, i) == (pgtable >> TTBR0_PA_SHIFT) &&
		    i != ctx) {
			SET_CONTEXTIDR_ASID(base, ctx, \
					    GET_CONTEXTIDR_ASID(base, i));
			found = 1;
			break;
		}

	/* If page table is new, find an unused ASID */
	if (!found) {
		for (i = 0; i < ncb; i++) {
			/* candidate ASID i is usable iff no other bank has it */
			found = 0;
			for (j = 0; j < ncb; j++) {
				if (GET_CONTEXTIDR_ASID(base, j) == i &&
				    j != ctx)
					found = 1;
			}

			if (!found) {
				SET_CONTEXTIDR_ASID(base, ctx, i);
				break;
			}
		}
		/* ncb banks can always share ncb ASIDs; exhaustion is a bug */
		BUG_ON(found);
	}

	/* Enable the MMU */
	SET_M(base, ctx, 1);
	mb();	/* ensure the context is live before returning */
}
266
Stepan Moskovchenkoff2d3662011-08-31 17:13:32 -0700267static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700268{
269 struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
270
271 if (!priv)
272 goto fail_nomem;
273
274 INIT_LIST_HEAD(&priv->list_attached);
275 priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
276 get_order(SZ_16K));
277
278 if (!priv->pgtable)
279 goto fail_nomem;
280
281 memset(priv->pgtable, 0, SZ_16K);
282 domain->priv = priv;
283 return 0;
284
285fail_nomem:
286 kfree(priv);
287 return -ENOMEM;
288}
289
290static void msm_iommu_domain_destroy(struct iommu_domain *domain)
291{
292 struct msm_priv *priv;
293 unsigned long flags;
294 unsigned long *fl_table;
295 int i;
296
297 spin_lock_irqsave(&msm_iommu_lock, flags);
298 priv = domain->priv;
299 domain->priv = NULL;
300
301 if (priv) {
302 fl_table = priv->pgtable;
303
304 for (i = 0; i < NUM_FL_PTE; i++)
305 if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
306 free_page((unsigned long) __va(((fl_table[i]) &
307 FL_BASE_MASK)));
308
309 free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
310 priv->pgtable = NULL;
311 }
312
313 kfree(priv);
314 spin_unlock_irqrestore(&msm_iommu_lock, flags);
315}
316
/*
 * Attach context device @dev to @domain: program its context bank with
 * the domain's page table and record it on the domain's attach list.
 * Returns 0, -EINVAL on bad/missing driver data, -EBUSY if the context
 * is already attached anywhere.
 */
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	/* dev is the context device; its parent is the IOMMU device */
	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
		ret = -EINVAL;
		goto fail;
	}

	/* Context already attached to some domain? */
	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	/* Already attached to this very domain? */
	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	__program_context(iommu_drvdata->base, ctx_dev->num, iommu_drvdata->ncb,
			  __pa(priv->pgtable));

	__disable_clocks(iommu_drvdata);
	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}
370
/*
 * Detach context device @dev from @domain: invalidate the context's TLB
 * entries by ASID, reset (disable) the context bank and drop it from the
 * domain's attach list.  Errors (bad drvdata, clock failure) cause a
 * silent early exit — the function returns void.
 */
static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;

	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	/* Flush stale translations for this context's ASID before reset */
	SET_TLBIASID(iommu_drvdata->base, ctx_dev->num,
		     GET_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_dev->num));

	__reset_context(iommu_drvdata->base, ctx_dev->num);
	__disable_clocks(iommu_drvdata);
	list_del_init(&ctx_drvdata->attached_elm);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
408
/*
 * Translate MSM_IOMMU_ATTR_* flags in @prot into ARM memory-attribute
 * bits for a mapping of size @len: first-level section bits for 1M/16M,
 * second-level page bits for 4K/64K.  Uses the TEX classes probed at
 * init time (msm_iommu_tex_class).
 *
 * Returns 0 when the TEX class is out of range, which callers treat as
 * an error.  NOTE(review): a mapping that is non-shared and fully
 * non-cached (tex == 0, sh == 0) also produces 0 and is thus rejected —
 * confirm whether that combination is meant to be unsupported.
 */
static int __get_pgprot(int prot, int len)
{
	unsigned int pgprot;
	int tex, sh;

	sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0;
	tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK];

	if (tex < 0 || tex > NUM_TEX_CLASS - 1)
		return 0;

	if (len == SZ_16M || len == SZ_1M) {
		/* First-level (section) attribute encoding */
		pgprot = sh ? FL_SHARED : 0;
		pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? FL_TEX0 : 0;
	} else {
		/* Second-level (small/large page) attribute encoding */
		pgprot = sh ? SL_SHARED : 0;
		pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? SL_TEX0 : 0;
	}

	return pgprot;
}
434
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700435static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
436 phys_addr_t pa, int order, int prot)
437{
438 struct msm_priv *priv;
439 unsigned long flags;
440 unsigned long *fl_table;
441 unsigned long *fl_pte;
442 unsigned long fl_offset;
443 unsigned long *sl_table;
444 unsigned long *sl_pte;
445 unsigned long sl_offset;
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800446 unsigned int pgprot;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700447 size_t len = 0x1000UL << order;
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700448 int ret = 0;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700449
450 spin_lock_irqsave(&msm_iommu_lock, flags);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700451
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800452 priv = domain->priv;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700453 if (!priv) {
454 ret = -EINVAL;
455 goto fail;
456 }
457
458 fl_table = priv->pgtable;
459
460 if (len != SZ_16M && len != SZ_1M &&
461 len != SZ_64K && len != SZ_4K) {
462 pr_debug("Bad size: %d\n", len);
463 ret = -EINVAL;
464 goto fail;
465 }
466
467 if (!fl_table) {
468 pr_debug("Null page table\n");
469 ret = -EINVAL;
470 goto fail;
471 }
472
Stepan Moskovchenko04255ee2011-08-11 19:45:23 -0700473 pgprot = __get_pgprot(prot, len);
474
475 if (!pgprot) {
476 ret = -EINVAL;
477 goto fail;
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800478 }
479
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700480 fl_offset = FL_OFFSET(va); /* Upper 12 bits */
481 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
482
483 if (len == SZ_16M) {
484 int i = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700485
486 for (i = 0; i < 16; i++)
487 if (*(fl_pte+i)) {
488 ret = -EBUSY;
489 goto fail;
490 }
491
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700492 for (i = 0; i < 16; i++)
493 *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
494 FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
Stepan Moskovchenko2e8c8ba2011-02-24 18:00:41 -0800495 FL_SHARED | FL_NG | pgprot;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700496
497 clean_pte(fl_pte, fl_pte + 16);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700498 }
499
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700500 if (len == SZ_1M) {
501 if (*fl_pte) {
502 ret = -EBUSY;
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700503 goto fail;
504 }
505
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700506 *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG |
507 FL_TYPE_SECT | FL_SHARED | pgprot;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700508
509 clean_pte(fl_pte, fl_pte + 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700510 }
511
512 /* Need a 2nd level table */
513 if (len == SZ_4K || len == SZ_64K) {
514
515 if (*fl_pte == 0) {
516 unsigned long *sl;
517 sl = (unsigned long *) __get_free_pages(GFP_ATOMIC,
518 get_order(SZ_4K));
519
520 if (!sl) {
521 pr_debug("Could not allocate second level table\n");
522 ret = -ENOMEM;
523 goto fail;
524 }
525 memset(sl, 0, SZ_4K);
526
527 *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
528 FL_TYPE_TABLE);
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700529
530 clean_pte(fl_pte, fl_pte + 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700531 }
532
533 if (!(*fl_pte & FL_TYPE_TABLE)) {
534 ret = -EBUSY;
535 goto fail;
536 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700537 }
538
539 sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
540 sl_offset = SL_OFFSET(va);
541 sl_pte = sl_table + sl_offset;
542
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700543 if (len == SZ_4K) {
544 if (*sl_pte) {
545 ret = -EBUSY;
546 goto fail;
547 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700548
Stepan Moskovchenko2e8c8ba2011-02-24 18:00:41 -0800549 *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG |
Stepan Moskovchenko100832c2010-11-15 18:20:08 -0800550 SL_SHARED | SL_TYPE_SMALL | pgprot;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700551 clean_pte(sl_pte, sl_pte + 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700552 }
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700553
554 if (len == SZ_64K) {
555 int i;
556
557 for (i = 0; i < 16; i++)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700558 if (*(sl_pte+i)) {
559 ret = -EBUSY;
560 goto fail;
561 }
562
563 for (i = 0; i < 16; i++)
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700564 *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 |
Stepan Moskovchenko2e8c8ba2011-02-24 18:00:41 -0800565 SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700566
567 clean_pte(sl_pte, sl_pte + 16);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700568 }
569
Stepan Moskovchenkobd1ad612011-08-03 16:24:54 -0700570 ret = __flush_iotlb_va(domain, va);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700571fail:
572 spin_unlock_irqrestore(&msm_iommu_lock, flags);
573 return ret;
574}
575
576static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
577 int order)
578{
579 struct msm_priv *priv;
580 unsigned long flags;
581 unsigned long *fl_table;
582 unsigned long *fl_pte;
583 unsigned long fl_offset;
584 unsigned long *sl_table;
585 unsigned long *sl_pte;
586 unsigned long sl_offset;
587 size_t len = 0x1000UL << order;
588 int i, ret = 0;
589
590 spin_lock_irqsave(&msm_iommu_lock, flags);
591
592 priv = domain->priv;
593
594 if (!priv) {
595 ret = -ENODEV;
596 goto fail;
597 }
598
599 fl_table = priv->pgtable;
600
601 if (len != SZ_16M && len != SZ_1M &&
602 len != SZ_64K && len != SZ_4K) {
603 pr_debug("Bad length: %d\n", len);
604 ret = -EINVAL;
605 goto fail;
606 }
607
608 if (!fl_table) {
609 pr_debug("Null page table\n");
610 ret = -EINVAL;
611 goto fail;
612 }
613
614 fl_offset = FL_OFFSET(va); /* Upper 12 bits */
615 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
616
617 if (*fl_pte == 0) {
618 pr_debug("First level PTE is 0\n");
619 ret = -ENODEV;
620 goto fail;
621 }
622
623 /* Unmap supersection */
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700624 if (len == SZ_16M) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700625 for (i = 0; i < 16; i++)
626 *(fl_pte+i) = 0;
627
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700628 clean_pte(fl_pte, fl_pte + 16);
629 }
630
631 if (len == SZ_1M) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700632 *fl_pte = 0;
633
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700634 clean_pte(fl_pte, fl_pte + 1);
635 }
636
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700637 sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
638 sl_offset = SL_OFFSET(va);
639 sl_pte = sl_table + sl_offset;
640
641 if (len == SZ_64K) {
642 for (i = 0; i < 16; i++)
643 *(sl_pte+i) = 0;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700644
645 clean_pte(sl_pte, sl_pte + 16);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700646 }
647
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700648 if (len == SZ_4K) {
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700649 *sl_pte = 0;
650
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700651 clean_pte(sl_pte, sl_pte + 1);
652 }
653
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700654 if (len == SZ_4K || len == SZ_64K) {
655 int used = 0;
656
657 for (i = 0; i < NUM_SL_PTE; i++)
658 if (sl_table[i])
659 used = 1;
660 if (!used) {
661 free_page((unsigned long)sl_table);
662 *fl_pte = 0;
Stepan Moskovchenko094475d2011-08-03 13:38:29 -0700663
664 clean_pte(fl_pte, fl_pte + 1);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700665 }
666 }
667
Stepan Moskovchenkobd1ad612011-08-03 16:24:54 -0700668 ret = __flush_iotlb_va(domain, va);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700669fail:
670 spin_unlock_irqrestore(&msm_iommu_lock, flags);
671 return ret;
672}
673
/*
 * Map @len bytes (4K-aligned, enforced by BUG_ON) of the scatterlist
 * @sg at IOVA @va as 4K pages, allocating second-level tables on demand
 * and walking one first-level entry (1M of IOVA) per outer iteration.
 *
 * Returns 0, -EINVAL for a rejected @prot, or -ENOMEM.  NOTE(review):
 * on mid-range failure, entries already written are not rolled back.
 */
static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
			       struct scatterlist *sg, unsigned int len,
			       int prot)
{
	unsigned int pa;
	unsigned int offset = 0;
	unsigned int pgprot;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_offset, sl_start;
	unsigned long flags;
	unsigned int chunk_offset = 0;
	unsigned int chunk_pa;
	int ret = 0;
	struct msm_priv *priv;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pgtable;

	pgprot = __get_pgprot(prot, SZ_4K);

	if (!pgprot) {
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);

	/* Physical base of the current scatterlist chunk */
	chunk_pa = sg_phys(sg);

	while (offset < len) {
		/* Set up a 2nd level page table if one doesn't exist */
		if (*fl_pte == 0) {
			sl_table = (unsigned long *)
				 __get_free_pages(GFP_ATOMIC, get_order(SZ_4K));

			if (!sl_table) {
				pr_debug("Could not allocate second level table\n");
				ret = -ENOMEM;
				goto fail;
			}

			memset(sl_table, 0, SZ_4K);
			*fl_pte = ((((int)__pa(sl_table)) & FL_BASE_MASK) |
							    FL_TYPE_TABLE);
			clean_pte(fl_pte, fl_pte + 1);
		} else
			sl_table = (unsigned long *)
					       __va(((*fl_pte) & FL_BASE_MASK));

		/* Keep track of initial position so we
		 * don't clean more than we have to
		 */
		sl_start = sl_offset;

		/* Build the 2nd level page table */
		while (offset < len && sl_offset < NUM_SL_PTE) {
			pa = chunk_pa + chunk_offset;
			sl_table[sl_offset] = (pa & SL_BASE_MASK_SMALL) |
					      pgprot | SL_AP0 | SL_AP1 | SL_NG |
					      SL_SHARED | SL_TYPE_SMALL;
			sl_offset++;
			offset += SZ_4K;

			chunk_offset += SZ_4K;

			/* Current sg chunk exhausted: advance to the next */
			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				chunk_pa = sg_phys(sg);
			}
		}

		clean_pte(sl_table + sl_start, sl_table + sl_offset);

		/* Next 1M region: next FL entry, SL index restarts at 0 */
		fl_pte++;
		sl_offset = 0;
	}
	__flush_iotlb(domain);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}
767
768
/*
 * Unmap @len bytes (4K-aligned, enforced by BUG_ON) of 4K mappings
 * starting at IOVA @va, one first-level entry (1M of IOVA) per
 * iteration.  Second-level tables left empty are freed.  Always
 * returns 0.
 */
static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
				 unsigned int len)
{
	unsigned int offset = 0;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	unsigned long flags;
	int used, i;
	struct msm_priv *priv;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	BUG_ON(len & (SZ_4K - 1));

	priv = domain->priv;
	fl_table = priv->pgtable;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	sl_start = SL_OFFSET(va);

	while (offset < len) {
		sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
		/* Clamp this pass to the end of the current SL table */
		sl_end = ((len - offset) / SZ_4K) + sl_start;

		if (sl_end > NUM_SL_PTE)
			sl_end = NUM_SL_PTE;

		memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
		clean_pte(sl_table + sl_start, sl_table + sl_end);

		offset += (sl_end - sl_start) * SZ_4K;

		/* Unmap and free the 2nd level table if all mappings in it
		 * were removed. This saves memory, but the table will need
		 * to be re-allocated the next time someone tries to map these
		 * VAs.
		 */
		used = 0;

		/* If we just unmapped the whole table, don't bother
		 * seeing if there are still used entries left.
		 */
		if (sl_end - sl_start != NUM_SL_PTE)
			for (i = 0; i < NUM_SL_PTE; i++)
				if (sl_table[i]) {
					used = 1;
					break;
				}
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;

			clean_pte(fl_pte, fl_pte + 1);
		}

		sl_start = 0;
		fl_pte++;
	}

	__flush_iotlb(domain);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return 0;
}
837
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700838static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
839 unsigned long va)
840{
841 struct msm_priv *priv;
842 struct msm_iommu_drvdata *iommu_drvdata;
843 struct msm_iommu_ctx_drvdata *ctx_drvdata;
844 unsigned int par;
845 unsigned long flags;
846 void __iomem *base;
847 phys_addr_t ret = 0;
848 int ctx;
849
850 spin_lock_irqsave(&msm_iommu_lock, flags);
851
852 priv = domain->priv;
853 if (list_empty(&priv->list_attached))
854 goto fail;
855
856 ctx_drvdata = list_entry(priv->list_attached.next,
857 struct msm_iommu_ctx_drvdata, attached_elm);
858 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
859
860 base = iommu_drvdata->base;
861 ctx = ctx_drvdata->num;
862
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800863 ret = __enable_clocks(iommu_drvdata);
864 if (ret)
865 goto fail;
866
Stepan Moskovchenkob0e78082011-02-28 16:04:55 -0800867 SET_V2PPR(base, ctx, va & V2Pxx_VA);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700868
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700869 mb();
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700870 par = GET_PAR(base, ctx);
871
872 /* We are dealing with a supersection */
873 if (GET_NOFAULT_SS(base, ctx))
874 ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
875 else /* Upper 20 bits from PAR, lower 12 from VA */
876 ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
877
Stepan Moskovchenko33069732010-11-12 19:30:00 -0800878 if (GET_FAULT(base, ctx))
879 ret = 0;
880
Stepan Moskovchenko41f3f512011-02-24 18:00:39 -0800881 __disable_clocks(iommu_drvdata);
Stepan Moskovchenko0720d1f2010-08-24 18:31:10 -0700882fail:
883 spin_unlock_irqrestore(&msm_iommu_lock, flags);
884 return ret;
885}
886
/* This driver advertises no optional IOMMU capabilities. */
static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}
892
/* Dump the fault-related registers of context bank @ctx to the log,
 * decoding the FSR status bits by name.  Clocks must already be on. */
static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
			(fsr & 0x02) ? "TF " : "",
			(fsr & 0x04) ? "AFF " : "",
			(fsr & 0x08) ? "APF " : "",
			(fsr & 0x10) ? "TLBMF " : "",
			(fsr & 0x20) ? "HTWDEEF " : "",
			(fsr & 0x40) ? "HTWSEEF " : "",
			(fsr & 0x80) ? "MHF " : "",
			(fsr & 0x10000) ? "SL " : "",
			(fsr & 0x40000000) ? "SS " : "",
			(fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}
919
/*
 * Context-fault interrupt handler.  Scans every context bank of the
 * faulting IOMMU, logs registers for those with a non-zero FSR and
 * clears their fault status.
 *
 * NOTE(review): always returns 0 (IRQ_NONE) even after handling a
 * fault — confirm whether IRQ_HANDLED was intended.
 */
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_drvdata *drvdata = dev_id;
	void __iomem *base;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!drvdata) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	base = drvdata->base;

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int) base);
	pr_err("name = %s\n", drvdata->name);

	ret = __enable_clocks(drvdata);
	if (ret)
		goto fail;

	for (i = 0; i < drvdata->ncb; i++) {
		fsr = GET_FSR(base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(base, i);
			/* Clear the fault status to un-stall the context */
			SET_FSR(base, i, 0x4000000F);
		}
	}
	__disable_clocks(drvdata);
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}
958
/* Operations registered with the core IOMMU layer at init time. */
static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.map_range = msm_iommu_map_range,
	.unmap_range = msm_iommu_unmap_range,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap
};
971
/*
 * Search the CPU's PRRR/NMRR remap tables for the TEX class matching
 * the given inner cache policy (@icp), outer cache policy (@ocp),
 * memory type (@mt) and outer-shareability (@nos).  Returns the class
 * index, or -ENODEV if no class matches.
 */
static int __init get_tex_class(int icp, int ocp, int mt, int nos)
{
	int i = 0;
	unsigned int prrr = 0;
	unsigned int nmrr = 0;
	int c_icp, c_ocp, c_mt, c_nos;

	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);

	for (i = 0; i < NUM_TEX_CLASS; i++) {
		c_nos = PRRR_NOS(prrr, i);
		c_mt = PRRR_MT(prrr, i);
		c_icp = NMRR_ICP(nmrr, i);
		c_ocp = NMRR_OCP(nmrr, i);

		if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
			return i;
	}

	return -ENODEV;
}
994
/* Resolve each supported MSM_IOMMU_ATTR_* attribute to the CPU's TEX
 * class so __get_pgprot() can build matching page-table attributes.
 * A missing class stores -ENODEV, later rejected by __get_pgprot(). */
static void __init setup_iommu_tex_classes(void)
{
	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
			get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
			get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
			get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}
1009
/* Probe TEX remap classes and register this driver with the IOMMU core. */
static int __init msm_iommu_init(void)
{
	setup_iommu_tex_classes();
	register_iommu(&msm_iommu_ops);
	return 0;
}

/* Must run after the IOMMU platform devices are set up */
subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");