/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#include <mach/iommu.h>
#include <mach/msm_iommu_priv.h>
#include "msm_iommu_pagetable.h"

/* Shareability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NON_SH           0x0
#define MSM_IOMMU_ATTR_SH               0x4

/* Cacheability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NONCACHED        0x0
#define MSM_IOMMU_ATTR_CACHED_WB_WA     0x1
#define MSM_IOMMU_ATTR_CACHED_WB_NWA    0x2
#define MSM_IOMMU_ATTR_CACHED_WT        0x3

static int msm_iommu_tex_class[4];

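/*
 * Write a range of page table entries back to memory. When the page table
 * is marked for redirection (pt->redirect), the cache maintenance is
 * skipped; the table walker is then expected to fetch coherent entries.
 */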
static inline void clean_pte(unsigned long *start, unsigned long *end,
                             int redirect)
{
        if (!redirect)
                dmac_flush_range(start, end);
}

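/*
 * Allocate and zero the 16 KB first-level translation table (NUM_FL_PTE
 * entries) for a page table instance.
 */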
int msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt)
{
        pt->fl_table = (unsigned long *)__get_free_pages(GFP_KERNEL,
                                                         get_order(SZ_16K));
        if (!pt->fl_table)
                return -ENOMEM;

        memset(pt->fl_table, 0, SZ_16K);
        clean_pte(pt->fl_table, pt->fl_table + NUM_FL_PTE, pt->redirect);

        return 0;
}

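/*
 * Free every second-level table still referenced from the first-level
 * table, then release the first-level table itself.
 */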
void msm_iommu_pagetable_free(struct msm_iommu_pt *pt)
{
        unsigned long *fl_table;
        int i;

        fl_table = pt->fl_table;
        for (i = 0; i < NUM_FL_PTE; i++)
                if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
                        free_page((unsigned long) __va(((fl_table[i]) &
                                                        FL_BASE_MASK)));
        free_pages((unsigned long)fl_table, get_order(SZ_16K));
        pt->fl_table = NULL;
}

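/*
 * Translate IOMMU_* protection flags into ARM short-descriptor access
 * permission and memory attribute bits. Section-sized lengths (1 MB and
 * 16 MB) get first-level FL_* encodings; everything else gets the
 * second-level SL_* encodings. Returns 0 if no usable TEX class is found.
 */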
static int __get_pgprot(int prot, int len)
{
        unsigned int pgprot;
        int tex;

        if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
                prot |= IOMMU_READ | IOMMU_WRITE;
                WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
        }

        if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
                prot |= IOMMU_READ;
                WARN_ONCE(1, "Write-only unsupported; falling back to RW\n");
        }

        if (prot & IOMMU_CACHE)
                tex = (pgprot_kernel >> 2) & 0x07;
        else
                tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];

        if (tex < 0 || tex > NUM_TEX_CLASS - 1)
                return 0;

        if (len == SZ_16M || len == SZ_1M) {
                pgprot = FL_SHARED;
                pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
                pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
                pgprot |= tex & 0x04 ? FL_TEX0 : 0;
                pgprot |= FL_AP0 | FL_AP1;
                pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2;
        } else {
                pgprot = SL_SHARED;
                pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
                pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
                pgprot |= tex & 0x04 ? SL_TEX0 : 0;
                pgprot |= SL_AP0 | SL_AP1;
                pgprot |= prot & IOMMU_WRITE ? 0 : SL_AP2;
        }

        return pgprot;
}

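/*
 * Allocate and zero a second-level table and hook it into the given
 * first-level entry. Returns the new table, or NULL on allocation failure.
 */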
static unsigned long *make_second_level(struct msm_iommu_pt *pt,
                                        unsigned long *fl_pte)
{
        unsigned long *sl;

        sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
                                                get_order(SZ_4K));
        if (!sl) {
                pr_debug("Could not allocate second level table\n");
                goto fail;
        }
        memset(sl, 0, SZ_4K);
        clean_pte(sl, sl + NUM_SL_PTE, pt->redirect);

        *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE);

        clean_pte(fl_pte, fl_pte + 1, pt->redirect);
fail:
        return sl;
}

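/*
 * The helpers below write the raw descriptors for each supported mapping
 * size: sl_4k() and sl_64k() fill one or sixteen second-level entries for
 * 4 KB small pages and 64 KB large pages, while fl_1m() and fl_16m() fill
 * one or sixteen first-level entries for 1 MB sections and 16 MB
 * supersections. All of them return -EBUSY if a target entry is in use.
 */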
static int sl_4k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
{
        int ret = 0;

        if (*sl_pte) {
                ret = -EBUSY;
                goto fail;
        }

        *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
                | SL_TYPE_SMALL | pgprot;
fail:
        return ret;
}

static int sl_64k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
{
        int ret = 0;
        int i;

        for (i = 0; i < 16; i++)
                if (*(sl_pte+i)) {
                        ret = -EBUSY;
                        goto fail;
                }

        for (i = 0; i < 16; i++)
                *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
                        | SL_SHARED | SL_TYPE_LARGE | pgprot;

fail:
        return ret;
}

static inline int fl_1m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
{
        if (*fl_pte)
                return -EBUSY;

        *fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
                | pgprot;

        return 0;
}

static inline int fl_16m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
{
        int i;
        int ret = 0;

        for (i = 0; i < 16; i++)
                if (*(fl_pte+i)) {
                        ret = -EBUSY;
                        goto fail;
                }
        for (i = 0; i < 16; i++)
                *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
                        | FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
fail:
        return ret;
}

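/*
 * Map a single physically contiguous region of 4 KB, 64 KB, 1 MB or 16 MB
 * at the given IOMMU virtual address. A second-level table is allocated on
 * demand for the 4 KB and 64 KB cases.
 */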
int msm_iommu_pagetable_map(struct msm_iommu_pt *pt, unsigned long va,
                            phys_addr_t pa, size_t len, int prot)
{
        unsigned long *fl_pte;
        unsigned long fl_offset;
        unsigned long *sl_table;
        unsigned long *sl_pte;
        unsigned long sl_offset;
        unsigned int pgprot;
        int ret = 0;

        if (len != SZ_16M && len != SZ_1M &&
            len != SZ_64K && len != SZ_4K) {
                pr_debug("Bad size: %zu\n", len);
                ret = -EINVAL;
                goto fail;
        }

        if (!pt->fl_table) {
                pr_debug("Null page table\n");
                ret = -EINVAL;
                goto fail;
        }

        pgprot = __get_pgprot(prot, len);
        if (!pgprot) {
                ret = -EINVAL;
                goto fail;
        }

        fl_offset = FL_OFFSET(va);              /* Upper 12 bits */
        fl_pte = pt->fl_table + fl_offset;      /* int pointers, 4 bytes */

        if (len == SZ_16M) {
                ret = fl_16m(fl_pte, pa, pgprot);
                if (ret)
                        goto fail;
                clean_pte(fl_pte, fl_pte + 16, pt->redirect);
        }

        if (len == SZ_1M) {
                ret = fl_1m(fl_pte, pa, pgprot);
                if (ret)
                        goto fail;
                clean_pte(fl_pte, fl_pte + 1, pt->redirect);
        }

        /* Need a 2nd level table */
        if (len == SZ_4K || len == SZ_64K) {
                if (*fl_pte == 0) {
                        if (make_second_level(pt, fl_pte) == NULL) {
                                ret = -ENOMEM;
                                goto fail;
                        }
                }

                if (!(*fl_pte & FL_TYPE_TABLE)) {
                        ret = -EBUSY;
                        goto fail;
                }
        }

        sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
        sl_offset = SL_OFFSET(va);
        sl_pte = sl_table + sl_offset;

        if (len == SZ_4K) {
                ret = sl_4k(sl_pte, pa, pgprot);
                if (ret)
                        goto fail;
                clean_pte(sl_pte, sl_pte + 1, pt->redirect);
        }

        if (len == SZ_64K) {
                ret = sl_64k(sl_pte, pa, pgprot);
                if (ret)
                        goto fail;
                clean_pte(sl_pte, sl_pte + 16, pt->redirect);
        }

fail:
        return ret;
}

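/*
 * Remove a single mapping previously installed by msm_iommu_pagetable_map().
 * If this clears the last entry of a second-level table, the table is freed
 * and its first-level entry is cleared as well.
 */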
size_t msm_iommu_pagetable_unmap(struct msm_iommu_pt *pt, unsigned long va,
                                 size_t len)
{
        unsigned long *fl_pte;
        unsigned long fl_offset;
        unsigned long *sl_table;
        unsigned long *sl_pte;
        unsigned long sl_offset;
        int i, ret = 0;

        if (len != SZ_16M && len != SZ_1M &&
            len != SZ_64K && len != SZ_4K) {
                pr_debug("Bad length: %zu\n", len);
                ret = -EINVAL;
                goto fail;
        }

        if (!pt->fl_table) {
                pr_debug("Null page table\n");
                ret = -EINVAL;
                goto fail;
        }

        fl_offset = FL_OFFSET(va);              /* Upper 12 bits */
        fl_pte = pt->fl_table + fl_offset;      /* int pointers, 4 bytes */

        if (*fl_pte == 0) {
                pr_debug("First level PTE is 0\n");
                ret = -ENODEV;
                goto fail;
        }

        /* Unmap supersection */
        if (len == SZ_16M) {
                for (i = 0; i < 16; i++)
                        *(fl_pte+i) = 0;

                clean_pte(fl_pte, fl_pte + 16, pt->redirect);
        }

        if (len == SZ_1M) {
                *fl_pte = 0;
                clean_pte(fl_pte, fl_pte + 1, pt->redirect);
        }

        sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
        sl_offset = SL_OFFSET(va);
        sl_pte = sl_table + sl_offset;

        if (len == SZ_64K) {
                for (i = 0; i < 16; i++)
                        *(sl_pte+i) = 0;

                clean_pte(sl_pte, sl_pte + 16, pt->redirect);
        }

        if (len == SZ_4K) {
                *sl_pte = 0;
                clean_pte(sl_pte, sl_pte + 1, pt->redirect);
        }

        if (len == SZ_4K || len == SZ_64K) {
                int used = 0;

                for (i = 0; i < NUM_SL_PTE; i++)
                        if (sl_table[i])
                                used = 1;
                if (!used) {
                        free_page((unsigned long)sl_table);
                        *fl_pte = 0;
                        clean_pte(fl_pte, fl_pte + 1, pt->redirect);
                }
        }

fail:
        return ret;
}

static phys_addr_t get_phys_addr(struct scatterlist *sg)
{
        /*
         * Try sg_dma_address first so that we can
         * map carveout regions that do not have a
         * struct page associated with them.
         */
        phys_addr_t pa = sg_dma_address(sg);
        if (pa == 0)
                pa = sg_phys(sg);
        return pa;
}

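/*
 * Check that [va, va + len) is entirely unmapped in the given first-level
 * table before a range mapping is attempted. Returns -EBUSY if any first-
 * or second-level entry in the range is already in use.
 */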
static int check_range(unsigned long *fl_table, unsigned int va,
                       unsigned int len)
{
        unsigned int offset = 0;
        unsigned long *fl_pte;
        unsigned long fl_offset;
        unsigned long *sl_table;
        unsigned long sl_start, sl_end;
        int i;

        fl_offset = FL_OFFSET(va);      /* Upper 12 bits */
        fl_pte = fl_table + fl_offset;  /* int pointers, 4 bytes */

        while (offset < len) {
                if (*fl_pte & FL_TYPE_TABLE) {
                        sl_start = SL_OFFSET(va);
                        sl_table = __va(((*fl_pte) & FL_BASE_MASK));
                        sl_end = ((len - offset) / SZ_4K) + sl_start;

                        if (sl_end > NUM_SL_PTE)
                                sl_end = NUM_SL_PTE;

                        for (i = sl_start; i < sl_end; i++) {
                                if (sl_table[i] != 0) {
                                        pr_err("%08x - %08x already mapped\n",
                                               va, va + SZ_4K);
                                        return -EBUSY;
                                }
                                offset += SZ_4K;
                                va += SZ_4K;
                        }

                        sl_start = 0;
                } else {
                        if (*fl_pte != 0) {
                                pr_err("%08x - %08x already mapped\n",
                                       va, va + SZ_1M);
                                return -EBUSY;
                        }
                        va += SZ_1M;
                        offset += SZ_1M;
                        sl_start = 0;
                }
                fl_pte++;
        }
        return 0;
}

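/*
 * A chunk may be mapped with a larger page size only if the virtual
 * address, the physical address and the remaining length are all aligned
 * to that size.
 */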
static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
                                   int align)
{
        return IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
                && (len >= align);
}

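/*
 * Map a scatterlist at the given IOMMU virtual address. The range is
 * checked for collisions up front; each chunk is then mapped with the
 * largest size (16 MB, 1 MB, 64 KB or 4 KB) that the current va/pa
 * alignment and the remaining scatterlist entry allow.
 */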
int msm_iommu_pagetable_map_range(struct msm_iommu_pt *pt, unsigned int va,
                       struct scatterlist *sg, unsigned int len, int prot)
{
        phys_addr_t pa;
        unsigned int offset = 0;
        unsigned long *fl_pte;
        unsigned long fl_offset;
        unsigned long *sl_table = NULL;
        unsigned long sl_offset, sl_start;
        unsigned int chunk_size, chunk_offset = 0;
        int ret = 0;
        unsigned int pgprot4k, pgprot64k, pgprot1m, pgprot16m;

        BUG_ON(len & (SZ_4K - 1));

        pgprot4k = __get_pgprot(prot, SZ_4K);
        pgprot64k = __get_pgprot(prot, SZ_64K);
        pgprot1m = __get_pgprot(prot, SZ_1M);
        pgprot16m = __get_pgprot(prot, SZ_16M);
        if (!pgprot4k || !pgprot64k || !pgprot1m || !pgprot16m) {
                ret = -EINVAL;
                goto fail;
        }

        fl_offset = FL_OFFSET(va);              /* Upper 12 bits */
        fl_pte = pt->fl_table + fl_offset;      /* int pointers, 4 bytes */
        pa = get_phys_addr(sg);

        ret = check_range(pt->fl_table, va, len);
        if (ret)
                goto fail;

        while (offset < len) {
                chunk_size = SZ_4K;

                if (is_fully_aligned(va, pa, sg->length - chunk_offset,
                                     SZ_16M))
                        chunk_size = SZ_16M;
                else if (is_fully_aligned(va, pa, sg->length - chunk_offset,
                                          SZ_1M))
                        chunk_size = SZ_1M;
                /* 64k or 4k determined later */

                /* for 1M and 16M, only first level entries are required */
                if (chunk_size >= SZ_1M) {
                        if (chunk_size == SZ_16M) {
                                ret = fl_16m(fl_pte, pa, pgprot16m);
                                if (ret)
                                        goto fail;
                                clean_pte(fl_pte, fl_pte + 16, pt->redirect);
                                fl_pte += 16;
                        } else if (chunk_size == SZ_1M) {
                                ret = fl_1m(fl_pte, pa, pgprot1m);
                                if (ret)
                                        goto fail;
                                clean_pte(fl_pte, fl_pte + 1, pt->redirect);
                                fl_pte++;
                        }

                        offset += chunk_size;
                        chunk_offset += chunk_size;
                        va += chunk_size;
                        pa += chunk_size;

                        if (chunk_offset >= sg->length && offset < len) {
                                chunk_offset = 0;
                                sg = sg_next(sg);
                                pa = get_phys_addr(sg);
                                if (pa == 0) {
                                        pr_debug("No dma address for sg %p\n",
                                                 sg);
                                        ret = -EINVAL;
                                        goto fail;
                                }
                        }
                        continue;
                }
                /* for 4K or 64K, make sure there is a second level table */
                if (*fl_pte == 0) {
                        if (!make_second_level(pt, fl_pte)) {
                                ret = -ENOMEM;
                                goto fail;
                        }
                }
                if (!(*fl_pte & FL_TYPE_TABLE)) {
                        ret = -EBUSY;
                        goto fail;
                }
                sl_table = __va(((*fl_pte) & FL_BASE_MASK));
                sl_offset = SL_OFFSET(va);
                /* Keep track of initial position so we
                 * don't clean more than we have to
                 */
                sl_start = sl_offset;

                /* Build the 2nd level page table */
                while (offset < len && sl_offset < NUM_SL_PTE) {
                        /* Map a large 64K page if the chunk is large enough
                         * and the pa and va are aligned
                         */
                        if (is_fully_aligned(va, pa, sg->length - chunk_offset,
                                             SZ_64K))
                                chunk_size = SZ_64K;
                        else
                                chunk_size = SZ_4K;

                        if (chunk_size == SZ_4K) {
                                sl_4k(&sl_table[sl_offset], pa, pgprot4k);
                                sl_offset++;
                        } else {
                                BUG_ON(sl_offset + 16 > NUM_SL_PTE);
                                sl_64k(&sl_table[sl_offset], pa, pgprot64k);
                                sl_offset += 16;
                        }

                        offset += chunk_size;
                        chunk_offset += chunk_size;
                        va += chunk_size;
                        pa += chunk_size;

                        if (chunk_offset >= sg->length && offset < len) {
                                chunk_offset = 0;
                                sg = sg_next(sg);
                                pa = get_phys_addr(sg);
                                if (pa == 0) {
                                        pr_debug("No dma address for sg %p\n",
                                                 sg);
                                        ret = -EINVAL;
                                        goto fail;
                                }
                        }
                }

                clean_pte(sl_table + sl_start, sl_table + sl_offset,
                          pt->redirect);
                fl_pte++;
                sl_offset = 0;
        }

fail:
        return ret;
}

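/*
 * Unmap an arbitrary page-aligned range. Ranges backed by second-level
 * tables are cleared in bulk, and a second-level table is freed once it no
 * longer holds any mappings; section mappings are cleared one first-level
 * entry at a time.
 */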
void msm_iommu_pagetable_unmap_range(struct msm_iommu_pt *pt, unsigned int va,
                                     unsigned int len)
{
        unsigned int offset = 0;
        unsigned long *fl_pte;
        unsigned long fl_offset;
        unsigned long *sl_table;
        unsigned long sl_start, sl_end;
        int used, i;

        BUG_ON(len & (SZ_4K - 1));

        fl_offset = FL_OFFSET(va);              /* Upper 12 bits */
        fl_pte = pt->fl_table + fl_offset;      /* int pointers, 4 bytes */

        while (offset < len) {
                if (*fl_pte & FL_TYPE_TABLE) {
                        sl_start = SL_OFFSET(va);
                        sl_table = __va(((*fl_pte) & FL_BASE_MASK));
                        sl_end = ((len - offset) / SZ_4K) + sl_start;

                        if (sl_end > NUM_SL_PTE)
                                sl_end = NUM_SL_PTE;

                        memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
                        clean_pte(sl_table + sl_start, sl_table + sl_end,
                                  pt->redirect);

                        offset += (sl_end - sl_start) * SZ_4K;
                        va += (sl_end - sl_start) * SZ_4K;

                        /* Unmap and free the 2nd level table if all mappings
                         * in it were removed. This saves memory, but the table
                         * will need to be re-allocated the next time someone
                         * tries to map these VAs.
                         */
                        used = 0;

                        /* If we just unmapped the whole table, don't bother
                         * seeing if there are still used entries left.
                         */
                        if (sl_end - sl_start != NUM_SL_PTE)
                                for (i = 0; i < NUM_SL_PTE; i++)
                                        if (sl_table[i]) {
                                                used = 1;
                                                break;
                                        }
                        if (!used) {
                                free_page((unsigned long)sl_table);
                                *fl_pte = 0;
                                clean_pte(fl_pte, fl_pte + 1, pt->redirect);
                        }

                        sl_start = 0;
                } else {
                        *fl_pte = 0;
                        clean_pte(fl_pte, fl_pte + 1, pt->redirect);
                        va += SZ_1M;
                        offset += SZ_1M;
                        sl_start = 0;
                }
                fl_pte++;
        }
}

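/*
 * Find the TEX remap class whose inner/outer cache policy, memory type and
 * shareability attribute (read from the PRRR and NMRR registers) match the
 * requested values. Returns the class index, or -ENODEV if none matches.
 */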
static int __init get_tex_class(int icp, int ocp, int mt, int nos)
{
        int i = 0;
        unsigned int prrr = 0;
        unsigned int nmrr = 0;
        int c_icp, c_ocp, c_mt, c_nos;

        RCP15_PRRR(prrr);
        RCP15_NMRR(nmrr);

        for (i = 0; i < NUM_TEX_CLASS; i++) {
                c_nos = PRRR_NOS(prrr, i);
                c_mt = PRRR_MT(prrr, i);
                c_icp = NMRR_ICP(nmrr, i);
                c_ocp = NMRR_OCP(nmrr, i);

                if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
                        return i;
        }

        return -ENODEV;
}

static void __init setup_iommu_tex_classes(void)
{
        msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
                        get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

        msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
                        get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

        msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
                        get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

        msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
                        get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}

void __init msm_iommu_pagetable_init(void)
{
        setup_iommu_tex_classes();
}