/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#include <mach/iommu.h>
#include <mach/msm_iommu_priv.h>
#include <trace/events/kmem.h>
#include "msm_iommu_pagetable.h"

#define NUM_FL_PTE      4096
#define NUM_SL_PTE      256
#define NUM_TEX_CLASS   8

/* First-level page table bits */
#define FL_BASE_MASK            0xFFFFFC00
#define FL_TYPE_TABLE           (1 << 0)
#define FL_TYPE_SECT            (2 << 0)
#define FL_SUPERSECTION         (1 << 18)
#define FL_AP0                  (1 << 10)
#define FL_AP1                  (1 << 11)
#define FL_AP2                  (1 << 15)
#define FL_SHARED               (1 << 16)
#define FL_BUFFERABLE           (1 << 2)
#define FL_CACHEABLE            (1 << 3)
#define FL_TEX0                 (1 << 12)
#define FL_OFFSET(va)           (((va) & 0xFFF00000) >> 20)
#define FL_NG                   (1 << 17)

/* Second-level page table bits */
#define SL_BASE_MASK_LARGE      0xFFFF0000
#define SL_BASE_MASK_SMALL      0xFFFFF000
#define SL_TYPE_LARGE           (1 << 0)
#define SL_TYPE_SMALL           (2 << 0)
#define SL_AP0                  (1 << 4)
#define SL_AP1                  (2 << 4)
#define SL_AP2                  (1 << 9)
#define SL_SHARED               (1 << 10)
#define SL_BUFFERABLE           (1 << 2)
#define SL_CACHEABLE            (1 << 3)
#define SL_TEX0                 (1 << 6)
#define SL_OFFSET(va)           (((va) & 0xFF000) >> 12)
#define SL_NG                   (1 << 11)

/* Memory type and cache policy attributes */
#define MT_SO                   0
#define MT_DEV                  1
#define MT_NORMAL               2
#define CP_NONCACHED            0
#define CP_WB_WA                1
#define CP_WT                   2
#define CP_WB_NWA               3

/* Sharability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NON_SH           0x0
#define MSM_IOMMU_ATTR_SH               0x4

/* Cacheability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NONCACHED        0x0
#define MSM_IOMMU_ATTR_CACHED_WB_WA     0x1
#define MSM_IOMMU_ATTR_CACHED_WB_NWA    0x2
#define MSM_IOMMU_ATTR_CACHED_WT        0x3

static int msm_iommu_tex_class[4];

/* TEX Remap Registers */
#define NMRR_ICP(nmrr, n) (((nmrr) & (3 << ((n) * 2))) >> ((n) * 2))
#define NMRR_OCP(nmrr, n) (((nmrr) & (3 << ((n) * 2 + 16))) >> ((n) * 2 + 16))

#define PRRR_NOS(prrr, n) ((prrr) & (1 << ((n) + 24)) ? 1 : 0)
#define PRRR_MT(prrr, n)  ((((prrr) & (3 << ((n) * 2))) >> ((n) * 2)))

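/*
 * Clean a just-written range of PTEs out to memory so the IOMMU table
 * walker sees them; skipped when pt->redirect is set, where the walk is
 * assumed to be cache-coherent.
 */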
static inline void clean_pte(unsigned long *start, unsigned long *end,
                             int redirect)
{
        if (!redirect)
                dmac_flush_range(start, end);
}

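/*
 * Allocate and zero the 16KB, 4096-entry first-level table for a domain
 * and clean it to memory.
 */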
int msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt)
{
        pt->fl_table = (unsigned long *)__get_free_pages(GFP_KERNEL,
                                                         get_order(SZ_16K));
        if (!pt->fl_table)
                return -ENOMEM;

        memset(pt->fl_table, 0, SZ_16K);
        clean_pte(pt->fl_table, pt->fl_table + NUM_FL_PTE, pt->redirect);

        return 0;
}

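/*
 * Free every second-level table still referenced by the first-level
 * table, then free the first-level table itself.
 */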
void msm_iommu_pagetable_free(struct msm_iommu_pt *pt)
{
        unsigned long *fl_table;
        int i;

        fl_table = pt->fl_table;
        for (i = 0; i < NUM_FL_PTE; i++)
                if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
                        free_page((unsigned long) __va(((fl_table[i]) &
                                                        FL_BASE_MASK)));
        free_pages((unsigned long)fl_table, get_order(SZ_16K));
        pt->fl_table = 0;
}

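/*
 * Translate IOMMU_READ/WRITE/CACHE flags into first-level (1M/16M) or
 * second-level (4K/64K) descriptor attribute bits.  Returns 0 when the
 * resulting TEX class is out of range.
 */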
static int __get_pgprot(int prot, int len)
{
        unsigned int pgprot;
        int tex;

        if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
                prot |= IOMMU_READ | IOMMU_WRITE;
                WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
        }

        if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
                prot |= IOMMU_READ;
                WARN_ONCE(1, "Write-only unsupported; falling back to RW\n");
        }

        if (prot & IOMMU_CACHE)
                tex = (pgprot_kernel >> 2) & 0x07;
        else
                tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];

        if (tex < 0 || tex > NUM_TEX_CLASS - 1)
                return 0;

        if (len == SZ_16M || len == SZ_1M) {
                pgprot = FL_SHARED;
                pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
                pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
                pgprot |= tex & 0x04 ? FL_TEX0 : 0;
                pgprot |= FL_AP0 | FL_AP1;
                pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2;
        } else {
                pgprot = SL_SHARED;
                pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
                pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
                pgprot |= tex & 0x04 ? SL_TEX0 : 0;
                pgprot |= SL_AP0 | SL_AP1;
                pgprot |= prot & IOMMU_WRITE ? 0 : SL_AP2;
        }

        return pgprot;
}

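/*
 * Allocate and zero a page for a 256-entry second-level table, clean it,
 * and point the given first-level descriptor at it.  Returns NULL on
 * allocation failure.
 */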
static unsigned long *make_second_level(struct msm_iommu_pt *pt,
                                        unsigned long *fl_pte)
{
        unsigned long *sl;
        sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
                                                get_order(SZ_4K));

        if (!sl) {
                pr_debug("Could not allocate second level table\n");
                goto fail;
        }
        memset(sl, 0, SZ_4K);
        clean_pte(sl, sl + NUM_SL_PTE, pt->redirect);

        *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
                        FL_TYPE_TABLE);

        clean_pte(fl_pte, fl_pte + 1, pt->redirect);
fail:
        return sl;
}

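/* Write one 4K small-page descriptor; -EBUSY if the slot is in use. */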
static int sl_4k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
{
        int ret = 0;

        if (*sl_pte) {
                ret = -EBUSY;
                goto fail;
        }

        *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
                | SL_TYPE_SMALL | pgprot;
fail:
        return ret;
}

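/* Write the 16 descriptors of a 64K large page; -EBUSY if any is in use. */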
static int sl_64k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
{
        int ret = 0;

        int i;

        for (i = 0; i < 16; i++)
                if (*(sl_pte+i)) {
                        ret = -EBUSY;
                        goto fail;
                }

        for (i = 0; i < 16; i++)
                *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
                        | SL_SHARED | SL_TYPE_LARGE | pgprot;

fail:
        return ret;
}

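/* Write one 1M section descriptor; -EBUSY if the slot is in use. */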
static inline int fl_1m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
{
        if (*fl_pte)
                return -EBUSY;

        *fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
                | pgprot;

        return 0;
}

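/* Write the 16 descriptors of a 16M supersection; -EBUSY if any is in use. */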
static inline int fl_16m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
{
        int i;
        int ret = 0;
        for (i = 0; i < 16; i++)
                if (*(fl_pte+i)) {
                        ret = -EBUSY;
                        goto fail;
                }
        for (i = 0; i < 16; i++)
                *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
                        | FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
fail:
        return ret;
}

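/*
 * Map one physically contiguous region of exactly 4K, 64K, 1M or 16M at
 * the given IOVA, allocating a second-level table when the 4K/64K case
 * needs one.
 */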
int msm_iommu_pagetable_map(struct msm_iommu_pt *pt, unsigned long va,
                            phys_addr_t pa, size_t len, int prot)
{
        unsigned long *fl_pte;
        unsigned long fl_offset;
        unsigned long *sl_table;
        unsigned long *sl_pte;
        unsigned long sl_offset;
        unsigned int pgprot;
        int ret = 0;

        if (len != SZ_16M && len != SZ_1M &&
            len != SZ_64K && len != SZ_4K) {
                pr_debug("Bad size: %zu\n", len);
                ret = -EINVAL;
                goto fail;
        }

        if (!pt->fl_table) {
                pr_debug("Null page table\n");
                ret = -EINVAL;
                goto fail;
        }

        pgprot = __get_pgprot(prot, len);
        if (!pgprot) {
                ret = -EINVAL;
                goto fail;
        }

        fl_offset = FL_OFFSET(va);              /* Upper 12 bits */
        fl_pte = pt->fl_table + fl_offset;      /* int pointers, 4 bytes */

        if (len == SZ_16M) {
                ret = fl_16m(fl_pte, pa, pgprot);
                if (ret)
                        goto fail;
                clean_pte(fl_pte, fl_pte + 16, pt->redirect);
        }

        if (len == SZ_1M) {
                ret = fl_1m(fl_pte, pa, pgprot);
                if (ret)
                        goto fail;
                clean_pte(fl_pte, fl_pte + 1, pt->redirect);
        }

        /* Need a 2nd level table */
        if (len == SZ_4K || len == SZ_64K) {

                if (*fl_pte == 0) {
                        if (make_second_level(pt, fl_pte) == NULL) {
                                ret = -ENOMEM;
                                goto fail;
                        }
                }

                if (!(*fl_pte & FL_TYPE_TABLE)) {
                        ret = -EBUSY;
                        goto fail;
                }
        }

        sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
        sl_offset = SL_OFFSET(va);
        sl_pte = sl_table + sl_offset;

        if (len == SZ_4K) {
                ret = sl_4k(sl_pte, pa, pgprot);
                if (ret)
                        goto fail;
                clean_pte(sl_pte, sl_pte + 1, pt->redirect);
        }

        if (len == SZ_64K) {
                ret = sl_64k(sl_pte, pa, pgprot);
                if (ret)
                        goto fail;
                clean_pte(sl_pte, sl_pte + 16, pt->redirect);
        }

fail:
        return ret;
}

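/* Unmap a single mapping by delegating to the range unmapper. */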
size_t msm_iommu_pagetable_unmap(struct msm_iommu_pt *pt, unsigned long va,
                                 size_t len)
{
        msm_iommu_pagetable_unmap_range(pt, va, len);
        return len;
}

static phys_addr_t get_phys_addr(struct scatterlist *sg)
{
        /*
         * Try sg_dma_address first so that we can
         * map carveout regions that do not have a
         * struct page associated with them.
         */
        phys_addr_t pa = sg_dma_address(sg);
        if (pa == 0)
                pa = sg_phys(sg);
        return pa;
}

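/*
 * Verify that [va, va + len) is currently unmapped at both levels;
 * returns -EBUSY on the first occupied entry.
 */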
static int check_range(unsigned long *fl_table, unsigned int va,
                       unsigned int len)
{
        unsigned int offset = 0;
        unsigned long *fl_pte;
        unsigned long fl_offset;
        unsigned long *sl_table;
        unsigned long sl_start, sl_end;
        int i;

        fl_offset = FL_OFFSET(va);      /* Upper 12 bits */
        fl_pte = fl_table + fl_offset;  /* int pointers, 4 bytes */

        while (offset < len) {
                if (*fl_pte & FL_TYPE_TABLE) {
                        sl_start = SL_OFFSET(va);
                        sl_table = __va(((*fl_pte) & FL_BASE_MASK));
                        sl_end = ((len - offset) / SZ_4K) + sl_start;

                        if (sl_end > NUM_SL_PTE)
                                sl_end = NUM_SL_PTE;

                        for (i = sl_start; i < sl_end; i++) {
                                if (sl_table[i] != 0) {
                                        pr_err("%08x - %08x already mapped\n",
                                               va, va + SZ_4K);
                                        return -EBUSY;
                                }
                                offset += SZ_4K;
                                va += SZ_4K;
                        }

                        sl_start = 0;
                } else {
                        if (*fl_pte != 0) {
                                pr_err("%08x - %08x already mapped\n",
                                       va, va + SZ_1M);
                                return -EBUSY;
                        }
                        va += SZ_1M;
                        offset += SZ_1M;
                        sl_start = 0;
                }
                fl_pte++;
        }
        return 0;
}

static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
                                   int align)
{
        return IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
                && (len >= align);
}

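/*
 * Map a scatterlist at va, choosing 16M, 1M, 64K or 4K descriptors per
 * chunk based on alignment and remaining length.  On failure, everything
 * mapped so far is unmapped again.
 */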
int msm_iommu_pagetable_map_range(struct msm_iommu_pt *pt, unsigned int va,
                                  struct scatterlist *sg, unsigned int len, int prot)
{
        phys_addr_t pa;
        unsigned int start_va = va;
        unsigned int offset = 0;
        unsigned long *fl_pte;
        unsigned long fl_offset;
        unsigned long *sl_table = NULL;
        unsigned long sl_offset, sl_start;
        unsigned int chunk_size, chunk_offset = 0;
        int ret = 0;
        unsigned int pgprot4k, pgprot64k, pgprot1m, pgprot16m;

        BUG_ON(len & (SZ_4K - 1));

        pgprot4k = __get_pgprot(prot, SZ_4K);
        pgprot64k = __get_pgprot(prot, SZ_64K);
        pgprot1m = __get_pgprot(prot, SZ_1M);
        pgprot16m = __get_pgprot(prot, SZ_16M);
        if (!pgprot4k || !pgprot64k || !pgprot1m || !pgprot16m) {
                ret = -EINVAL;
                goto fail;
        }

        fl_offset = FL_OFFSET(va);              /* Upper 12 bits */
        fl_pte = pt->fl_table + fl_offset;      /* int pointers, 4 bytes */
        pa = get_phys_addr(sg);

        ret = check_range(pt->fl_table, va, len);
        if (ret)
                goto fail;

        while (offset < len) {
                chunk_size = SZ_4K;

                if (is_fully_aligned(va, pa, sg->length - chunk_offset,
                                     SZ_16M))
                        chunk_size = SZ_16M;
                else if (is_fully_aligned(va, pa, sg->length - chunk_offset,
                                          SZ_1M))
                        chunk_size = SZ_1M;
                /* 64k or 4k determined later */

                trace_iommu_map_range(va, pa, sg->length, chunk_size);

                /* for 1M and 16M, only first level entries are required */
                if (chunk_size >= SZ_1M) {
                        if (chunk_size == SZ_16M) {
                                ret = fl_16m(fl_pte, pa, pgprot16m);
                                if (ret)
                                        goto fail;
                                clean_pte(fl_pte, fl_pte + 16, pt->redirect);
                                fl_pte += 16;
                        } else if (chunk_size == SZ_1M) {
                                ret = fl_1m(fl_pte, pa, pgprot1m);
                                if (ret)
                                        goto fail;
                                clean_pte(fl_pte, fl_pte + 1, pt->redirect);
                                fl_pte++;
                        }

                        offset += chunk_size;
                        chunk_offset += chunk_size;
                        va += chunk_size;
                        pa += chunk_size;

                        if (chunk_offset >= sg->length && offset < len) {
                                chunk_offset = 0;
                                sg = sg_next(sg);
                                pa = get_phys_addr(sg);
                        }
                        continue;
                }
                /* for 4K or 64K, make sure there is a second level table */
                if (*fl_pte == 0) {
                        if (!make_second_level(pt, fl_pte)) {
                                ret = -ENOMEM;
                                goto fail;
                        }
                }
                if (!(*fl_pte & FL_TYPE_TABLE)) {
                        ret = -EBUSY;
                        goto fail;
                }
                sl_table = __va(((*fl_pte) & FL_BASE_MASK));
                sl_offset = SL_OFFSET(va);
                /* Keep track of initial position so we
                 * don't clean more than we have to
                 */
                sl_start = sl_offset;

                /* Build the 2nd level page table */
                while (offset < len && sl_offset < NUM_SL_PTE) {
                        /* Map a large 64K page if the chunk is large enough
                         * and the pa and va are aligned
                         */
                        if (is_fully_aligned(va, pa, sg->length - chunk_offset,
                                             SZ_64K))
                                chunk_size = SZ_64K;
                        else
                                chunk_size = SZ_4K;

                        trace_iommu_map_range(va, pa, sg->length,
                                              chunk_size);

                        if (chunk_size == SZ_4K) {
                                sl_4k(&sl_table[sl_offset], pa, pgprot4k);
                                sl_offset++;
                        } else {
                                BUG_ON(sl_offset + 16 > NUM_SL_PTE);
                                sl_64k(&sl_table[sl_offset], pa, pgprot64k);
                                sl_offset += 16;
                        }

                        offset += chunk_size;
                        chunk_offset += chunk_size;
                        va += chunk_size;
                        pa += chunk_size;

                        if (chunk_offset >= sg->length && offset < len) {
                                chunk_offset = 0;
                                sg = sg_next(sg);
                                pa = get_phys_addr(sg);
                        }
                }

                clean_pte(sl_table + sl_start, sl_table + sl_offset,
                          pt->redirect);
                fl_pte++;
                sl_offset = 0;
        }

fail:
        if (ret && offset > 0)
                msm_iommu_pagetable_unmap_range(pt, start_va, offset);

        return ret;
}

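/*
 * Unmap [va, va + len): second-level entries are cleared a table at a
 * time and a second-level table is freed once it is completely empty;
 * section entries are simply cleared.
 */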
void msm_iommu_pagetable_unmap_range(struct msm_iommu_pt *pt, unsigned int va,
                                     unsigned int len)
{
        unsigned int offset = 0;
        unsigned long *fl_pte;
        unsigned long fl_offset;
        unsigned long *sl_table;
        unsigned long sl_start, sl_end;
        int used, i;

        BUG_ON(len & (SZ_4K - 1));

        fl_offset = FL_OFFSET(va);              /* Upper 12 bits */
        fl_pte = pt->fl_table + fl_offset;      /* int pointers, 4 bytes */

        while (offset < len) {
                if (*fl_pte & FL_TYPE_TABLE) {
                        sl_start = SL_OFFSET(va);
                        sl_table = __va(((*fl_pte) & FL_BASE_MASK));
                        sl_end = ((len - offset) / SZ_4K) + sl_start;

                        if (sl_end > NUM_SL_PTE)
                                sl_end = NUM_SL_PTE;

                        memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
                        clean_pte(sl_table + sl_start, sl_table + sl_end,
                                  pt->redirect);

                        offset += (sl_end - sl_start) * SZ_4K;
                        va += (sl_end - sl_start) * SZ_4K;

                        /* Unmap and free the 2nd level table if all mappings
                         * in it were removed. This saves memory, but the table
                         * will need to be re-allocated the next time someone
                         * tries to map these VAs.
                         */
                        used = 0;

                        /* If we just unmapped the whole table, don't bother
                         * seeing if there are still used entries left.
                         */
                        if (sl_end - sl_start != NUM_SL_PTE)
                                for (i = 0; i < NUM_SL_PTE; i++)
                                        if (sl_table[i]) {
                                                used = 1;
                                                break;
                                        }
                        if (!used) {
                                free_page((unsigned long)sl_table);
                                *fl_pte = 0;

                                clean_pte(fl_pte, fl_pte + 1, pt->redirect);
                        }

                        sl_start = 0;
                } else {
                        *fl_pte = 0;
                        clean_pte(fl_pte, fl_pte + 1, pt->redirect);
                        va += SZ_1M;
                        offset += SZ_1M;
                        sl_start = 0;
                }
                fl_pte++;
        }
}

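/*
 * Find the TEX class whose PRRR/NMRR settings match the requested inner
 * and outer cache policy, memory type and shareability attribute;
 * returns -ENODEV if none matches.
 */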
static int __init get_tex_class(int icp, int ocp, int mt, int nos)
{
        int i = 0;
        unsigned int prrr = 0;
        unsigned int nmrr = 0;
        int c_icp, c_ocp, c_mt, c_nos;

        RCP15_PRRR(prrr);
        RCP15_NMRR(nmrr);

        for (i = 0; i < NUM_TEX_CLASS; i++) {
                c_nos = PRRR_NOS(prrr, i);
                c_mt = PRRR_MT(prrr, i);
                c_icp = NMRR_ICP(nmrr, i);
                c_ocp = NMRR_OCP(nmrr, i);

                if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
                        return i;
        }

        return -ENODEV;
}

static void __init setup_iommu_tex_classes(void)
{
        msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
                        get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

        msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
                        get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

        msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
                        get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

        msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
                        get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}

void __init msm_iommu_pagetable_init(void)
{
        setup_iommu_tex_classes();
}