/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#include <mach/iommu.h>
#include "msm_iommu_pagetable.h"

/* Shareability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NON_SH		0x0
#define MSM_IOMMU_ATTR_SH		0x4

/* Cacheability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NONCACHED	0x0
#define MSM_IOMMU_ATTR_CACHED_WB_WA	0x1
#define MSM_IOMMU_ATTR_CACHED_WB_NWA	0x2
#define MSM_IOMMU_ATTR_CACHED_WT	0x3

static int msm_iommu_tex_class[4];

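/*
 * Clean updated PTEs out of the CPU cache. When the IOMMU's page-table
 * walks are not redirected through the cache (redirect == 0), the
 * hardware walker reads the tables straight from memory, so every PTE
 * update must be flushed before it becomes visible to the IOMMU.
 */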
static inline void clean_pte(unsigned long *start, unsigned long *end,
			     int redirect)
{
	if (!redirect)
		dmac_flush_range(start, end);
}

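/*
 * Allocate and zero the 16KB first-level table (4096 entries covering
 * the 4GB virtual address space at 1MB granularity).
 */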
int msm_iommu_pagetable_alloc(struct iommu_pt *pt)
{
	pt->fl_table = (unsigned long *)__get_free_pages(GFP_KERNEL,
							 get_order(SZ_16K));
	if (!pt->fl_table)
		return -ENOMEM;

	memset(pt->fl_table, 0, SZ_16K);
	clean_pte(pt->fl_table, pt->fl_table + NUM_FL_PTE, pt->redirect);

	return 0;
}

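/*
 * Free every second-level table still referenced by a first-level
 * entry, then free the first-level table itself.
 */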
void msm_iommu_pagetable_free(struct iommu_pt *pt)
{
	unsigned long *fl_table;
	int i;

	fl_table = pt->fl_table;
	for (i = 0; i < NUM_FL_PTE; i++)
		if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
			free_page((unsigned long) __va(((fl_table[i]) &
							FL_BASE_MASK)));
	free_pages((unsigned long)fl_table, get_order(SZ_16K));
	pt->fl_table = NULL;
}

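/*
 * Translate IOMMU_* protection flags and a mapping size into ARM
 * short-descriptor attribute bits. First-level (FL_*) bits are used for
 * 1MB/16MB mappings, second-level (SL_*) bits for 4KB/64KB mappings.
 * Returns 0 if no valid TEX class exists for the requested attributes.
 */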
static int __get_pgprot(int prot, int len)
{
	unsigned int pgprot;
	int tex;

	if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
		prot |= IOMMU_READ | IOMMU_WRITE;
		WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
	}

	if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
		prot |= IOMMU_READ;
		WARN_ONCE(1, "Write-only unsupported; falling back to RW\n");
	}

	if (prot & IOMMU_CACHE)
		tex = (pgprot_kernel >> 2) & 0x07;
	else
		tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];

	if (tex < 0 || tex > NUM_TEX_CLASS - 1)
		return 0;

	if (len == SZ_16M || len == SZ_1M) {
		pgprot = FL_SHARED;
		pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? FL_TEX0 : 0;
		pgprot |= FL_AP0 | FL_AP1;
		pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2;
	} else {
		pgprot = SL_SHARED;
		pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? SL_TEX0 : 0;
		pgprot |= SL_AP0 | SL_AP1;
		pgprot |= prot & IOMMU_WRITE ? 0 : SL_AP2;
	}

	return pgprot;
}

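/*
 * Allocate a 4KB second-level table and install a pointer to it in the
 * given first-level entry. Returns the new table, or NULL on failure.
 */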
static unsigned long *make_second_level(struct iommu_pt *pt,
					unsigned long *fl_pte)
{
	unsigned long *sl;

	sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
						get_order(SZ_4K));
	if (!sl) {
		pr_debug("Could not allocate second level table\n");
		goto fail;
	}
	memset(sl, 0, SZ_4K);
	clean_pte(sl, sl + NUM_SL_PTE, pt->redirect);

	*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE);

	clean_pte(fl_pte, fl_pte + 1, pt->redirect);
fail:
	return sl;
}

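/* Write a single 4KB small-page descriptor, failing if one is present. */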
static int sl_4k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
{
	int ret = 0;

	if (*sl_pte) {
		ret = -EBUSY;
		goto fail;
	}

	*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
		  | SL_TYPE_SMALL | pgprot;
fail:
	return ret;
}

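/*
 * Write a 64KB large-page descriptor. The descriptor must be replicated
 * across 16 consecutive second-level entries, as the ARM architecture
 * requires.
 */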
static int sl_64k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
{
	int ret = 0;
	int i;

	for (i = 0; i < 16; i++)
		if (*(sl_pte+i)) {
			ret = -EBUSY;
			goto fail;
		}

	for (i = 0; i < 16; i++)
		*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
			      | SL_SHARED | SL_TYPE_LARGE | pgprot;

fail:
	return ret;
}

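/* Write a 1MB section descriptor into a first-level entry. */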
static inline int fl_1m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
{
	if (*fl_pte)
		return -EBUSY;

	*fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
		  | pgprot;

	return 0;
}

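/*
 * Write a 16MB supersection descriptor, replicated across 16 consecutive
 * first-level entries as the ARM architecture requires.
 */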
static inline int fl_16m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
{
	int i;
	int ret = 0;

	for (i = 0; i < 16; i++)
		if (*(fl_pte+i)) {
			ret = -EBUSY;
			goto fail;
		}
	for (i = 0; i < 16; i++)
		*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
			      | FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
fail:
	return ret;
}

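/*
 * Map a single physically contiguous region of exactly 4KB, 64KB, 1MB
 * or 16MB. Second-level tables are allocated on demand for the two
 * smaller sizes.
 */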
int msm_iommu_pagetable_map(struct iommu_pt *pt, unsigned long va,
			    phys_addr_t pa, size_t len, int prot)
{
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	unsigned int pgprot;
	int ret = 0;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad size: %zu\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!pt->fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	pgprot = __get_pgprot(prot, len);
	if (!pgprot) {
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);		/* Upper 12 bits */
	fl_pte = pt->fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (len == SZ_16M) {
		ret = fl_16m(fl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(fl_pte, fl_pte + 16, pt->redirect);
	}

	if (len == SZ_1M) {
		ret = fl_1m(fl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(fl_pte, fl_pte + 1, pt->redirect);
	}

	/* Need a 2nd level table */
	if (len == SZ_4K || len == SZ_64K) {
		if (*fl_pte == 0) {
			if (make_second_level(pt, fl_pte) == NULL) {
				ret = -ENOMEM;
				goto fail;
			}
		}

		if (!(*fl_pte & FL_TYPE_TABLE)) {
			ret = -EBUSY;
			goto fail;
		}
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_4K) {
		ret = sl_4k(sl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(sl_pte, sl_pte + 1, pt->redirect);
	}

	if (len == SZ_64K) {
		ret = sl_64k(sl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(sl_pte, sl_pte + 16, pt->redirect);
	}

fail:
	return ret;
}

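/*
 * Tear down a single mapping of 4KB, 64KB, 1MB or 16MB at va. A
 * second-level table that becomes completely empty is freed back to the
 * page allocator.
 */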
size_t msm_iommu_pagetable_unmap(struct iommu_pt *pt, unsigned long va,
				 size_t len)
{
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	int i, ret = 0;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad length: %zu\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!pt->fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);		/* Upper 12 bits */
	fl_pte = pt->fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (*fl_pte == 0) {
		pr_debug("First level PTE is 0\n");
		ret = -ENODEV;
		goto fail;
	}

	/* Unmap supersection */
	if (len == SZ_16M) {
		for (i = 0; i < 16; i++)
			*(fl_pte+i) = 0;

		clean_pte(fl_pte, fl_pte + 16, pt->redirect);
	}

	if (len == SZ_1M) {
		*fl_pte = 0;
		clean_pte(fl_pte, fl_pte + 1, pt->redirect);
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_64K) {
		for (i = 0; i < 16; i++)
			*(sl_pte+i) = 0;

		clean_pte(sl_pte, sl_pte + 16, pt->redirect);
	}

	if (len == SZ_4K) {
		*sl_pte = 0;
		clean_pte(sl_pte, sl_pte + 1, pt->redirect);
	}

	if (len == SZ_4K || len == SZ_64K) {
		int used = 0;

		for (i = 0; i < NUM_SL_PTE; i++)
			if (sl_table[i])
				used = 1;
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;
			clean_pte(fl_pte, fl_pte + 1, pt->redirect);
		}
	}

fail:
	return ret;
}

static unsigned int get_phys_addr(struct scatterlist *sg)
{
	/*
	 * Try sg_dma_address first so that we can
	 * map carveout regions that do not have a
	 * struct page associated with them.
	 */
	unsigned int pa = sg_dma_address(sg);
	if (pa == 0)
		pa = sg_phys(sg);
	return pa;
}

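/*
 * Check that va, pa and the remaining chunk length can all accommodate
 * a block of size align.
 */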
static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
				   int align)
{
	return IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
		&& (len >= align);
}

int msm_iommu_pagetable_map_range(struct iommu_pt *pt, unsigned int va,
				  struct scatterlist *sg, unsigned int len,
				  int prot)
{
	unsigned int pa;
	unsigned int offset = 0;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table = NULL;
	unsigned long sl_offset, sl_start;
	unsigned int chunk_size, chunk_offset = 0;
	int ret = 0;
	unsigned int pgprot4k, pgprot64k, pgprot1m, pgprot16m;

	BUG_ON(len & (SZ_4K - 1));

	pgprot4k = __get_pgprot(prot, SZ_4K);
	pgprot64k = __get_pgprot(prot, SZ_64K);
	pgprot1m = __get_pgprot(prot, SZ_1M);
	pgprot16m = __get_pgprot(prot, SZ_16M);
	if (!pgprot4k || !pgprot64k || !pgprot1m || !pgprot16m) {
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);		/* Upper 12 bits */
	fl_pte = pt->fl_table + fl_offset;	/* int pointers, 4 bytes */
	pa = get_phys_addr(sg);

	while (offset < len) {
		chunk_size = SZ_4K;

		if (is_fully_aligned(va, pa, sg->length - chunk_offset,
				     SZ_16M))
			chunk_size = SZ_16M;
		else if (is_fully_aligned(va, pa, sg->length - chunk_offset,
					  SZ_1M))
			chunk_size = SZ_1M;
		/* 64k or 4k determined later */

		/* for 1M and 16M, only first level entries are required */
		if (chunk_size >= SZ_1M) {
			if (chunk_size == SZ_16M) {
				ret = fl_16m(fl_pte, pa, pgprot16m);
				if (ret)
					goto fail;
				clean_pte(fl_pte, fl_pte + 16, pt->redirect);
				fl_pte += 16;
			} else if (chunk_size == SZ_1M) {
				ret = fl_1m(fl_pte, pa, pgprot1m);
				if (ret)
					goto fail;
				clean_pte(fl_pte, fl_pte + 1, pt->redirect);
				fl_pte++;
			}

			offset += chunk_size;
			chunk_offset += chunk_size;
			va += chunk_size;
			pa += chunk_size;

			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				pa = get_phys_addr(sg);
				if (pa == 0) {
					pr_debug("No dma address for sg %p\n",
						 sg);
					ret = -EINVAL;
					goto fail;
				}
			}
			continue;
		}
		/* for 4K or 64K, make sure there is a second level table */
		if (*fl_pte == 0) {
			if (!make_second_level(pt, fl_pte)) {
				ret = -ENOMEM;
				goto fail;
			}
		}
		if (!(*fl_pte & FL_TYPE_TABLE)) {
			ret = -EBUSY;
			goto fail;
		}
		sl_table = __va(((*fl_pte) & FL_BASE_MASK));
		sl_offset = SL_OFFSET(va);
		/* Keep track of initial position so we
		 * don't clean more than we have to
		 */
		sl_start = sl_offset;

		/* Build the 2nd level page table */
		while (offset < len && sl_offset < NUM_SL_PTE) {
			/* Map a large 64K page if the chunk is large enough
			 * and the pa and va are aligned
			 */
			if (is_fully_aligned(va, pa, sg->length - chunk_offset,
					     SZ_64K))
				chunk_size = SZ_64K;
			else
				chunk_size = SZ_4K;

			if (chunk_size == SZ_4K) {
				sl_4k(&sl_table[sl_offset], pa, pgprot4k);
				sl_offset++;
			} else {
				BUG_ON(sl_offset + 16 > NUM_SL_PTE);
				sl_64k(&sl_table[sl_offset], pa, pgprot64k);
				sl_offset += 16;
			}

			offset += chunk_size;
			chunk_offset += chunk_size;
			va += chunk_size;
			pa += chunk_size;

			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				pa = get_phys_addr(sg);
				if (pa == 0) {
					pr_debug("No dma address for sg %p\n",
						 sg);
					ret = -EINVAL;
					goto fail;
				}
			}
		}

		clean_pte(sl_table + sl_start, sl_table + sl_offset,
			  pt->redirect);
		fl_pte++;
		sl_offset = 0;
	}

fail:
	return ret;
}

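/*
 * Unmap an arbitrary 4KB-aligned range, walking one first-level entry
 * at a time and freeing any second-level table that becomes empty.
 */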
void msm_iommu_pagetable_unmap_range(struct iommu_pt *pt, unsigned int va,
				     unsigned int len)
{
	unsigned int offset = 0;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	int used, i;

	BUG_ON(len & (SZ_4K - 1));

	fl_offset = FL_OFFSET(va);		/* Upper 12 bits */
	fl_pte = pt->fl_table + fl_offset;	/* int pointers, 4 bytes */

	while (offset < len) {
		if (*fl_pte & FL_TYPE_TABLE) {
			sl_start = SL_OFFSET(va);
			sl_table = __va(((*fl_pte) & FL_BASE_MASK));
			sl_end = ((len - offset) / SZ_4K) + sl_start;

			if (sl_end > NUM_SL_PTE)
				sl_end = NUM_SL_PTE;

			memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
			clean_pte(sl_table + sl_start, sl_table + sl_end,
				  pt->redirect);

			offset += (sl_end - sl_start) * SZ_4K;
			va += (sl_end - sl_start) * SZ_4K;

			/* Unmap and free the 2nd level table if all mappings
			 * in it were removed. This saves memory, but the table
			 * will need to be re-allocated the next time someone
			 * tries to map these VAs.
			 */
			used = 0;

			/* If we just unmapped the whole table, don't bother
			 * seeing if there are still used entries left.
			 */
			if (sl_end - sl_start != NUM_SL_PTE)
				for (i = 0; i < NUM_SL_PTE; i++)
					if (sl_table[i]) {
						used = 1;
						break;
					}
			if (!used) {
				free_page((unsigned long)sl_table);
				*fl_pte = 0;

				clean_pte(fl_pte, fl_pte + 1, pt->redirect);
			}

			sl_start = 0;
		} else {
			*fl_pte = 0;
			clean_pte(fl_pte, fl_pte + 1, pt->redirect);
			va += SZ_1M;
			offset += SZ_1M;
			sl_start = 0;
		}
		fl_pte++;
	}
}

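/*
 * Scan the TEX remap registers (PRRR/NMRR) for the class index whose
 * inner/outer cache policy, memory type and shareability match the
 * requested attributes.
 */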
static int __init get_tex_class(int icp, int ocp, int mt, int nos)
{
	int i = 0;
	unsigned int prrr = 0;
	unsigned int nmrr = 0;
	int c_icp, c_ocp, c_mt, c_nos;

	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);

	for (i = 0; i < NUM_TEX_CLASS; i++) {
		c_nos = PRRR_NOS(prrr, i);
		c_mt = PRRR_MT(prrr, i);
		c_icp = NMRR_ICP(nmrr, i);
		c_ocp = NMRR_OCP(nmrr, i);

		if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
			return i;
	}

	return -ENODEV;
}

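/* Resolve each MSM_IOMMU_ATTR_* cacheability attribute to a TEX class. */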
static void __init setup_iommu_tex_classes(void)
{
	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
			get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
			get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
			get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}

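/* Probe the CPU's TEX remap configuration once during early init. */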
void __init msm_iommu_pagetable_init(void)
{
	setup_iommu_tex_classes();
}