/* Copyright (c) 2012 Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#include <mach/iommu.h>
#include "msm_iommu_pagetable.h"

/* Shareability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NON_SH           0x0
#define MSM_IOMMU_ATTR_SH               0x4

/* Cacheability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NONCACHED        0x0
#define MSM_IOMMU_ATTR_CACHED_WB_WA     0x1
#define MSM_IOMMU_ATTR_CACHED_WB_NWA    0x2
#define MSM_IOMMU_ATTR_CACHED_WT        0x3

static int msm_iommu_tex_class[4];

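/*
 * Flush a range of page table entries from the CPU caches so the IOMMU's
 * table walker sees the updated entries. The flush is skipped when
 * pt->redirect is set and the walker reads the tables through the cache.
 */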
static inline void clean_pte(unsigned long *start, unsigned long *end,
                             int redirect)
{
        if (!redirect)
                dmac_flush_range(start, end);
}

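/*
 * Allocate the 16KB first-level translation table for a page table
 * instance and hand it to the hardware in a known-zero state.
 */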
int msm_iommu_pagetable_alloc(struct iommu_pt *pt)
{
        pt->fl_table = (unsigned long *)__get_free_pages(GFP_KERNEL,
                                                         get_order(SZ_16K));
        if (!pt->fl_table)
                return -ENOMEM;

        memset(pt->fl_table, 0, SZ_16K);
        clean_pte(pt->fl_table, pt->fl_table + NUM_FL_PTE, pt->redirect);

        return 0;
}

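/*
 * Release a page table: free every second-level table still referenced
 * by a first-level entry, then free the first-level table itself.
 */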
void msm_iommu_pagetable_free(struct iommu_pt *pt)
{
        unsigned long *fl_table;
        int i;

        fl_table = pt->fl_table;
        for (i = 0; i < NUM_FL_PTE; i++)
                if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
                        free_page((unsigned long) __va(((fl_table[i]) &
                                                        FL_BASE_MASK)));
        free_pages((unsigned long)fl_table, get_order(SZ_16K));
        pt->fl_table = 0;
}

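/*
 * Convert IOMMU_READ/WRITE/CACHE protection flags and a mapping size into
 * ARM short-descriptor attribute bits. First-level (FL_*) encodings are
 * used for 1M sections and 16M supersections, second-level (SL_*)
 * encodings for 4K and 64K pages. Returns 0 if no valid encoding exists.
 */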
static int __get_pgprot(int prot, int len)
{
        unsigned int pgprot;
        int tex;

        if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
                prot |= IOMMU_READ | IOMMU_WRITE;
                WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
        }

        if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
                prot |= IOMMU_READ;
                WARN_ONCE(1, "Write-only unsupported; falling back to RW\n");
        }

        if (prot & IOMMU_CACHE)
                tex = (pgprot_kernel >> 2) & 0x07;
        else
                tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];

        if (tex < 0 || tex > NUM_TEX_CLASS - 1)
                return 0;

        if (len == SZ_16M || len == SZ_1M) {
                pgprot = FL_SHARED;
                pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
                pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
                pgprot |= tex & 0x04 ? FL_TEX0 : 0;
                pgprot |= FL_AP0 | FL_AP1;
                pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2;
        } else {
                pgprot = SL_SHARED;
                pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
                pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
                pgprot |= tex & 0x04 ? SL_TEX0 : 0;
                pgprot |= SL_AP0 | SL_AP1;
                pgprot |= prot & IOMMU_WRITE ? 0 : SL_AP2;
        }

        return pgprot;
}

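/*
 * Map a single physically contiguous region of exactly 4K, 64K, 1M or
 * 16M at virtual address va. 16M supersections and 64K large pages are
 * written as 16 identical entries; a second-level table is allocated on
 * demand for the 4K and 64K cases.
 */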
int msm_iommu_pagetable_map(struct iommu_pt *pt, unsigned long va,
                            phys_addr_t pa, size_t len, int prot)
{
        unsigned long *fl_pte;
        unsigned long fl_offset;
        unsigned long *sl_table;
        unsigned long *sl_pte;
        unsigned long sl_offset;
        unsigned int pgprot;
        int ret = 0;

        if (len != SZ_16M && len != SZ_1M &&
            len != SZ_64K && len != SZ_4K) {
                pr_debug("Bad size: %zu\n", len);
                ret = -EINVAL;
                goto fail;
        }

        if (!pt->fl_table) {
                pr_debug("Null page table\n");
                ret = -EINVAL;
                goto fail;
        }

        pgprot = __get_pgprot(prot, len);
        if (!pgprot) {
                ret = -EINVAL;
                goto fail;
        }

        fl_offset = FL_OFFSET(va);              /* Upper 12 bits */
        fl_pte = pt->fl_table + fl_offset;      /* int pointers, 4 bytes */

        if (len == SZ_16M) {
                int i = 0;

                for (i = 0; i < 16; i++)
                        if (*(fl_pte+i)) {
                                ret = -EBUSY;
                                goto fail;
                        }

                for (i = 0; i < 16; i++)
                        *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
                                      FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
                clean_pte(fl_pte, fl_pte + 16, pt->redirect);
        }

        if (len == SZ_1M) {
                if (*fl_pte) {
                        ret = -EBUSY;
                        goto fail;
                }

                *fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT
                          | FL_SHARED | pgprot;
                clean_pte(fl_pte, fl_pte + 1, pt->redirect);
        }

        /* Need a 2nd level table */
        if (len == SZ_4K || len == SZ_64K) {

                if (*fl_pte == 0) {
                        unsigned long *sl;
                        sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
                                                        get_order(SZ_4K));

                        if (!sl) {
                                pr_debug("Could not allocate second level table\n");
                                ret = -ENOMEM;
                                goto fail;
                        }
                        memset(sl, 0, SZ_4K);
                        clean_pte(sl, sl + NUM_SL_PTE, pt->redirect);

                        *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) |
                                   FL_TYPE_TABLE);
                        clean_pte(fl_pte, fl_pte + 1, pt->redirect);
                }

                if (!(*fl_pte & FL_TYPE_TABLE)) {
                        ret = -EBUSY;
                        goto fail;
                }
        }

        sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
        sl_offset = SL_OFFSET(va);
        sl_pte = sl_table + sl_offset;

        if (len == SZ_4K) {
                if (*sl_pte) {
                        ret = -EBUSY;
                        goto fail;
                }

                *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
                          | SL_TYPE_SMALL | pgprot;
                clean_pte(sl_pte, sl_pte + 1, pt->redirect);
        }

        if (len == SZ_64K) {
                int i;

                for (i = 0; i < 16; i++)
                        if (*(sl_pte+i)) {
                                ret = -EBUSY;
                                goto fail;
                        }

                for (i = 0; i < 16; i++)
                        *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
                                      | SL_SHARED | SL_TYPE_LARGE | pgprot;

                clean_pte(sl_pte, sl_pte + 16, pt->redirect);
        }

fail:
        return ret;
}

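/*
 * Tear down a mapping created by msm_iommu_pagetable_map(). If removing
 * a 4K or 64K mapping empties its second-level table, the table is freed
 * and the first-level entry cleared.
 */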
size_t msm_iommu_pagetable_unmap(struct iommu_pt *pt, unsigned long va,
                                 size_t len)
{
        unsigned long *fl_pte;
        unsigned long fl_offset;
        unsigned long *sl_table;
        unsigned long *sl_pte;
        unsigned long sl_offset;
        int i, ret = 0;

        if (len != SZ_16M && len != SZ_1M &&
            len != SZ_64K && len != SZ_4K) {
                pr_debug("Bad length: %zu\n", len);
                ret = -EINVAL;
                goto fail;
        }

        if (!pt->fl_table) {
                pr_debug("Null page table\n");
                ret = -EINVAL;
                goto fail;
        }

        fl_offset = FL_OFFSET(va);              /* Upper 12 bits */
        fl_pte = pt->fl_table + fl_offset;      /* int pointers, 4 bytes */

        if (*fl_pte == 0) {
                pr_debug("First level PTE is 0\n");
                ret = -ENODEV;
                goto fail;
        }

        /* Unmap supersection */
        if (len == SZ_16M) {
                for (i = 0; i < 16; i++)
                        *(fl_pte+i) = 0;

                clean_pte(fl_pte, fl_pte + 16, pt->redirect);
        }

        if (len == SZ_1M) {
                *fl_pte = 0;
                clean_pte(fl_pte, fl_pte + 1, pt->redirect);
        }

        sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
        sl_offset = SL_OFFSET(va);
        sl_pte = sl_table + sl_offset;

        if (len == SZ_64K) {
                for (i = 0; i < 16; i++)
                        *(sl_pte+i) = 0;

                clean_pte(sl_pte, sl_pte + 16, pt->redirect);
        }

        if (len == SZ_4K) {
                *sl_pte = 0;
                clean_pte(sl_pte, sl_pte + 1, pt->redirect);
        }

        if (len == SZ_4K || len == SZ_64K) {
                int used = 0;

                for (i = 0; i < NUM_SL_PTE; i++)
                        if (sl_table[i])
                                used = 1;
                if (!used) {
                        free_page((unsigned long)sl_table);
                        *fl_pte = 0;
                        clean_pte(fl_pte, fl_pte + 1, pt->redirect);
                }
        }

fail:
        return ret;
}

static unsigned int get_phys_addr(struct scatterlist *sg)
{
        /*
         * Try sg_dma_address first so that we can
         * map carveout regions that do not have a
         * struct page associated with them.
         */
        unsigned int pa = sg_dma_address(sg);
        if (pa == 0)
                pa = sg_phys(sg);
        return pa;
}

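/*
 * Map a scatterlist at virtual address va using 4K second-level entries,
 * allocating second-level tables as needed and cleaning only the range
 * of entries that was actually written.
 */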
int msm_iommu_pagetable_map_range(struct iommu_pt *pt, unsigned int va,
                                  struct scatterlist *sg, unsigned int len, int prot)
{
        unsigned int pa;
        unsigned int offset = 0;
        unsigned int pgprot;
        unsigned long *fl_pte;
        unsigned long fl_offset;
        unsigned long *sl_table;
        unsigned long sl_offset, sl_start;
        unsigned int chunk_offset = 0;
        unsigned int chunk_pa;
        int ret = 0;

        BUG_ON(len & (SZ_4K - 1));

        pgprot = __get_pgprot(prot, SZ_4K);
        if (!pgprot) {
                ret = -EINVAL;
                goto fail;
        }

        fl_offset = FL_OFFSET(va);              /* Upper 12 bits */
        fl_pte = pt->fl_table + fl_offset;      /* int pointers, 4 bytes */

        sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
        sl_offset = SL_OFFSET(va);

        chunk_pa = get_phys_addr(sg);
        if (chunk_pa == 0) {
                pr_debug("No dma address for sg %p\n", sg);
                ret = -EINVAL;
                goto fail;
        }

        while (offset < len) {
                /* Set up a 2nd level page table if one doesn't exist */
                if (*fl_pte == 0) {
                        sl_table = (unsigned long *)
                                   __get_free_pages(GFP_KERNEL, get_order(SZ_4K));

                        if (!sl_table) {
                                pr_debug("Could not allocate second level table\n");
                                ret = -ENOMEM;
                                goto fail;
                        }

                        memset(sl_table, 0, SZ_4K);
                        clean_pte(sl_table, sl_table + NUM_SL_PTE,
                                  pt->redirect);

                        *fl_pte = ((((int)__pa(sl_table)) & FL_BASE_MASK) |
                                   FL_TYPE_TABLE);
                        clean_pte(fl_pte, fl_pte + 1, pt->redirect);
                } else
                        sl_table = (unsigned long *)
                                   __va(((*fl_pte) & FL_BASE_MASK));

                /* Keep track of initial position so we
                 * don't clean more than we have to
                 */
                sl_start = sl_offset;

                /* Build the 2nd level page table */
                while (offset < len && sl_offset < NUM_SL_PTE) {
                        pa = chunk_pa + chunk_offset;
                        sl_table[sl_offset] = (pa & SL_BASE_MASK_SMALL) |
                                      pgprot | SL_NG | SL_SHARED | SL_TYPE_SMALL;
                        sl_offset++;
                        offset += SZ_4K;

                        chunk_offset += SZ_4K;

                        if (chunk_offset >= sg->length && offset < len) {
                                chunk_offset = 0;
                                sg = sg_next(sg);
                                chunk_pa = get_phys_addr(sg);
                                if (chunk_pa == 0) {
                                        pr_debug("No dma address for sg %p\n",
                                                 sg);
                                        ret = -EINVAL;
                                        goto fail;
                                }
                        }
                }

                clean_pte(sl_table + sl_start, sl_table + sl_offset,
                          pt->redirect);
                fl_pte++;
                sl_offset = 0;
        }

fail:
        return ret;
}

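/*
 * Unmap a range previously mapped with msm_iommu_pagetable_map_range(),
 * clearing the 4K second-level entries and freeing any second-level
 * table that is left completely unused.
 */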
void msm_iommu_pagetable_unmap_range(struct iommu_pt *pt, unsigned int va,
                                     unsigned int len)
{
        unsigned int offset = 0;
        unsigned long *fl_pte;
        unsigned long fl_offset;
        unsigned long *sl_table;
        unsigned long sl_start, sl_end;
        int used, i;

        BUG_ON(len & (SZ_4K - 1));

        fl_offset = FL_OFFSET(va);              /* Upper 12 bits */
        fl_pte = pt->fl_table + fl_offset;      /* int pointers, 4 bytes */

        sl_start = SL_OFFSET(va);

        while (offset < len) {
                sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
                sl_end = ((len - offset) / SZ_4K) + sl_start;

                if (sl_end > NUM_SL_PTE)
                        sl_end = NUM_SL_PTE;

                memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
                clean_pte(sl_table + sl_start, sl_table + sl_end,
                          pt->redirect);

                offset += (sl_end - sl_start) * SZ_4K;

                /* Unmap and free the 2nd level table if all mappings in it
                 * were removed. This saves memory, but the table will need
                 * to be re-allocated the next time someone tries to map these
                 * VAs.
                 */
                used = 0;

                /* If we just unmapped the whole table, don't bother
                 * seeing if there are still used entries left.
                 */
                if (sl_end - sl_start != NUM_SL_PTE)
                        for (i = 0; i < NUM_SL_PTE; i++)
                                if (sl_table[i]) {
                                        used = 1;
                                        break;
                                }
                if (!used) {
                        free_page((unsigned long)sl_table);
                        *fl_pte = 0;
                        clean_pte(fl_pte, fl_pte + 1, pt->redirect);
                }

                sl_start = 0;
                fl_pte++;
        }
}

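/*
 * Search the TEX remap classes described by PRRR/NMRR for one matching
 * the requested inner/outer cache policy, memory type and shareability
 * setting. Returns the class index, or -ENODEV if none matches.
 */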
static int __init get_tex_class(int icp, int ocp, int mt, int nos)
{
        int i = 0;
        unsigned int prrr = 0;
        unsigned int nmrr = 0;
        int c_icp, c_ocp, c_mt, c_nos;

        RCP15_PRRR(prrr);
        RCP15_NMRR(nmrr);

        for (i = 0; i < NUM_TEX_CLASS; i++) {
                c_nos = PRRR_NOS(prrr, i);
                c_mt = PRRR_MT(prrr, i);
                c_icp = NMRR_ICP(nmrr, i);
                c_ocp = NMRR_OCP(nmrr, i);

                if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
                        return i;
        }

        return -ENODEV;
}

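/*
 * Resolve each MSM_IOMMU_ATTR_* cacheability attribute to its TEX remap
 * class once at init time so it does not need to be recomputed for every
 * mapping.
 */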
static void __init setup_iommu_tex_classes(void)
{
        msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
                        get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

        msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
                        get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

        msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
                        get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

        msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
                        get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}

void __init msm_iommu_pagetable_init(void)
{
        setup_iommu_tex_classes();
}