/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#include <mach/iommu.h>
#include <mach/msm_iommu_priv.h>
#include "msm_iommu_pagetable.h"

/* Sharability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NON_SH		0x0
#define MSM_IOMMU_ATTR_SH		0x4

/* Cacheability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NONCACHED	0x0
#define MSM_IOMMU_ATTR_CACHED_WB_WA	0x1
#define MSM_IOMMU_ATTR_CACHED_WB_NWA	0x2
#define MSM_IOMMU_ATTR_CACHED_WT	0x3

static int msm_iommu_tex_class[4];

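/*
 * Clean a range of PTEs out to memory. When "redirect" is set, the IOMMU's
 * page table walker reads the tables through the CPU cache, so no flush is
 * needed; otherwise the updated entries must be flushed so the walker sees
 * them.
 */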
static inline void clean_pte(unsigned long *start, unsigned long *end,
			     int redirect)
{
	if (!redirect)
		dmac_flush_range(start, end);
}

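/*
 * Allocate the 16K, NUM_FL_PTE-entry first-level table for a page table
 * instance. Second-level tables are allocated lazily, as mappings need them.
 */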
int msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt)
{
	pt->fl_table = (unsigned long *)__get_free_pages(GFP_KERNEL,
							 get_order(SZ_16K));
	if (!pt->fl_table)
		return -ENOMEM;

	memset(pt->fl_table, 0, SZ_16K);
	clean_pte(pt->fl_table, pt->fl_table + NUM_FL_PTE, pt->redirect);

	return 0;
}

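/*
 * Free the first-level table along with any second-level tables still
 * hanging off it.
 */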
void msm_iommu_pagetable_free(struct msm_iommu_pt *pt)
{
	unsigned long *fl_table;
	int i;

	fl_table = pt->fl_table;
	for (i = 0; i < NUM_FL_PTE; i++)
		if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
			free_page((unsigned long) __va(((fl_table[i]) &
							FL_BASE_MASK)));
	free_pages((unsigned long)fl_table, get_order(SZ_16K));
	pt->fl_table = NULL;
}

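/*
 * Translate IOMMU_* protection flags into ARM short-descriptor protection
 * bits for the requested mapping size: first-level (FL_*) bits for 1M
 * sections and 16M supersections, second-level (SL_*) bits for 4K and 64K
 * pages. Returns 0 if no valid TEX class matches the request.
 */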
static int __get_pgprot(int prot, int len)
{
	unsigned int pgprot;
	int tex;

	if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
		prot |= IOMMU_READ | IOMMU_WRITE;
		WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
	}

	if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
		prot |= IOMMU_READ;
		WARN_ONCE(1, "Write-only unsupported; falling back to RW\n");
	}

	if (prot & IOMMU_CACHE)
		tex = (pgprot_kernel >> 2) & 0x07;
	else
		tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];

	if (tex < 0 || tex > NUM_TEX_CLASS - 1)
		return 0;

	if (len == SZ_16M || len == SZ_1M) {
		pgprot = FL_SHARED;
		pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? FL_TEX0 : 0;
		pgprot |= FL_AP0 | FL_AP1;
		pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2;
	} else {
		pgprot = SL_SHARED;
		pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? SL_TEX0 : 0;
		pgprot |= SL_AP0 | SL_AP1;
		pgprot |= prot & IOMMU_WRITE ? 0 : SL_AP2;
	}

	return pgprot;
}

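/*
 * Allocate a page for a second-level table (NUM_SL_PTE entries) and hook it
 * under the given first-level PTE. Returns the new table, or NULL on
 * allocation failure.
 */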
static unsigned long *make_second_level(struct msm_iommu_pt *pt,
					unsigned long *fl_pte)
{
	unsigned long *sl;
	sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
						get_order(SZ_4K));

	if (!sl) {
		pr_debug("Could not allocate second level table\n");
		goto fail;
	}
	memset(sl, 0, SZ_4K);
	clean_pte(sl, sl + NUM_SL_PTE, pt->redirect);

	*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE);

	clean_pte(fl_pte, fl_pte + 1, pt->redirect);
fail:
	return sl;
}

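/* Install one 4K small-page entry; fails with -EBUSY if already mapped. */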
static int sl_4k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
{
	int ret = 0;

	if (*sl_pte) {
		ret = -EBUSY;
		goto fail;
	}

	*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
		| SL_TYPE_SMALL | pgprot;
fail:
	return ret;
}

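/*
 * Install a 64K large-page entry. The ARM short-descriptor format requires
 * the entry to be replicated across 16 consecutive second-level slots.
 */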
static int sl_64k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
{
	int ret = 0;
	int i;

	for (i = 0; i < 16; i++)
		if (*(sl_pte+i)) {
			ret = -EBUSY;
			goto fail;
		}

	for (i = 0; i < 16; i++)
		*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
			| SL_SHARED | SL_TYPE_LARGE | pgprot;

fail:
	return ret;
}

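/* Install one 1M first-level section entry. */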
static inline int fl_1m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
{
	if (*fl_pte)
		return -EBUSY;

	*fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
		| pgprot;

	return 0;
}

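/*
 * Install a 16M supersection entry, replicated across 16 consecutive
 * first-level slots as the architecture requires.
 */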
static inline int fl_16m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
{
	int i;
	int ret = 0;
	for (i = 0; i < 16; i++)
		if (*(fl_pte+i)) {
			ret = -EBUSY;
			goto fail;
		}
	for (i = 0; i < 16; i++)
		*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
			| FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
fail:
	return ret;
}

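/*
 * Map a single physically contiguous chunk of @len bytes (4K, 64K, 1M or
 * 16M) at IOMMU virtual address @va. A second-level table is allocated on
 * demand for 4K and 64K mappings.
 *
 * A minimal, hypothetical usage sketch (the VA and "page" are illustrative
 * assumptions, not part of this file), with "pt" set up by
 * msm_iommu_pagetable_alloc():
 *
 *	ret = msm_iommu_pagetable_map(pt, 0x10000000, page_to_phys(page),
 *				      SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	if (ret)
 *		pr_err("map failed: %d\n", ret);
 */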
int msm_iommu_pagetable_map(struct msm_iommu_pt *pt, unsigned long va,
			    phys_addr_t pa, size_t len, int prot)
{
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	unsigned int pgprot;
	int ret = 0;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad size: %zu\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!pt->fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	pgprot = __get_pgprot(prot, len);
	if (!pgprot) {
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);		/* Upper 12 bits */
	fl_pte = pt->fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (len == SZ_16M) {
		ret = fl_16m(fl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(fl_pte, fl_pte + 16, pt->redirect);
	}

	if (len == SZ_1M) {
		ret = fl_1m(fl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(fl_pte, fl_pte + 1, pt->redirect);
	}

	/* Need a 2nd level table */
	if (len == SZ_4K || len == SZ_64K) {

		if (*fl_pte == 0) {
			if (make_second_level(pt, fl_pte) == NULL) {
				ret = -ENOMEM;
				goto fail;
			}
		}

		if (!(*fl_pte & FL_TYPE_TABLE)) {
			ret = -EBUSY;
			goto fail;
		}
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_4K) {
		ret = sl_4k(sl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(sl_pte, sl_pte + 1, pt->redirect);
	}

	if (len == SZ_64K) {
		ret = sl_64k(sl_pte, pa, pgprot);
		if (ret)
			goto fail;
		clean_pte(sl_pte, sl_pte + 16, pt->redirect);
	}

fail:
	return ret;
}

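/*
 * Tear down a mapping previously created with msm_iommu_pagetable_map().
 * @len must match the size used at map time. A second-level table is freed
 * once its last entry is removed. Returns 0 on success, an error code
 * otherwise.
 */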
size_t msm_iommu_pagetable_unmap(struct msm_iommu_pt *pt, unsigned long va,
				 size_t len)
{
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	int i, ret = 0;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad length: %zu\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!pt->fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);		/* Upper 12 bits */
	fl_pte = pt->fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (*fl_pte == 0) {
		pr_debug("First level PTE is 0\n");
		ret = -ENODEV;
		goto fail;
	}

	/* Unmap supersection */
	if (len == SZ_16M) {
		for (i = 0; i < 16; i++)
			*(fl_pte+i) = 0;

		clean_pte(fl_pte, fl_pte + 16, pt->redirect);
	}

	if (len == SZ_1M) {
		*fl_pte = 0;
		clean_pte(fl_pte, fl_pte + 1, pt->redirect);
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_64K) {
		for (i = 0; i < 16; i++)
			*(sl_pte+i) = 0;

		clean_pte(sl_pte, sl_pte + 16, pt->redirect);
	}

	if (len == SZ_4K) {
		*sl_pte = 0;
		clean_pte(sl_pte, sl_pte + 1, pt->redirect);
	}

	if (len == SZ_4K || len == SZ_64K) {
		int used = 0;

		for (i = 0; i < NUM_SL_PTE; i++)
			if (sl_table[i])
				used = 1;
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;
			clean_pte(fl_pte, fl_pte + 1, pt->redirect);
		}
	}

fail:
	return ret;
}

static phys_addr_t get_phys_addr(struct scatterlist *sg)
{
	/*
	 * Try sg_dma_address first so that we can
	 * map carveout regions that do not have a
	 * struct page associated with them.
	 */
	phys_addr_t pa = sg_dma_address(sg);
	if (pa == 0)
		pa = sg_phys(sg);
	return pa;
}

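/*
 * Verify that [va, va + len) is entirely unmapped before a range map
 * begins, so a partially applied mapping never has to be rolled back.
 */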
static int check_range(unsigned long *fl_table, unsigned int va,
		       unsigned int len)
{
	unsigned int offset = 0;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	int i;

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	while (offset < len) {
		if (*fl_pte & FL_TYPE_TABLE) {
			sl_start = SL_OFFSET(va);
			sl_table = __va(((*fl_pte) & FL_BASE_MASK));
			sl_end = ((len - offset) / SZ_4K) + sl_start;

			if (sl_end > NUM_SL_PTE)
				sl_end = NUM_SL_PTE;

			for (i = sl_start; i < sl_end; i++) {
				if (sl_table[i] != 0) {
					pr_err("%08x - %08x already mapped\n",
					       va, va + SZ_4K);
					return -EBUSY;
				}
				offset += SZ_4K;
				va += SZ_4K;
			}

			sl_start = 0;
		} else {
			if (*fl_pte != 0) {
				pr_err("%08x - %08x already mapped\n",
				       va, va + SZ_1M);
				return -EBUSY;
			}
			va += SZ_1M;
			offset += SZ_1M;
			sl_start = 0;
		}
		fl_pte++;
	}
	return 0;
}

static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
				   int align)
{
	return IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
		&& (len >= align);
}

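/*
 * Map a scatterlist into the IOMMU virtual range starting at @va. Each
 * chunk is mapped with the largest block size (16M, 1M, 64K or 4K) that the
 * current virtual and physical alignment permits. @len must be a multiple
 * of 4K.
 *
 * An illustrative, hypothetical call (the VA and scatterlist "sgl" are
 * assumed, not defined here), mapping 1M of buffer memory read-only:
 *
 *	ret = msm_iommu_pagetable_map_range(pt, 0x40000000, sgl, SZ_1M,
 *					    IOMMU_READ);
 */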
int msm_iommu_pagetable_map_range(struct msm_iommu_pt *pt, unsigned int va,
				  struct scatterlist *sg, unsigned int len,
				  int prot)
{
	phys_addr_t pa;
	unsigned int offset = 0;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table = NULL;
	unsigned long sl_offset, sl_start;
	unsigned int chunk_size, chunk_offset = 0;
	int ret = 0;
	unsigned int pgprot4k, pgprot64k, pgprot1m, pgprot16m;

	BUG_ON(len & (SZ_4K - 1));

	pgprot4k = __get_pgprot(prot, SZ_4K);
	pgprot64k = __get_pgprot(prot, SZ_64K);
	pgprot1m = __get_pgprot(prot, SZ_1M);
	pgprot16m = __get_pgprot(prot, SZ_16M);
	if (!pgprot4k || !pgprot64k || !pgprot1m || !pgprot16m) {
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);		/* Upper 12 bits */
	fl_pte = pt->fl_table + fl_offset;	/* int pointers, 4 bytes */
	pa = get_phys_addr(sg);

	ret = check_range(pt->fl_table, va, len);
	if (ret)
		goto fail;

	while (offset < len) {
		chunk_size = SZ_4K;

		if (is_fully_aligned(va, pa, sg->length - chunk_offset,
				     SZ_16M))
			chunk_size = SZ_16M;
		else if (is_fully_aligned(va, pa, sg->length - chunk_offset,
					  SZ_1M))
			chunk_size = SZ_1M;
		/* 64k or 4k determined later */

		/* for 1M and 16M, only first level entries are required */
		if (chunk_size >= SZ_1M) {
			if (chunk_size == SZ_16M) {
				ret = fl_16m(fl_pte, pa, pgprot16m);
				if (ret)
					goto fail;
				clean_pte(fl_pte, fl_pte + 16, pt->redirect);
				fl_pte += 16;
			} else if (chunk_size == SZ_1M) {
				ret = fl_1m(fl_pte, pa, pgprot1m);
				if (ret)
					goto fail;
				clean_pte(fl_pte, fl_pte + 1, pt->redirect);
				fl_pte++;
			}

			offset += chunk_size;
			chunk_offset += chunk_size;
			va += chunk_size;
			pa += chunk_size;

			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				pa = get_phys_addr(sg);
				if (pa == 0) {
					pr_debug("No dma address for sg %p\n",
						 sg);
					ret = -EINVAL;
					goto fail;
				}
			}
			continue;
		}
		/* for 4K or 64K, make sure there is a second level table */
		if (*fl_pte == 0) {
			if (!make_second_level(pt, fl_pte)) {
				ret = -ENOMEM;
				goto fail;
			}
		}
		if (!(*fl_pte & FL_TYPE_TABLE)) {
			ret = -EBUSY;
			goto fail;
		}
		sl_table = __va(((*fl_pte) & FL_BASE_MASK));
		sl_offset = SL_OFFSET(va);
		/* Keep track of initial position so we
		 * don't clean more than we have to
		 */
		sl_start = sl_offset;

		/* Build the 2nd level page table */
		while (offset < len && sl_offset < NUM_SL_PTE) {
			/* Map a large 64K page if the chunk is large enough
			 * and the pa and va are aligned
			 */
			if (is_fully_aligned(va, pa, sg->length - chunk_offset,
					     SZ_64K))
				chunk_size = SZ_64K;
			else
				chunk_size = SZ_4K;

			if (chunk_size == SZ_4K) {
				sl_4k(&sl_table[sl_offset], pa, pgprot4k);
				sl_offset++;
			} else {
				BUG_ON(sl_offset + 16 > NUM_SL_PTE);
				sl_64k(&sl_table[sl_offset], pa, pgprot64k);
				sl_offset += 16;
			}

			offset += chunk_size;
			chunk_offset += chunk_size;
			va += chunk_size;
			pa += chunk_size;

			if (chunk_offset >= sg->length && offset < len) {
				chunk_offset = 0;
				sg = sg_next(sg);
				pa = get_phys_addr(sg);
				if (pa == 0) {
					pr_debug("No dma address for sg %p\n",
						 sg);
					ret = -EINVAL;
					goto fail;
				}
			}
		}

		clean_pte(sl_table + sl_start, sl_table + sl_offset,
			  pt->redirect);
		fl_pte++;
		sl_offset = 0;
	}

fail:
	return ret;
}

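/*
 * Unmap a virtual range previously mapped with
 * msm_iommu_pagetable_map_range(). Second-level entries are cleared a block
 * at a time, and a table page is freed once every entry in it is gone.
 * @len must be a multiple of 4K.
 */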
void msm_iommu_pagetable_unmap_range(struct msm_iommu_pt *pt, unsigned int va,
				     unsigned int len)
{
	unsigned int offset = 0;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long sl_start, sl_end;
	int used, i;

	BUG_ON(len & (SZ_4K - 1));

	fl_offset = FL_OFFSET(va);		/* Upper 12 bits */
	fl_pte = pt->fl_table + fl_offset;	/* int pointers, 4 bytes */

	while (offset < len) {
		if (*fl_pte & FL_TYPE_TABLE) {
			sl_start = SL_OFFSET(va);
			sl_table = __va(((*fl_pte) & FL_BASE_MASK));
			sl_end = ((len - offset) / SZ_4K) + sl_start;

			if (sl_end > NUM_SL_PTE)
				sl_end = NUM_SL_PTE;

			memset(sl_table + sl_start, 0,
			       (sl_end - sl_start) * sizeof(*sl_table));
			clean_pte(sl_table + sl_start, sl_table + sl_end,
				  pt->redirect);

			offset += (sl_end - sl_start) * SZ_4K;
			va += (sl_end - sl_start) * SZ_4K;

			/* Unmap and free the 2nd level table if all mappings
			 * in it were removed. This saves memory, but the table
			 * will need to be re-allocated the next time someone
			 * tries to map these VAs.
			 */
			used = 0;

			/* If we just unmapped the whole table, don't bother
			 * seeing if there are still used entries left.
			 */
			if (sl_end - sl_start != NUM_SL_PTE)
				for (i = 0; i < NUM_SL_PTE; i++)
					if (sl_table[i]) {
						used = 1;
						break;
					}
			if (!used) {
				free_page((unsigned long)sl_table);
				*fl_pte = 0;

				clean_pte(fl_pte, fl_pte + 1, pt->redirect);
			}

			sl_start = 0;
		} else {
			*fl_pte = 0;
			clean_pte(fl_pte, fl_pte + 1, pt->redirect);
			va += SZ_1M;
			offset += SZ_1M;
			sl_start = 0;
		}
		fl_pte++;
	}
}

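/*
 * Look up the TEX remap class whose inner/outer cache policy, memory type,
 * and shareability (read from the PRRR and NMRR registers) match the
 * requested attributes. Returns the class index, or -ENODEV if none match.
 */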
static int __init get_tex_class(int icp, int ocp, int mt, int nos)
{
	int i = 0;
	unsigned int prrr = 0;
	unsigned int nmrr = 0;
	int c_icp, c_ocp, c_mt, c_nos;

	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);

	for (i = 0; i < NUM_TEX_CLASS; i++) {
		c_nos = PRRR_NOS(prrr, i);
		c_mt = PRRR_MT(prrr, i);
		c_icp = NMRR_ICP(nmrr, i);
		c_ocp = NMRR_OCP(nmrr, i);

		if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
			return i;
	}

	return -ENODEV;
}

static void __init setup_iommu_tex_classes(void)
{
	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
		get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
		get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
		get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
		get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}

void __init msm_iommu_pagetable_init(void)
{
	setup_iommu_tex_classes();
}