/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/io.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/memory_alloc.h>
#include <linux/module.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
#include <mach/msm_subsystem_map.h>

/*
 * TODO Have this passed in from the board file or convert
 * to whatever API upstream comes up with.
 *
 * Listed in descending order, since larger page sizes should be tried
 * before smaller ones.
 */
static unsigned int iommu_page_sizes[4] = {SZ_16M, SZ_1M, SZ_64K, SZ_4K};

struct msm_buffer_node {
	struct rb_node rb_node_all_buffer;
	struct rb_node rb_node_paddr;
	struct msm_mapped_buffer *buf;
	unsigned long length;
	unsigned int *subsystems;
	unsigned int nsubsys;
	unsigned int pg_size;
	unsigned int phys;
};

static struct rb_root buffer_root;
static struct rb_root phys_root;
DEFINE_MUTEX(msm_buffer_mutex);

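/*
 * Look up a buffer in the global buffer tree. The key is the kernel
 * virtual address when the buffer has one, otherwise the address of the
 * msm_mapped_buffer structure itself (matching the key choice made in
 * add_buffer). Returns NULL if no match is found.
 */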
static struct msm_buffer_node *find_buffer(void *key)
{
	struct rb_root *root = &buffer_root;
	struct rb_node *p = root->rb_node;

	mutex_lock(&msm_buffer_mutex);

	while (p) {
		struct msm_buffer_node *node;

		node = rb_entry(p, struct msm_buffer_node, rb_node_all_buffer);
		if (node->buf->vaddr) {
			if (key < node->buf->vaddr)
				p = p->rb_left;
			else if (key > node->buf->vaddr)
				p = p->rb_right;
			else {
				mutex_unlock(&msm_buffer_mutex);
				return node;
			}
		} else {
			if (key < (void *)node->buf)
				p = p->rb_left;
			else if (key > (void *)node->buf)
				p = p->rb_right;
			else {
				mutex_unlock(&msm_buffer_mutex);
				return node;
			}
		}
	}
	mutex_unlock(&msm_buffer_mutex);
	return NULL;
}

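/*
 * Look up a buffer by physical address in the tree of buffers that have
 * a kernel virtual mapping. Returns NULL if the physical address has not
 * been mapped.
 */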
static struct msm_buffer_node *find_buffer_phys(unsigned int phys)
{
	struct rb_root *root = &phys_root;
	struct rb_node *p = root->rb_node;

	mutex_lock(&msm_buffer_mutex);

	while (p) {
		struct msm_buffer_node *node;

		node = rb_entry(p, struct msm_buffer_node, rb_node_paddr);
		if (phys < node->phys)
			p = p->rb_left;
		else if (phys > node->phys)
			p = p->rb_right;
		else {
			mutex_unlock(&msm_buffer_mutex);
			return node;
		}
	}
	mutex_unlock(&msm_buffer_mutex);
	return NULL;
}

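/*
 * Insert a node into the global buffer tree, keyed on the kernel virtual
 * address when one exists and on the msm_mapped_buffer pointer otherwise.
 * Fails with -EINVAL if the same buffer is already in the tree.
 */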
static int add_buffer(struct msm_buffer_node *node)
{
	struct rb_root *root = &buffer_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	void *key;

	if (node->buf->vaddr)
		key = node->buf->vaddr;
	else
		key = node->buf;

	mutex_lock(&msm_buffer_mutex);
	while (*p) {
		struct msm_buffer_node *tmp;
		parent = *p;

		tmp = rb_entry(parent, struct msm_buffer_node,
						rb_node_all_buffer);

		if (tmp->buf->vaddr) {
			if (key < tmp->buf->vaddr)
				p = &(*p)->rb_left;
			else if (key > tmp->buf->vaddr)
				p = &(*p)->rb_right;
			else {
				WARN(1, "tried to add buffer twice! buf = %p"
					" vaddr = %p iova = %p", tmp->buf,
					tmp->buf->vaddr,
					tmp->buf->iova);
				mutex_unlock(&msm_buffer_mutex);
				return -EINVAL;
			}
		} else {
			if (key < (void *)tmp->buf)
				p = &(*p)->rb_left;
			else if (key > (void *)tmp->buf)
				p = &(*p)->rb_right;
			else {
				WARN(1, "tried to add buffer twice! buf = %p"
					" vaddr = %p iova = %p", tmp->buf,
					tmp->buf->vaddr,
					tmp->buf->iova);
				mutex_unlock(&msm_buffer_mutex);
				return -EINVAL;
			}
		}
	}
	rb_link_node(&node->rb_node_all_buffer, parent, p);
	rb_insert_color(&node->rb_node_all_buffer, root);
	mutex_unlock(&msm_buffer_mutex);
	return 0;
}

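/*
 * Insert a node into the physical address tree. Fails with -EINVAL if a
 * buffer with the same physical address has already been inserted.
 */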
static int add_buffer_phys(struct msm_buffer_node *node)
{
	struct rb_root *root = &phys_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	mutex_lock(&msm_buffer_mutex);
	while (*p) {
		struct msm_buffer_node *tmp;
		parent = *p;

		tmp = rb_entry(parent, struct msm_buffer_node, rb_node_paddr);

		if (node->phys < tmp->phys)
			p = &(*p)->rb_left;
		else if (node->phys > tmp->phys)
			p = &(*p)->rb_right;
		else {
			WARN(1, "tried to add buffer twice! buf = %p"
				" vaddr = %p iova = %p", tmp->buf,
				tmp->buf->vaddr,
				tmp->buf->iova);
			mutex_unlock(&msm_buffer_mutex);
			return -EINVAL;
		}
	}
	rb_link_node(&node->rb_node_paddr, parent, p);
	rb_insert_color(&node->rb_node_paddr, root);
	mutex_unlock(&msm_buffer_mutex);
	return 0;
}

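/* Remove a node from the global buffer tree. */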
static int remove_buffer(struct msm_buffer_node *victim_node)
{
	struct rb_root *root = &buffer_root;

	if (!victim_node)
		return -EINVAL;

	mutex_lock(&msm_buffer_mutex);
	rb_erase(&victim_node->rb_node_all_buffer, root);
	mutex_unlock(&msm_buffer_mutex);
	return 0;
}

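/* Remove a node from the physical address tree. */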
static int remove_buffer_phys(struct msm_buffer_node *victim_node)
{
	struct rb_root *root = &phys_root;

	if (!victim_node)
		return -EINVAL;

	mutex_lock(&msm_buffer_mutex);
	rb_erase(&victim_node->rb_node_paddr, root);
	mutex_unlock(&msm_buffer_mutex);
	return 0;
}

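/*
 * Carve an aligned iova range of the given size out of the subsystem's
 * genpool. Returns 0 if the pool is exhausted.
 */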
static unsigned long allocate_iova_address(unsigned long size,
					int subsys_id,
					unsigned long align)
{
	struct mem_pool *pool = msm_subsystem_get_pool(subsys_id);
	unsigned long iova;

	iova = gen_pool_alloc_aligned(pool->gpool, size, ilog2(align));
	if (iova)
		pool->free -= size;

	return iova;
}

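/* Return an iova range to the subsystem's genpool. */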
static void free_iova_address(unsigned long iova,
			unsigned long size,
			int subsys_id)
{
	struct mem_pool *pool = msm_subsystem_get_pool(subsys_id);

	pool->free += size;
	gen_pool_free(pool->gpool, iova, size);
}

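/*
 * A subsystem id is usable for iommu mapping only if it is in range and
 * has both an iommu domain and an iova pool associated with it.
 */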
static int subsys_validate(int subsys_id)
{
	struct mem_pool *pool;
	struct iommu_domain *subsys_domain;

	if (!msm_subsystem_check_id(subsys_id)) {
		WARN(1, "subsystem id is not valid. Caller should check this.");
		return 0;
	}

	pool = msm_subsystem_get_pool(subsys_id);
	subsys_domain = msm_subsystem_get_domain(subsys_id);

	return subsys_domain && pool && pool->gpool;
}

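/*
 * Translate an iova handed out for the given subsystem back to the
 * physical address it maps. If the subsystem has no iommu domain the
 * mapping is assumed to be 1:1 and the iova is returned unchanged.
 */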
phys_addr_t msm_subsystem_check_iova_mapping(int subsys_id, unsigned long iova)
{
	struct iommu_domain *subsys_domain;

	if (!subsys_validate(subsys_id))
		/*
		 * If the subsystem is not valid, assume a phys = iova
		 * mapping. Just return the iova in this case.
		 */
		return iova;

	subsys_domain = msm_subsystem_get_domain(subsys_id);

	return iommu_iova_to_phys(subsys_domain, iova);
}
EXPORT_SYMBOL(msm_subsystem_check_iova_mapping);

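/*
 * Map a physically contiguous region for one or more subsystems.
 *
 * MSM_SUBSYSTEM_MAP_KADDR requests a kernel virtual mapping (cached if
 * MSM_SUBSYSTEM_MAP_CACHED is also set). MSM_SUBSYSTEM_MAP_IOVA requests
 * an iommu mapping in the domain of every id in subsys_ids; a requested
 * iova alignment may be encoded in the bits of flags above SZ_4K. At
 * least one of the two mapping flags must be given.
 *
 * Returns an msm_mapped_buffer with vaddr and/or a per-subsystem iova
 * array filled in, or an ERR_PTR() on failure.
 */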
struct msm_mapped_buffer *msm_subsystem_map_buffer(unsigned long phys,
						unsigned int length,
						unsigned int flags,
						int *subsys_ids,
						unsigned int nsubsys)
{
	struct msm_mapped_buffer *buf, *err;
	struct msm_buffer_node *node;
	int i = 0, j = 0, ret;
	unsigned long iova_start = 0, temp_phys, temp_va = 0;
	unsigned int order = 0, pg_size = 0;
	struct iommu_domain *d = NULL;

	if (!((flags & MSM_SUBSYSTEM_MAP_KADDR) ||
		(flags & MSM_SUBSYSTEM_MAP_IOVA))) {
		pr_warn("%s: no mapping flag was specified. The caller"
			" should explicitly specify what to map in the"
			" flags.\n", __func__);
		err = ERR_PTR(-EINVAL);
		goto outret;
	}

	buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
	if (!buf) {
		err = ERR_PTR(-ENOMEM);
		goto outret;
	}

	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (!node) {
		err = ERR_PTR(-ENOMEM);
		goto outkfreebuf;
	}

	node->phys = phys;

	if (flags & MSM_SUBSYSTEM_MAP_KADDR) {
		struct msm_buffer_node *old_buffer;

		old_buffer = find_buffer_phys(phys);

		if (old_buffer) {
			WARN(1, "%s: Attempting to map %lx twice in the kernel"
				" virtual space. Don't do that!\n", __func__,
				phys);
			err = ERR_PTR(-EINVAL);
			goto outkfreenode;
		}

		if (flags & MSM_SUBSYSTEM_MAP_CACHED)
			buf->vaddr = ioremap_cached(phys, length);
		else if (flags & MSM_SUBSYSTEM_MAP_KADDR)
			buf->vaddr = ioremap_nocache(phys, length);
		else {
			pr_warn("%s: no cacheability flag was indicated."
				" Caller must specify a cacheability flag.\n",
				__func__);
			err = ERR_PTR(-EINVAL);
			goto outkfreenode;
		}

		if (!buf->vaddr) {
			pr_err("%s: could not ioremap\n", __func__);
			err = ERR_PTR(-EINVAL);
			goto outkfreenode;
		}

		if (add_buffer_phys(node)) {
			err = ERR_PTR(-EINVAL);
			goto outiounmap;
		}
	}

	if ((flags & MSM_SUBSYSTEM_MAP_IOVA) && subsys_ids) {
		unsigned int min_align;

		pg_size = SZ_4K;

		for (i = 0; i < ARRAY_SIZE(iommu_page_sizes); i++) {
			if (IS_ALIGNED(length, iommu_page_sizes[i]) &&
			    IS_ALIGNED(phys, iommu_page_sizes[i])) {
				pg_size = iommu_page_sizes[i];
				break;
			}
		}

		length = round_up(length, pg_size);

		buf->iova = kzalloc(sizeof(unsigned long)*nsubsys, GFP_ATOMIC);
		if (!buf->iova) {
			err = ERR_PTR(-ENOMEM);
			goto outremovephys;
		}

		order = get_order(pg_size);

		/*
		 * The alignment must be specified as the exact value wanted
		 * e.g. 8k alignment must pass (0x2000 | other flags)
		 */
		min_align = flags & ~(SZ_4K - 1);

		for (i = 0; i < nsubsys; i++) {
			if (!subsys_validate(subsys_ids[i])) {
				buf->iova[i] = phys;
				continue;
			}

			d = msm_subsystem_get_domain(subsys_ids[i]);

			iova_start = allocate_iova_address(length,
						subsys_ids[i],
						max(min_align, pg_size));

			if (!iova_start) {
				pr_err("%s: could not allocate iova address\n",
					__func__);
				continue;
			}

			temp_phys = phys;
			temp_va = iova_start;
			for (j = length; j > 0; j -= pg_size,
						temp_phys += pg_size,
						temp_va += pg_size) {
				ret = iommu_map(d, temp_va, temp_phys,
						order, 0);
				if (ret) {
					pr_err("%s: could not map iommu for"
						" domain %p, iova %lx,"
						" phys %lx\n", __func__, d,
						temp_va, temp_phys);
					err = ERR_PTR(-EINVAL);
					goto outdomain;
				}
			}
			buf->iova[i] = iova_start;
		}
	}

	node->buf = buf;
	node->subsystems = subsys_ids;
	node->length = length;
	node->pg_size = pg_size;
	node->nsubsys = nsubsys;

	if (add_buffer(node)) {
		err = ERR_PTR(-EINVAL);
		goto outiova;
	}

	return buf;

outiova:
	if (flags & MSM_SUBSYSTEM_MAP_IOVA)
		iommu_unmap(d, temp_va, order);
outdomain:
	if (flags & MSM_SUBSYSTEM_MAP_IOVA) {
		for (j -= pg_size, temp_va -= pg_size;
			j > 0; temp_va -= pg_size, j -= pg_size)
			iommu_unmap(d, temp_va, order);

		for (i--; i >= 0; i--) {
			if (!subsys_validate(subsys_ids[i]))
				continue;

			temp_va = buf->iova[i];
			for (j = length; j > 0; j -= pg_size,
						temp_va += pg_size)
				iommu_unmap(d, temp_va, order);
			free_iova_address(buf->iova[i], length, subsys_ids[i]);
		}

		kfree(buf->iova);
	}

outremovephys:
	if (flags & MSM_SUBSYSTEM_MAP_KADDR)
		remove_buffer_phys(node);
outiounmap:
	if (flags & MSM_SUBSYSTEM_MAP_KADDR)
		iounmap(buf->vaddr);
outkfreenode:
	kfree(node);
outkfreebuf:
	kfree(buf);
outret:
	return err;
}
EXPORT_SYMBOL(msm_subsystem_map_buffer);

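/*
 * Tear down every mapping created by msm_subsystem_map_buffer: unmap and
 * release any per-subsystem iovas, iounmap the kernel mapping, and free
 * the bookkeeping structures. The buf pointer must be the one returned
 * by msm_subsystem_map_buffer.
 */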
int msm_subsystem_unmap_buffer(struct msm_mapped_buffer *buf)
{
	unsigned int order;
	struct msm_buffer_node *node;
	int i, j, ret;
	unsigned long temp_va;

	if (buf->vaddr)
		node = find_buffer(buf->vaddr);
	else
		node = find_buffer(buf);

	if (!node)
		goto out;

	if (node->buf != buf) {
		pr_err("%s: caller must pass in the same buffer structure"
			" returned from map_buffer when freeing\n", __func__);
		goto out;
	}

	order = get_order(node->pg_size);

	if (buf->iova) {
		for (i = 0; i < node->nsubsys; i++) {
			struct iommu_domain *subsys_domain;

			if (!subsys_validate(node->subsystems[i]))
				continue;

			subsys_domain = msm_subsystem_get_domain(
							node->subsystems[i]);
			temp_va = buf->iova[i];
			for (j = node->length; j > 0; j -= node->pg_size,
						temp_va += node->pg_size) {
				ret = iommu_unmap(subsys_domain, temp_va,
							order);
				WARN(ret, "iommu_unmap returned a non-zero"
					" value.\n");
			}
			free_iova_address(buf->iova[i], node->length,
						node->subsystems[i]);
		}
		kfree(buf->iova);
	}

	if (buf->vaddr) {
		remove_buffer_phys(node);
		iounmap(buf->vaddr);
	}

	remove_buffer(node);
	kfree(node);
	kfree(buf);

	return 0;
out:
	return -EINVAL;
}
EXPORT_SYMBOL(msm_subsystem_unmap_buffer);