blob: 116fc2ee7fe7a4ac3689ce3e0b35a31923b3abab [file] [log] [blame]
Mitchel Humpherysaf3b5222013-01-15 15:38:52 -08001/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/io.h>
14#include <linux/types.h>
15#include <linux/err.h>
16#include <linux/slab.h>
17#include <linux/memory_alloc.h>
18#include <linux/module.h>
19#include <mach/iommu.h>
20#include <mach/iommu_domains.h>
21#include <mach/msm_subsystem_map.h>
22
/*
 * Tracking node for one buffer handed out by msm_subsystem_map_buffer().
 * Each node may live in two red-black trees: buffer_root (all buffers,
 * keyed by kernel vaddr or, failing that, the buf pointer itself) and
 * phys_root (only kernel-mapped buffers, keyed by physical address).
 */
struct msm_buffer_node {
	struct rb_node rb_node_all_buffer;	/* link in buffer_root */
	struct rb_node rb_node_paddr;		/* link in phys_root */
	struct msm_mapped_buffer *buf;		/* mapping returned to caller */
	unsigned long length;			/* mapped size in bytes (post round-up) */
	unsigned int *subsystems;		/* subsystem ids buf->iova[] was mapped for */
	unsigned int nsubsys;			/* number of entries in subsystems/iova */
	unsigned int phys;			/* physical base address */
};
32
/* Tree of every tracked buffer, keyed by vaddr (or the buf pointer). */
static struct rb_root buffer_root;
/* Tree of kernel-mapped buffers, keyed by physical address. */
static struct rb_root phys_root;
/* Serializes all lookups/insertions/removals on both trees above. */
DEFINE_MUTEX(msm_buffer_mutex);

/*
 * Subsystem id -> IOMMU domain number lookup table.  The trailing
 * 0xFFFFFFFF entry is the out-of-range fallback returned by
 * msm_subsystem_get_domain_no() (presumably index MAX_SUBSYSTEM_ID —
 * TODO confirm the enum lines up with this table).
 */
static unsigned long subsystem_to_domain_tbl[] = {
	VIDEO_DOMAIN,
	VIDEO_DOMAIN,
	CAMERA_DOMAIN,
	DISPLAY_READ_DOMAIN,
	DISPLAY_WRITE_DOMAIN,
	ROTATOR_SRC_DOMAIN,
	ROTATOR_DST_DOMAIN,
	0xFFFFFFFF
};
47
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070048static struct msm_buffer_node *find_buffer(void *key)
49{
50 struct rb_root *root = &buffer_root;
51 struct rb_node *p = root->rb_node;
52
53 mutex_lock(&msm_buffer_mutex);
54
55 while (p) {
56 struct msm_buffer_node *node;
57
58 node = rb_entry(p, struct msm_buffer_node, rb_node_all_buffer);
59 if (node->buf->vaddr) {
60 if (key < node->buf->vaddr)
61 p = p->rb_left;
62 else if (key > node->buf->vaddr)
63 p = p->rb_right;
64 else {
65 mutex_unlock(&msm_buffer_mutex);
66 return node;
67 }
68 } else {
69 if (key < (void *)node->buf)
70 p = p->rb_left;
71 else if (key > (void *)node->buf)
72 p = p->rb_right;
73 else {
74 mutex_unlock(&msm_buffer_mutex);
75 return node;
76 }
77 }
78 }
79 mutex_unlock(&msm_buffer_mutex);
80 return NULL;
81}
82
83static struct msm_buffer_node *find_buffer_phys(unsigned int phys)
84{
85 struct rb_root *root = &phys_root;
86 struct rb_node *p = root->rb_node;
87
88 mutex_lock(&msm_buffer_mutex);
89
90 while (p) {
91 struct msm_buffer_node *node;
92
93 node = rb_entry(p, struct msm_buffer_node, rb_node_paddr);
94 if (phys < node->phys)
95 p = p->rb_left;
96 else if (phys > node->phys)
97 p = p->rb_right;
98 else {
99 mutex_unlock(&msm_buffer_mutex);
100 return node;
101 }
102 }
103 mutex_unlock(&msm_buffer_mutex);
104 return NULL;
105
106}
107
108static int add_buffer(struct msm_buffer_node *node)
109{
110 struct rb_root *root = &buffer_root;
111 struct rb_node **p = &root->rb_node;
112 struct rb_node *parent = NULL;
113 void *key;
114
115 if (node->buf->vaddr)
116 key = node->buf->vaddr;
117 else
118 key = node->buf;
119
120 mutex_lock(&msm_buffer_mutex);
121 while (*p) {
122 struct msm_buffer_node *tmp;
123 parent = *p;
124
125 tmp = rb_entry(parent, struct msm_buffer_node,
126 rb_node_all_buffer);
127
128 if (tmp->buf->vaddr) {
129 if (key < tmp->buf->vaddr)
130 p = &(*p)->rb_left;
131 else if (key > tmp->buf->vaddr)
132 p = &(*p)->rb_right;
133 else {
134 WARN(1, "tried to add buffer twice! buf = %p"
135 " vaddr = %p iova = %p", tmp->buf,
136 tmp->buf->vaddr,
137 tmp->buf->iova);
138 mutex_unlock(&msm_buffer_mutex);
139 return -EINVAL;
140
141 }
142 } else {
143 if (key < (void *)tmp->buf)
144 p = &(*p)->rb_left;
145 else if (key > (void *)tmp->buf)
146 p = &(*p)->rb_right;
147 else {
148 WARN(1, "tried to add buffer twice! buf = %p"
149 " vaddr = %p iova = %p", tmp->buf,
150 tmp->buf->vaddr,
151 tmp->buf->iova);
152 mutex_unlock(&msm_buffer_mutex);
153 return -EINVAL;
154 }
155 }
156 }
157 rb_link_node(&node->rb_node_all_buffer, parent, p);
158 rb_insert_color(&node->rb_node_all_buffer, root);
159 mutex_unlock(&msm_buffer_mutex);
160 return 0;
161}
162
163static int add_buffer_phys(struct msm_buffer_node *node)
164{
165 struct rb_root *root = &phys_root;
166 struct rb_node **p = &root->rb_node;
167 struct rb_node *parent = NULL;
168
169 mutex_lock(&msm_buffer_mutex);
170 while (*p) {
171 struct msm_buffer_node *tmp;
172 parent = *p;
173
174 tmp = rb_entry(parent, struct msm_buffer_node, rb_node_paddr);
175
176 if (node->phys < tmp->phys)
177 p = &(*p)->rb_left;
178 else if (node->phys > tmp->phys)
179 p = &(*p)->rb_right;
180 else {
181 WARN(1, "tried to add buffer twice! buf = %p"
182 " vaddr = %p iova = %p", tmp->buf,
183 tmp->buf->vaddr,
184 tmp->buf->iova);
185 mutex_unlock(&msm_buffer_mutex);
186 return -EINVAL;
187
188 }
189 }
190 rb_link_node(&node->rb_node_paddr, parent, p);
191 rb_insert_color(&node->rb_node_paddr, root);
192 mutex_unlock(&msm_buffer_mutex);
193 return 0;
194}
195
196static int remove_buffer(struct msm_buffer_node *victim_node)
197{
198 struct rb_root *root = &buffer_root;
199
200 if (!victim_node)
201 return -EINVAL;
202
203 mutex_lock(&msm_buffer_mutex);
204 rb_erase(&victim_node->rb_node_all_buffer, root);
205 mutex_unlock(&msm_buffer_mutex);
206 return 0;
207}
208
209static int remove_buffer_phys(struct msm_buffer_node *victim_node)
210{
211 struct rb_root *root = &phys_root;
212
213 if (!victim_node)
214 return -EINVAL;
215
216 mutex_lock(&msm_buffer_mutex);
217 rb_erase(&victim_node->rb_node_paddr, root);
218 mutex_unlock(&msm_buffer_mutex);
219 return 0;
220}
221
Laura Abbott9ac7af62012-04-17 11:22:35 -0700222static unsigned long msm_subsystem_get_domain_no(int subsys_id)
223{
224 if (subsys_id > INVALID_SUBSYS_ID && subsys_id <= MAX_SUBSYSTEM_ID &&
225 subsys_id < ARRAY_SIZE(subsystem_to_domain_tbl))
226 return subsystem_to_domain_tbl[subsys_id];
227 else
228 return subsystem_to_domain_tbl[MAX_SUBSYSTEM_ID];
229}
230
231static unsigned long msm_subsystem_get_partition_no(int subsys_id)
232{
233 switch (subsys_id) {
234 case MSM_SUBSYSTEM_VIDEO_FWARE:
235 return VIDEO_FIRMWARE_POOL;
236 case MSM_SUBSYSTEM_VIDEO:
237 return VIDEO_MAIN_POOL;
238 case MSM_SUBSYSTEM_CAMERA:
239 case MSM_SUBSYSTEM_DISPLAY:
240 case MSM_SUBSYSTEM_ROTATOR:
241 return GEN_POOL;
242 default:
243 return 0xFFFFFFFF;
244 }
245}
246
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700247phys_addr_t msm_subsystem_check_iova_mapping(int subsys_id, unsigned long iova)
248{
249 struct iommu_domain *subsys_domain;
250
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700251 if (!msm_use_iommu())
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700252 /*
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700253 * If there is no iommu, Just return the iova in this case.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700254 */
255 return iova;
256
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700257 subsys_domain = msm_get_iommu_domain(msm_subsystem_get_domain_no
258 (subsys_id));
Olav Haugan6594eb32013-02-25 14:34:16 -0800259 if (!subsys_domain)
260 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700261
262 return iommu_iova_to_phys(subsys_domain, iova);
263}
264EXPORT_SYMBOL(msm_subsystem_check_iova_mapping);
265
266struct msm_mapped_buffer *msm_subsystem_map_buffer(unsigned long phys,
267 unsigned int length,
268 unsigned int flags,
269 int *subsys_ids,
270 unsigned int nsubsys)
271{
272 struct msm_mapped_buffer *buf, *err;
273 struct msm_buffer_node *node;
274 int i = 0, j = 0, ret;
275 unsigned long iova_start = 0, temp_phys, temp_va = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700276 struct iommu_domain *d = NULL;
Laura Abbotte956cce2011-10-25 13:33:20 -0700277 int map_size = length;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700278
279 if (!((flags & MSM_SUBSYSTEM_MAP_KADDR) ||
280 (flags & MSM_SUBSYSTEM_MAP_IOVA))) {
281 pr_warn("%s: no mapping flag was specified. The caller"
282 " should explicitly specify what to map in the"
283 " flags.\n", __func__);
284 err = ERR_PTR(-EINVAL);
285 goto outret;
286 }
287
288 buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
289 if (!buf) {
290 err = ERR_PTR(-ENOMEM);
291 goto outret;
292 }
293
294 node = kzalloc(sizeof(*node), GFP_ATOMIC);
295 if (!node) {
296 err = ERR_PTR(-ENOMEM);
297 goto outkfreebuf;
298 }
299
300 node->phys = phys;
301
302 if (flags & MSM_SUBSYSTEM_MAP_KADDR) {
303 struct msm_buffer_node *old_buffer;
304
305 old_buffer = find_buffer_phys(phys);
306
307 if (old_buffer) {
308 WARN(1, "%s: Attempting to map %lx twice in the kernel"
309 " virtual space. Don't do that!\n", __func__,
310 phys);
311 err = ERR_PTR(-EINVAL);
312 goto outkfreenode;
313 }
314
315 if (flags & MSM_SUBSYSTEM_MAP_CACHED)
316 buf->vaddr = ioremap(phys, length);
317 else if (flags & MSM_SUBSYSTEM_MAP_KADDR)
318 buf->vaddr = ioremap_nocache(phys, length);
319 else {
320 pr_warn("%s: no cachability flag was indicated. Caller"
321 " must specify a cachability flag.\n",
322 __func__);
323 err = ERR_PTR(-EINVAL);
324 goto outkfreenode;
325 }
326
327 if (!buf->vaddr) {
328 pr_err("%s: could not ioremap\n", __func__);
329 err = ERR_PTR(-EINVAL);
330 goto outkfreenode;
331 }
332
333 if (add_buffer_phys(node)) {
334 err = ERR_PTR(-EINVAL);
335 goto outiounmap;
336 }
337 }
338
339 if ((flags & MSM_SUBSYSTEM_MAP_IOVA) && subsys_ids) {
Laura Abbott11962582011-08-02 16:29:21 -0700340 int min_align;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700341
Laura Abbott11962582011-08-02 16:29:21 -0700342 length = round_up(length, SZ_4K);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700343
Laura Abbotte956cce2011-10-25 13:33:20 -0700344 if (flags & MSM_SUBSYSTEM_MAP_IOMMU_2X)
345 map_size = 2 * length;
346 else
347 map_size = length;
348
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700349 buf->iova = kzalloc(sizeof(unsigned long)*nsubsys, GFP_ATOMIC);
350 if (!buf->iova) {
351 err = ERR_PTR(-ENOMEM);
352 goto outremovephys;
353 }
354
Laura Abbott675b31f2011-07-19 10:37:43 -0700355 /*
356 * The alignment must be specified as the exact value wanted
357 * e.g. 8k alignment must pass (0x2000 | other flags)
358 */
359 min_align = flags & ~(SZ_4K - 1);
360
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700361 for (i = 0; i < nsubsys; i++) {
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700362 unsigned int domain_no, partition_no;
363
364 if (!msm_use_iommu()) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700365 buf->iova[i] = phys;
366 continue;
367 }
368
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700369 d = msm_get_iommu_domain(
370 msm_subsystem_get_domain_no(subsys_ids[i]));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700371
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700372 if (!d) {
373 pr_err("%s: could not get domain for subsystem"
374 " %d\n", __func__, subsys_ids[i]);
375 continue;
376 }
377
378 domain_no = msm_subsystem_get_domain_no(subsys_ids[i]);
379 partition_no = msm_subsystem_get_partition_no(
380 subsys_ids[i]);
381
Laura Abbottd01221b2012-05-16 17:52:49 -0700382 ret = msm_allocate_iova_address(domain_no,
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700383 partition_no,
Laura Abbotte956cce2011-10-25 13:33:20 -0700384 map_size,
Laura Abbottd01221b2012-05-16 17:52:49 -0700385 max(min_align, SZ_4K),
386 &iova_start);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700387
Laura Abbottd01221b2012-05-16 17:52:49 -0700388 if (ret) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700389 pr_err("%s: could not allocate iova address\n",
390 __func__);
391 continue;
392 }
393
394 temp_phys = phys;
395 temp_va = iova_start;
Laura Abbott11962582011-08-02 16:29:21 -0700396 for (j = length; j > 0; j -= SZ_4K,
397 temp_phys += SZ_4K,
398 temp_va += SZ_4K) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700399 ret = iommu_map(d, temp_va, temp_phys,
Steve Mucklef132c6c2012-06-06 18:30:57 -0700400 SZ_4K,
Olav Hauganf310cf22012-05-08 08:42:49 -0700401 (IOMMU_READ | IOMMU_WRITE));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700402 if (ret) {
403 pr_err("%s: could not map iommu for"
404 " domain %p, iova %lx,"
405 " phys %lx\n", __func__, d,
406 temp_va, temp_phys);
407 err = ERR_PTR(-EINVAL);
408 goto outdomain;
409 }
410 }
411 buf->iova[i] = iova_start;
Laura Abbotte956cce2011-10-25 13:33:20 -0700412
413 if (flags & MSM_SUBSYSTEM_MAP_IOMMU_2X)
414 msm_iommu_map_extra
Mitchel Humpherysaf3b5222013-01-15 15:38:52 -0800415 (d, temp_va, phys, length, SZ_4K,
416 IOMMU_READ);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700417 }
418
419 }
420
421 node->buf = buf;
422 node->subsystems = subsys_ids;
Laura Abbotte956cce2011-10-25 13:33:20 -0700423 node->length = map_size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700424 node->nsubsys = nsubsys;
425
426 if (add_buffer(node)) {
427 err = ERR_PTR(-EINVAL);
428 goto outiova;
429 }
430
431 return buf;
432
433outiova:
Olav Haugan6594eb32013-02-25 14:34:16 -0800434 if (flags & MSM_SUBSYSTEM_MAP_IOVA) {
435 if (d)
436 iommu_unmap(d, temp_va, SZ_4K);
437 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700438outdomain:
439 if (flags & MSM_SUBSYSTEM_MAP_IOVA) {
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700440 /* Unmap the rest of the current domain, i */
Olav Haugan6594eb32013-02-25 14:34:16 -0800441 if (d) {
442 for (j -= SZ_4K, temp_va -= SZ_4K;
443 j > 0; temp_va -= SZ_4K, j -= SZ_4K)
444 iommu_unmap(d, temp_va, SZ_4K);
445 }
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700446 /* Unmap all the other domains */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700447 for (i--; i >= 0; i--) {
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700448 unsigned int domain_no, partition_no;
449 if (!msm_use_iommu())
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700450 continue;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700451 domain_no = msm_subsystem_get_domain_no(subsys_ids[i]);
452 partition_no = msm_subsystem_get_partition_no(
453 subsys_ids[i]);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700454
Olav Haugan6594eb32013-02-25 14:34:16 -0800455 d = msm_get_iommu_domain(domain_no);
456
457 if (d) {
458 temp_va = buf->iova[i];
459 for (j = length; j > 0; j -= SZ_4K,
460 temp_va += SZ_4K)
461 iommu_unmap(d, temp_va, SZ_4K);
462 }
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700463 msm_free_iova_address(buf->iova[i], domain_no,
464 partition_no, length);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700465 }
466
467 kfree(buf->iova);
468 }
469
470outremovephys:
471 if (flags & MSM_SUBSYSTEM_MAP_KADDR)
472 remove_buffer_phys(node);
473outiounmap:
474 if (flags & MSM_SUBSYSTEM_MAP_KADDR)
475 iounmap(buf->vaddr);
476outkfreenode:
477 kfree(node);
478outkfreebuf:
479 kfree(buf);
480outret:
481 return err;
482}
483EXPORT_SYMBOL(msm_subsystem_map_buffer);
484
485int msm_subsystem_unmap_buffer(struct msm_mapped_buffer *buf)
486{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700487 struct msm_buffer_node *node;
488 int i, j, ret;
489 unsigned long temp_va;
490
Laura Abbotta7f16e32011-11-30 10:00:18 -0800491 if (IS_ERR_OR_NULL(buf))
492 goto out;
493
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700494 if (buf->vaddr)
495 node = find_buffer(buf->vaddr);
496 else
497 node = find_buffer(buf);
498
499 if (!node)
500 goto out;
501
502 if (node->buf != buf) {
503 pr_err("%s: caller must pass in the same buffer structure"
504 " returned from map_buffer when freeding\n", __func__);
505 goto out;
506 }
507
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700508 if (buf->iova) {
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700509 if (msm_use_iommu())
510 for (i = 0; i < node->nsubsys; i++) {
511 struct iommu_domain *subsys_domain;
512 unsigned int domain_no, partition_no;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700513
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700514 subsys_domain = msm_get_iommu_domain(
515 msm_subsystem_get_domain_no(
516 node->subsystems[i]));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700517
Olav Haugan6594eb32013-02-25 14:34:16 -0800518 if (!subsys_domain)
519 continue;
520
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700521 domain_no = msm_subsystem_get_domain_no(
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700522 node->subsystems[i]);
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700523 partition_no = msm_subsystem_get_partition_no(
524 node->subsystems[i]);
525
526 temp_va = buf->iova[i];
527 for (j = node->length; j > 0; j -= SZ_4K,
528 temp_va += SZ_4K) {
529 ret = iommu_unmap(subsys_domain,
530 temp_va,
Steve Mucklef132c6c2012-06-06 18:30:57 -0700531 SZ_4K);
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700532 WARN(ret, "iommu_unmap returned a "
533 " non-zero value.\n");
534 }
535 msm_free_iova_address(buf->iova[i], domain_no,
536 partition_no, node->length);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700537 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700538 kfree(buf->iova);
539
540 }
541
542 if (buf->vaddr) {
543 remove_buffer_phys(node);
544 iounmap(buf->vaddr);
545 }
546
547 remove_buffer(node);
548 kfree(node);
549 kfree(buf);
550
551 return 0;
552out:
553 return -EINVAL;
554}
555EXPORT_SYMBOL(msm_subsystem_unmap_buffer);