/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/io.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/memory_alloc.h>
#include <linux/module.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
#include <mach/msm_subsystem_map.h>

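/*
 * Bookkeeping for one mapped buffer. Each buffer lives in two red-black
 * trees: buffer_root (keyed by the kernel virtual address, or by the
 * msm_mapped_buffer pointer when no kernel mapping exists) and phys_root
 * (keyed by physical address, used only for kernel mappings).
 */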
struct msm_buffer_node {
        struct rb_node rb_node_all_buffer;
        struct rb_node rb_node_paddr;
        struct msm_mapped_buffer *buf;
        unsigned long length;
        unsigned int *subsystems;
        unsigned int nsubsys;
        unsigned int phys;
};

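/*
 * buffer_root holds every mapped buffer; phys_root holds only buffers that
 * have a kernel virtual mapping, keyed by physical address. Both trees are
 * serialized by msm_buffer_mutex.
 */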
static struct rb_root buffer_root;
static struct rb_root phys_root;
DEFINE_MUTEX(msm_buffer_mutex);

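/*
 * Subsystem id to IOMMU domain lookup table, indexed by the MSM_SUBSYSTEM_*
 * id in msm_subsystem_get_domain_no(). Out-of-range ids resolve to
 * subsystem_to_domain_tbl[MAX_SUBSYSTEM_ID], the 0xFFFFFFFF sentinel.
 */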
static unsigned long subsystem_to_domain_tbl[] = {
        VIDEO_DOMAIN,
        VIDEO_DOMAIN,
        CAMERA_DOMAIN,
        DISPLAY_DOMAIN,
        ROTATOR_DOMAIN,
        0xFFFFFFFF
};

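/*
 * Look a buffer up in buffer_root. The key is the kernel virtual address
 * when the buffer was mapped with MSM_SUBSYSTEM_MAP_KADDR, otherwise the
 * msm_mapped_buffer pointer itself (add_buffer uses the same convention).
 * Returns the tracking node on a hit, NULL otherwise.
 */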
static struct msm_buffer_node *find_buffer(void *key)
{
        struct rb_root *root = &buffer_root;
        struct rb_node *p = root->rb_node;

        mutex_lock(&msm_buffer_mutex);

        while (p) {
                struct msm_buffer_node *node;

                node = rb_entry(p, struct msm_buffer_node, rb_node_all_buffer);
                if (node->buf->vaddr) {
                        if (key < node->buf->vaddr)
                                p = p->rb_left;
                        else if (key > node->buf->vaddr)
                                p = p->rb_right;
                        else {
                                mutex_unlock(&msm_buffer_mutex);
                                return node;
                        }
                } else {
                        if (key < (void *)node->buf)
                                p = p->rb_left;
                        else if (key > (void *)node->buf)
                                p = p->rb_right;
                        else {
                                mutex_unlock(&msm_buffer_mutex);
                                return node;
                        }
                }
        }
        mutex_unlock(&msm_buffer_mutex);
        return NULL;
}

static struct msm_buffer_node *find_buffer_phys(unsigned int phys)
{
        struct rb_root *root = &phys_root;
        struct rb_node *p = root->rb_node;

        mutex_lock(&msm_buffer_mutex);

        while (p) {
                struct msm_buffer_node *node;

                node = rb_entry(p, struct msm_buffer_node, rb_node_paddr);
                if (phys < node->phys)
                        p = p->rb_left;
                else if (phys > node->phys)
                        p = p->rb_right;
                else {
                        mutex_unlock(&msm_buffer_mutex);
                        return node;
                }
        }
        mutex_unlock(&msm_buffer_mutex);
        return NULL;
}

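/*
 * Insert a node into buffer_root using the same vaddr-or-buf-pointer key
 * as find_buffer(). Duplicate insertions trigger a WARN and return -EINVAL.
 */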
static int add_buffer(struct msm_buffer_node *node)
{
        struct rb_root *root = &buffer_root;
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        void *key;

        if (node->buf->vaddr)
                key = node->buf->vaddr;
        else
                key = node->buf;

        mutex_lock(&msm_buffer_mutex);
        while (*p) {
                struct msm_buffer_node *tmp;
                parent = *p;

                tmp = rb_entry(parent, struct msm_buffer_node,
                                                rb_node_all_buffer);

                if (tmp->buf->vaddr) {
                        if (key < tmp->buf->vaddr)
                                p = &(*p)->rb_left;
                        else if (key > tmp->buf->vaddr)
                                p = &(*p)->rb_right;
                        else {
                                WARN(1, "tried to add buffer twice! buf = %p"
                                        " vaddr = %p iova = %p", tmp->buf,
                                        tmp->buf->vaddr,
                                        tmp->buf->iova);
                                mutex_unlock(&msm_buffer_mutex);
                                return -EINVAL;
                        }
                } else {
                        if (key < (void *)tmp->buf)
                                p = &(*p)->rb_left;
                        else if (key > (void *)tmp->buf)
                                p = &(*p)->rb_right;
                        else {
                                WARN(1, "tried to add buffer twice! buf = %p"
                                        " vaddr = %p iova = %p", tmp->buf,
                                        tmp->buf->vaddr,
                                        tmp->buf->iova);
                                mutex_unlock(&msm_buffer_mutex);
                                return -EINVAL;
                        }
                }
        }
        rb_link_node(&node->rb_node_all_buffer, parent, p);
        rb_insert_color(&node->rb_node_all_buffer, root);
        mutex_unlock(&msm_buffer_mutex);
        return 0;
}

static int add_buffer_phys(struct msm_buffer_node *node)
{
        struct rb_root *root = &phys_root;
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;

        mutex_lock(&msm_buffer_mutex);
        while (*p) {
                struct msm_buffer_node *tmp;
                parent = *p;

                tmp = rb_entry(parent, struct msm_buffer_node, rb_node_paddr);

                if (node->phys < tmp->phys)
                        p = &(*p)->rb_left;
                else if (node->phys > tmp->phys)
                        p = &(*p)->rb_right;
                else {
                        WARN(1, "tried to add buffer twice! buf = %p"
                                " vaddr = %p iova = %p", tmp->buf,
                                tmp->buf->vaddr,
                                tmp->buf->iova);
                        mutex_unlock(&msm_buffer_mutex);
                        return -EINVAL;
                }
        }
        rb_link_node(&node->rb_node_paddr, parent, p);
        rb_insert_color(&node->rb_node_paddr, root);
        mutex_unlock(&msm_buffer_mutex);
        return 0;
}

static int remove_buffer(struct msm_buffer_node *victim_node)
{
        struct rb_root *root = &buffer_root;

        if (!victim_node)
                return -EINVAL;

        mutex_lock(&msm_buffer_mutex);
        rb_erase(&victim_node->rb_node_all_buffer, root);
        mutex_unlock(&msm_buffer_mutex);
        return 0;
}

static int remove_buffer_phys(struct msm_buffer_node *victim_node)
{
        struct rb_root *root = &phys_root;

        if (!victim_node)
                return -EINVAL;

        mutex_lock(&msm_buffer_mutex);
        rb_erase(&victim_node->rb_node_paddr, root);
        mutex_unlock(&msm_buffer_mutex);
        return 0;
}

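/*
 * Translate an MSM_SUBSYSTEM_* id into the IOMMU domain number and the iova
 * partition (pool) the mapping code below should allocate from. Unknown ids
 * return the sentinel table entry or pool id rather than an error.
 */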
static unsigned long msm_subsystem_get_domain_no(int subsys_id)
{
        if (subsys_id > INVALID_SUBSYS_ID && subsys_id <= MAX_SUBSYSTEM_ID &&
            subsys_id < ARRAY_SIZE(subsystem_to_domain_tbl))
                return subsystem_to_domain_tbl[subsys_id];
        else
                return subsystem_to_domain_tbl[MAX_SUBSYSTEM_ID];
}

static unsigned long msm_subsystem_get_partition_no(int subsys_id)
{
        switch (subsys_id) {
        case MSM_SUBSYSTEM_VIDEO_FWARE:
                return VIDEO_FIRMWARE_POOL;
        case MSM_SUBSYSTEM_VIDEO:
                return VIDEO_MAIN_POOL;
        case MSM_SUBSYSTEM_CAMERA:
        case MSM_SUBSYSTEM_DISPLAY:
        case MSM_SUBSYSTEM_ROTATOR:
                return GEN_POOL;
        default:
                return 0xFFFFFFFF;
        }
}

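/*
 * msm_subsystem_check_iova_mapping() - translate an iova back to the
 * physical address it maps to in the given subsystem's IOMMU domain.
 * Without an IOMMU the iova already is a physical address and is returned
 * unchanged.
 */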
phys_addr_t msm_subsystem_check_iova_mapping(int subsys_id, unsigned long iova)
{
        struct iommu_domain *subsys_domain;

        if (!msm_use_iommu())
                /*
                 * If there is no IOMMU, just return the iova.
                 */
                return iova;

        subsys_domain = msm_get_iommu_domain(msm_subsystem_get_domain_no
                                                                (subsys_id));

        return iommu_iova_to_phys(subsys_domain, iova);
}
EXPORT_SYMBOL(msm_subsystem_check_iova_mapping);

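/*
 * msm_subsystem_map_buffer() - map a physically contiguous buffer into the
 * kernel (MSM_SUBSYSTEM_MAP_KADDR) and/or into the IOMMU domains of the
 * subsystems listed in subsys_ids (MSM_SUBSYSTEM_MAP_IOVA). Returns the
 * mapped-buffer descriptor, or an ERR_PTR() value on failure.
 *
 * Illustrative call sequence only; this is a sketch, not taken from a real
 * caller, and the locals phys, len and mb are placeholders:
 *
 *      int subsys[] = { MSM_SUBSYSTEM_VIDEO };
 *      struct msm_mapped_buffer *mb;
 *
 *      mb = msm_subsystem_map_buffer(phys, len,
 *                      MSM_SUBSYSTEM_MAP_KADDR | MSM_SUBSYSTEM_MAP_IOVA,
 *                      subsys, ARRAY_SIZE(subsys));
 *      if (IS_ERR(mb))
 *              return PTR_ERR(mb);
 *
 *      ... use mb->vaddr (kernel mapping) and mb->iova[0] (video iova) ...
 *
 *      msm_subsystem_unmap_buffer(mb);
 */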
struct msm_mapped_buffer *msm_subsystem_map_buffer(unsigned long phys,
                                                unsigned int length,
                                                unsigned int flags,
                                                int *subsys_ids,
                                                unsigned int nsubsys)
{
        struct msm_mapped_buffer *buf, *err;
        struct msm_buffer_node *node;
        int i = 0, j = 0, ret;
        unsigned long iova_start = 0, temp_phys, temp_va = 0;
        struct iommu_domain *d = NULL;
        int map_size = length;

        if (!((flags & MSM_SUBSYSTEM_MAP_KADDR) ||
              (flags & MSM_SUBSYSTEM_MAP_IOVA))) {
                pr_warn("%s: no mapping flag was specified. The caller"
                        " should explicitly specify what to map in the"
                        " flags.\n", __func__);
                err = ERR_PTR(-EINVAL);
                goto outret;
        }

        buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
        if (!buf) {
                err = ERR_PTR(-ENOMEM);
                goto outret;
        }

        node = kzalloc(sizeof(*node), GFP_ATOMIC);
        if (!node) {
                err = ERR_PTR(-ENOMEM);
                goto outkfreebuf;
        }

        node->phys = phys;

        if (flags & MSM_SUBSYSTEM_MAP_KADDR) {
                struct msm_buffer_node *old_buffer;

                old_buffer = find_buffer_phys(phys);

                if (old_buffer) {
                        WARN(1, "%s: Attempting to map %lx twice in the kernel"
                                " virtual space. Don't do that!\n", __func__,
                                phys);
                        err = ERR_PTR(-EINVAL);
                        goto outkfreenode;
                }

                if (flags & MSM_SUBSYSTEM_MAP_CACHED)
                        buf->vaddr = ioremap(phys, length);
                else if (flags & MSM_SUBSYSTEM_MAP_KADDR)
                        buf->vaddr = ioremap_nocache(phys, length);
                else {
                        pr_warn("%s: no cacheability flag was indicated."
                                " Caller must specify a cacheability flag.\n",
                                __func__);
                        err = ERR_PTR(-EINVAL);
                        goto outkfreenode;
                }

                if (!buf->vaddr) {
                        pr_err("%s: could not ioremap\n", __func__);
                        err = ERR_PTR(-EINVAL);
                        goto outkfreenode;
                }

                if (add_buffer_phys(node)) {
                        err = ERR_PTR(-EINVAL);
                        goto outiounmap;
                }
        }

        if ((flags & MSM_SUBSYSTEM_MAP_IOVA) && subsys_ids) {
                int min_align;

                length = round_up(length, SZ_4K);

                if (flags & MSM_SUBSYSTEM_MAP_IOMMU_2X)
                        map_size = 2 * length;
                else
                        map_size = length;

                buf->iova = kzalloc(sizeof(unsigned long)*nsubsys, GFP_ATOMIC);
                if (!buf->iova) {
                        err = ERR_PTR(-ENOMEM);
                        goto outremovephys;
                }

                /*
                 * The alignment must be specified as the exact value wanted
                 * e.g. 8k alignment must pass (0x2000 | other flags)
                 */
                min_align = flags & ~(SZ_4K - 1);

                for (i = 0; i < nsubsys; i++) {
                        unsigned int domain_no, partition_no;

                        if (!msm_use_iommu()) {
                                buf->iova[i] = phys;
                                continue;
                        }

                        d = msm_get_iommu_domain(
                                msm_subsystem_get_domain_no(subsys_ids[i]));

                        if (!d) {
                                pr_err("%s: could not get domain for subsystem"
                                        " %d\n", __func__, subsys_ids[i]);
                                continue;
                        }

                        domain_no = msm_subsystem_get_domain_no(subsys_ids[i]);
                        partition_no = msm_subsystem_get_partition_no(
                                                                subsys_ids[i]);

                        ret = msm_allocate_iova_address(domain_no,
                                                        partition_no,
                                                        map_size,
                                                        max(min_align, SZ_4K),
                                                        &iova_start);

                        if (ret) {
                                pr_err("%s: could not allocate iova address\n",
                                        __func__);
                                continue;
                        }

                        temp_phys = phys;
                        temp_va = iova_start;
                        for (j = length; j > 0; j -= SZ_4K,
                                                temp_phys += SZ_4K,
                                                temp_va += SZ_4K) {
                                ret = iommu_map(d, temp_va, temp_phys,
                                                get_order(SZ_4K),
                                                (IOMMU_READ | IOMMU_WRITE));
                                if (ret) {
                                        pr_err("%s: could not map iommu for"
                                                " domain %p, iova %lx,"
                                                " phys %lx\n", __func__, d,
                                                temp_va, temp_phys);
                                        err = ERR_PTR(-EINVAL);
                                        goto outdomain;
                                }
                        }
                        buf->iova[i] = iova_start;

                        if (flags & MSM_SUBSYSTEM_MAP_IOMMU_2X)
                                msm_iommu_map_extra
                                        (d, temp_va, length, SZ_4K,
                                        (IOMMU_READ | IOMMU_WRITE));
                }
        }

        node->buf = buf;
        node->subsystems = subsys_ids;
        node->length = map_size;
        node->nsubsys = nsubsys;

        if (add_buffer(node)) {
                err = ERR_PTR(-EINVAL);
                goto outiova;
        }

        return buf;

outiova:
        if (flags & MSM_SUBSYSTEM_MAP_IOVA)
                iommu_unmap(d, temp_va, get_order(SZ_4K));
outdomain:
        if (flags & MSM_SUBSYSTEM_MAP_IOVA) {
                /* Unmap the rest of the current domain, i */
                for (j -= SZ_4K, temp_va -= SZ_4K;
                        j > 0; temp_va -= SZ_4K, j -= SZ_4K)
                        iommu_unmap(d, temp_va, get_order(SZ_4K));

                /* Unmap all the other domains */
                for (i--; i >= 0; i--) {
                        unsigned int domain_no, partition_no;
                        if (!msm_use_iommu())
                                continue;
                        domain_no = msm_subsystem_get_domain_no(subsys_ids[i]);
                        partition_no = msm_subsystem_get_partition_no(
                                                        subsys_ids[i]);

                        temp_va = buf->iova[i];
                        for (j = length; j > 0; j -= SZ_4K,
                                                temp_va += SZ_4K)
                                iommu_unmap(d, temp_va, get_order(SZ_4K));
                        msm_free_iova_address(buf->iova[i], domain_no,
                                        partition_no, length);
                }

                kfree(buf->iova);
        }

outremovephys:
        if (flags & MSM_SUBSYSTEM_MAP_KADDR)
                remove_buffer_phys(node);
outiounmap:
        if (flags & MSM_SUBSYSTEM_MAP_KADDR)
                iounmap(buf->vaddr);
outkfreenode:
        kfree(node);
outkfreebuf:
        kfree(buf);
outret:
        return err;
}
EXPORT_SYMBOL(msm_subsystem_map_buffer);

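/*
 * msm_subsystem_unmap_buffer() - undo msm_subsystem_map_buffer(): tear down
 * any iova mappings, release the iova ranges, iounmap the kernel mapping
 * and free the tracking structures. The argument must be the exact pointer
 * that msm_subsystem_map_buffer() returned.
 */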
int msm_subsystem_unmap_buffer(struct msm_mapped_buffer *buf)
{
        struct msm_buffer_node *node;
        int i, j, ret;
        unsigned long temp_va;

        if (IS_ERR_OR_NULL(buf))
                goto out;

        if (buf->vaddr)
                node = find_buffer(buf->vaddr);
        else
                node = find_buffer(buf);

        if (!node)
                goto out;

        if (node->buf != buf) {
                pr_err("%s: caller must pass in the same buffer structure"
                        " returned from map_buffer when freeing\n", __func__);
                goto out;
        }

        if (buf->iova) {
                if (msm_use_iommu())
                        for (i = 0; i < node->nsubsys; i++) {
                                struct iommu_domain *subsys_domain;
                                unsigned int domain_no, partition_no;

                                subsys_domain = msm_get_iommu_domain(
                                                msm_subsystem_get_domain_no(
                                                node->subsystems[i]));

                                domain_no = msm_subsystem_get_domain_no(
                                                node->subsystems[i]);
                                partition_no = msm_subsystem_get_partition_no(
                                                node->subsystems[i]);

                                temp_va = buf->iova[i];
                                for (j = node->length; j > 0; j -= SZ_4K,
                                                temp_va += SZ_4K) {
                                        ret = iommu_unmap(subsys_domain,
                                                        temp_va,
                                                        get_order(SZ_4K));
                                        WARN(ret, "iommu_unmap returned a"
                                                " non-zero value.\n");
                                }
                                msm_free_iova_address(buf->iova[i], domain_no,
                                                partition_no, node->length);
                        }
                kfree(buf->iova);
        }

        if (buf->vaddr) {
                remove_buffer_phys(node);
                iounmap(buf->vaddr);
        }

        remove_buffer(node);
        kfree(node);
        kfree(buf);

        return 0;
out:
        return -EINVAL;
}
EXPORT_SYMBOL(msm_subsystem_unmap_buffer);