/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/io.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/memory_alloc.h>
#include <linux/module.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
#include <mach/msm_subsystem_map.h>

/*
 * Tracking node for one mapped buffer.  Every node lives in buffer_root;
 * nodes for kernel-mapped (ioremapped) buffers are additionally linked
 * into phys_root, keyed by physical address.
 */
struct msm_buffer_node {
	struct rb_node rb_node_all_buffer;	/* link in buffer_root */
	struct rb_node rb_node_paddr;		/* link in phys_root (KADDR maps only) */
	struct msm_mapped_buffer *buf;		/* the mapping handle returned to the caller */
	unsigned long length;			/* iova map size in bytes (map_size in map_buffer) */
	unsigned int *subsystems;		/* subsystem id array; caller-owned storage (assigned from an int *) */
	unsigned int nsubsys;			/* number of entries in subsystems */
	unsigned int phys;			/* physical base address of the buffer */
};

/* Tree of all tracked buffers, keyed by vaddr (or by the buf pointer when no vaddr). */
static struct rb_root buffer_root;
/* Tree of kernel-mapped buffers, keyed by physical address. */
static struct rb_root phys_root;
/* Protects buffer_root and phys_root. */
/* NOTE(review): not static — confirm nothing outside this file needs it. */
DEFINE_MUTEX(msm_buffer_mutex);

37static struct msm_buffer_node *find_buffer(void *key)
38{
39 struct rb_root *root = &buffer_root;
40 struct rb_node *p = root->rb_node;
41
42 mutex_lock(&msm_buffer_mutex);
43
44 while (p) {
45 struct msm_buffer_node *node;
46
47 node = rb_entry(p, struct msm_buffer_node, rb_node_all_buffer);
48 if (node->buf->vaddr) {
49 if (key < node->buf->vaddr)
50 p = p->rb_left;
51 else if (key > node->buf->vaddr)
52 p = p->rb_right;
53 else {
54 mutex_unlock(&msm_buffer_mutex);
55 return node;
56 }
57 } else {
58 if (key < (void *)node->buf)
59 p = p->rb_left;
60 else if (key > (void *)node->buf)
61 p = p->rb_right;
62 else {
63 mutex_unlock(&msm_buffer_mutex);
64 return node;
65 }
66 }
67 }
68 mutex_unlock(&msm_buffer_mutex);
69 return NULL;
70}
71
72static struct msm_buffer_node *find_buffer_phys(unsigned int phys)
73{
74 struct rb_root *root = &phys_root;
75 struct rb_node *p = root->rb_node;
76
77 mutex_lock(&msm_buffer_mutex);
78
79 while (p) {
80 struct msm_buffer_node *node;
81
82 node = rb_entry(p, struct msm_buffer_node, rb_node_paddr);
83 if (phys < node->phys)
84 p = p->rb_left;
85 else if (phys > node->phys)
86 p = p->rb_right;
87 else {
88 mutex_unlock(&msm_buffer_mutex);
89 return node;
90 }
91 }
92 mutex_unlock(&msm_buffer_mutex);
93 return NULL;
94
95}
96
97static int add_buffer(struct msm_buffer_node *node)
98{
99 struct rb_root *root = &buffer_root;
100 struct rb_node **p = &root->rb_node;
101 struct rb_node *parent = NULL;
102 void *key;
103
104 if (node->buf->vaddr)
105 key = node->buf->vaddr;
106 else
107 key = node->buf;
108
109 mutex_lock(&msm_buffer_mutex);
110 while (*p) {
111 struct msm_buffer_node *tmp;
112 parent = *p;
113
114 tmp = rb_entry(parent, struct msm_buffer_node,
115 rb_node_all_buffer);
116
117 if (tmp->buf->vaddr) {
118 if (key < tmp->buf->vaddr)
119 p = &(*p)->rb_left;
120 else if (key > tmp->buf->vaddr)
121 p = &(*p)->rb_right;
122 else {
123 WARN(1, "tried to add buffer twice! buf = %p"
124 " vaddr = %p iova = %p", tmp->buf,
125 tmp->buf->vaddr,
126 tmp->buf->iova);
127 mutex_unlock(&msm_buffer_mutex);
128 return -EINVAL;
129
130 }
131 } else {
132 if (key < (void *)tmp->buf)
133 p = &(*p)->rb_left;
134 else if (key > (void *)tmp->buf)
135 p = &(*p)->rb_right;
136 else {
137 WARN(1, "tried to add buffer twice! buf = %p"
138 " vaddr = %p iova = %p", tmp->buf,
139 tmp->buf->vaddr,
140 tmp->buf->iova);
141 mutex_unlock(&msm_buffer_mutex);
142 return -EINVAL;
143 }
144 }
145 }
146 rb_link_node(&node->rb_node_all_buffer, parent, p);
147 rb_insert_color(&node->rb_node_all_buffer, root);
148 mutex_unlock(&msm_buffer_mutex);
149 return 0;
150}
151
152static int add_buffer_phys(struct msm_buffer_node *node)
153{
154 struct rb_root *root = &phys_root;
155 struct rb_node **p = &root->rb_node;
156 struct rb_node *parent = NULL;
157
158 mutex_lock(&msm_buffer_mutex);
159 while (*p) {
160 struct msm_buffer_node *tmp;
161 parent = *p;
162
163 tmp = rb_entry(parent, struct msm_buffer_node, rb_node_paddr);
164
165 if (node->phys < tmp->phys)
166 p = &(*p)->rb_left;
167 else if (node->phys > tmp->phys)
168 p = &(*p)->rb_right;
169 else {
170 WARN(1, "tried to add buffer twice! buf = %p"
171 " vaddr = %p iova = %p", tmp->buf,
172 tmp->buf->vaddr,
173 tmp->buf->iova);
174 mutex_unlock(&msm_buffer_mutex);
175 return -EINVAL;
176
177 }
178 }
179 rb_link_node(&node->rb_node_paddr, parent, p);
180 rb_insert_color(&node->rb_node_paddr, root);
181 mutex_unlock(&msm_buffer_mutex);
182 return 0;
183}
184
185static int remove_buffer(struct msm_buffer_node *victim_node)
186{
187 struct rb_root *root = &buffer_root;
188
189 if (!victim_node)
190 return -EINVAL;
191
192 mutex_lock(&msm_buffer_mutex);
193 rb_erase(&victim_node->rb_node_all_buffer, root);
194 mutex_unlock(&msm_buffer_mutex);
195 return 0;
196}
197
198static int remove_buffer_phys(struct msm_buffer_node *victim_node)
199{
200 struct rb_root *root = &phys_root;
201
202 if (!victim_node)
203 return -EINVAL;
204
205 mutex_lock(&msm_buffer_mutex);
206 rb_erase(&victim_node->rb_node_paddr, root);
207 mutex_unlock(&msm_buffer_mutex);
208 return 0;
209}
210
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700211phys_addr_t msm_subsystem_check_iova_mapping(int subsys_id, unsigned long iova)
212{
213 struct iommu_domain *subsys_domain;
214
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700215 if (!msm_use_iommu())
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700216 /*
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700217 * If there is no iommu, Just return the iova in this case.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700218 */
219 return iova;
220
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700221 subsys_domain = msm_get_iommu_domain(msm_subsystem_get_domain_no
222 (subsys_id));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700223
224 return iommu_iova_to_phys(subsys_domain, iova);
225}
226EXPORT_SYMBOL(msm_subsystem_check_iova_mapping);
227
/**
 * msm_subsystem_map_buffer() - map a physical buffer into the kernel
 * and/or one or more subsystem IOMMU address spaces.
 * @phys:	physical start address of the buffer
 * @length:	length of the buffer in bytes (rounded up to 4K for iova maps)
 * @flags:	MSM_SUBSYSTEM_MAP_* flags; bits above SZ_4K also encode the
 *		exact requested iova alignment (e.g. 0x2000 for 8K)
 * @subsys_ids:	array of subsystem ids to create iova mappings for; the
 *		pointer is stored in the tracking node, so the array must
 *		remain valid until msm_subsystem_unmap_buffer()
 * @nsubsys:	number of entries in @subsys_ids
 *
 * Returns a tracked msm_mapped_buffer, or an ERR_PTR() on failure.
 * Release with msm_subsystem_unmap_buffer().
 */
struct msm_mapped_buffer *msm_subsystem_map_buffer(unsigned long phys,
						unsigned int length,
						unsigned int flags,
						int *subsys_ids,
						unsigned int nsubsys)
{
	struct msm_mapped_buffer *buf, *err;
	struct msm_buffer_node *node;
	int i = 0, j = 0, ret;
	unsigned long iova_start = 0, temp_phys, temp_va = 0;
	struct iommu_domain *d = NULL;
	int map_size = length;

	/* The caller must ask for at least one kind of mapping. */
	if (!((flags & MSM_SUBSYSTEM_MAP_KADDR) ||
		(flags & MSM_SUBSYSTEM_MAP_IOVA))) {
		pr_warn("%s: no mapping flag was specified. The caller"
			" should explicitly specify what to map in the"
			" flags.\n", __func__);
		err = ERR_PTR(-EINVAL);
		goto outret;
	}

	/* GFP_ATOMIC: presumably callable from atomic context — confirm. */
	buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
	if (!buf) {
		err = ERR_PTR(-ENOMEM);
		goto outret;
	}

	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (!node) {
		err = ERR_PTR(-ENOMEM);
		goto outkfreebuf;
	}

	node->phys = phys;

	if (flags & MSM_SUBSYSTEM_MAP_KADDR) {
		struct msm_buffer_node *old_buffer;

		/* A physical range may be ioremapped only once. */
		old_buffer = find_buffer_phys(phys);

		if (old_buffer) {
			WARN(1, "%s: Attempting to map %lx twice in the kernel"
				" virtual space. Don't do that!\n", __func__,
				phys);
			err = ERR_PTR(-EINVAL);
			goto outkfreenode;
		}

		if (flags & MSM_SUBSYSTEM_MAP_CACHED)
			buf->vaddr = ioremap(phys, length);
		/*
		 * NOTE(review): the condition below is always true here —
		 * the enclosing if already checked MAP_KADDR — so the
		 * "no cachability flag" else branch is unreachable.
		 * Possibly an uncached flag was intended; confirm against
		 * the MSM_SUBSYSTEM_MAP_* definitions.
		 */
		else if (flags & MSM_SUBSYSTEM_MAP_KADDR)
			buf->vaddr = ioremap_nocache(phys, length);
		else {
			pr_warn("%s: no cachability flag was indicated. Caller"
				" must specify a cachability flag.\n",
				__func__);
			err = ERR_PTR(-EINVAL);
			goto outkfreenode;
		}

		if (!buf->vaddr) {
			pr_err("%s: could not ioremap\n", __func__);
			err = ERR_PTR(-EINVAL);
			goto outkfreenode;
		}

		if (add_buffer_phys(node)) {
			err = ERR_PTR(-EINVAL);
			goto outiounmap;
		}
	}

	if ((flags & MSM_SUBSYSTEM_MAP_IOVA) && subsys_ids) {
		int min_align;

		length = round_up(length, SZ_4K);

		/* IOMMU_2X reserves double the iova range for the buffer. */
		if (flags & MSM_SUBSYSTEM_MAP_IOMMU_2X)
			map_size = 2 * length;
		else
			map_size = length;

		buf->iova = kzalloc(sizeof(unsigned long)*nsubsys, GFP_ATOMIC);
		if (!buf->iova) {
			err = ERR_PTR(-ENOMEM);
			goto outremovephys;
		}

		/*
		 * The alignment must be specified as the exact value wanted
		 * e.g. 8k alignment must pass (0x2000 | other flags)
		 */
		min_align = flags & ~(SZ_4K - 1);

		for (i = 0; i < nsubsys; i++) {
			unsigned int domain_no, partition_no;

			/* No IOMMU: the iova is simply the phys address. */
			if (!msm_use_iommu()) {
				buf->iova[i] = phys;
				continue;
			}

			d = msm_get_iommu_domain(
				msm_subsystem_get_domain_no(subsys_ids[i]));

			/* Best-effort: skip subsystems without a domain. */
			if (!d) {
				pr_err("%s: could not get domain for subsystem"
					" %d\n", __func__, subsys_ids[i]);
				continue;
			}

			domain_no = msm_subsystem_get_domain_no(subsys_ids[i]);
			partition_no = msm_subsystem_get_partition_no(
							subsys_ids[i]);

			iova_start = msm_allocate_iova_address(domain_no,
					partition_no,
					map_size,
					max(min_align, SZ_4K));

			/* Best-effort: skip subsystems whose iova pool is exhausted. */
			if (!iova_start) {
				pr_err("%s: could not allocate iova address\n",
					__func__);
				continue;
			}

			/* Map the range one 4K page at a time. */
			temp_phys = phys;
			temp_va = iova_start;
			for (j = length; j > 0; j -= SZ_4K,
					temp_phys += SZ_4K,
					temp_va += SZ_4K) {
				ret = iommu_map(d, temp_va, temp_phys,
						get_order(SZ_4K), 0);
				if (ret) {
					pr_err("%s: could not map iommu for"
						" domain %p, iova %lx,"
						" phys %lx\n", __func__, d,
						temp_va, temp_phys);
					err = ERR_PTR(-EINVAL);
					goto outdomain;
				}
			}
			buf->iova[i] = iova_start;

			/* Fill the reserved second half with extra mappings. */
			if (flags & MSM_SUBSYSTEM_MAP_IOMMU_2X)
				msm_iommu_map_extra
					(d, temp_va, length, 0);
		}

	}

	node->buf = buf;
	node->subsystems = subsys_ids;
	node->length = map_size;
	node->nsubsys = nsubsys;

	if (add_buffer(node)) {
		err = ERR_PTR(-EINVAL);
		goto outiova;
	}

	return buf;

	/*
	 * Error unwinding.  The labels rely on the loop state (i, j,
	 * temp_va, d) left behind at the point of failure.
	 */
outiova:
	/*
	 * NOTE(review): after a successful loop temp_va points one past the
	 * last mapped page (or at the map_extra region for IOMMU_2X), so
	 * this single-page unmap looks asymmetric — verify against the
	 * intended cleanup for the add_buffer failure path.
	 */
	if (flags & MSM_SUBSYSTEM_MAP_IOVA)
		iommu_unmap(d, temp_va, get_order(SZ_4K));
outdomain:
	if (flags & MSM_SUBSYSTEM_MAP_IOVA) {
		/* Unmap the rest of the current domain, i */
		for (j -= SZ_4K, temp_va -= SZ_4K;
			j > 0; temp_va -= SZ_4K, j -= SZ_4K)
			iommu_unmap(d, temp_va, get_order(SZ_4K));

		/* Unmap all the other domains */
		/*
		 * NOTE(review): this loop unmaps through d, the domain of
		 * the subsystem that failed, not each subsystem's own
		 * domain, and frees length rather than the map_size that
		 * was allocated (they differ under IOMMU_2X) — confirm.
		 */
		for (i--; i >= 0; i--) {
			unsigned int domain_no, partition_no;
			if (!msm_use_iommu())
				continue;
			domain_no = msm_subsystem_get_domain_no(subsys_ids[i]);
			partition_no = msm_subsystem_get_partition_no(
							subsys_ids[i]);

			temp_va = buf->iova[i];
			for (j = length; j > 0; j -= SZ_4K,
					temp_va += SZ_4K)
				iommu_unmap(d, temp_va, get_order(SZ_4K));
			msm_free_iova_address(buf->iova[i], domain_no,
					partition_no, length);
		}

		kfree(buf->iova);
	}

outremovephys:
	if (flags & MSM_SUBSYSTEM_MAP_KADDR)
		remove_buffer_phys(node);
outiounmap:
	if (flags & MSM_SUBSYSTEM_MAP_KADDR)
		iounmap(buf->vaddr);
outkfreenode:
	kfree(node);
outkfreebuf:
	kfree(buf);
outret:
	return err;
}
EXPORT_SYMBOL(msm_subsystem_map_buffer);

/**
 * msm_subsystem_unmap_buffer() - tear down a mapping created by
 * msm_subsystem_map_buffer().
 * @buf: the exact msm_mapped_buffer returned by the map call
 *
 * Unmaps and frees all per-subsystem iova mappings, releases any kernel
 * virtual mapping, removes the tracking node, and frees both the node
 * and @buf.  Returns 0 on success or -EINVAL when @buf is unknown or is
 * not the structure originally returned.
 */
int msm_subsystem_unmap_buffer(struct msm_mapped_buffer *buf)
{
	struct msm_buffer_node *node;
	int i, j, ret;
	unsigned long temp_va;

	/* Same key convention as the map path: vaddr if present, else buf. */
	if (buf->vaddr)
		node = find_buffer(buf->vaddr);
	else
		node = find_buffer(buf);

	if (!node)
		goto out;

	if (node->buf != buf) {
		pr_err("%s: caller must pass in the same buffer structure"
			" returned from map_buffer when freeding\n", __func__);
		goto out;
	}

	if (buf->iova) {
		if (msm_use_iommu())
			for (i = 0; i < node->nsubsys; i++) {
				struct iommu_domain *subsys_domain;
				unsigned int domain_no, partition_no;

				subsys_domain = msm_get_iommu_domain(
						msm_subsystem_get_domain_no(
						node->subsystems[i]));

				domain_no = msm_subsystem_get_domain_no(
						node->subsystems[i]);
				partition_no = msm_subsystem_get_partition_no(
						node->subsystems[i]);

				/* Unmap the whole iova range page by page.
				 * node->length is the map_size recorded at
				 * map time (2x length under IOMMU_2X). */
				temp_va = buf->iova[i];
				for (j = node->length; j > 0; j -= SZ_4K,
					temp_va += SZ_4K) {
					ret = iommu_unmap(subsys_domain,
							temp_va,
							get_order(SZ_4K));
					WARN(ret, "iommu_unmap returned a "
						" non-zero value.\n");
				}
				msm_free_iova_address(buf->iova[i], domain_no,
						partition_no, node->length);
			}
		kfree(buf->iova);

	}

	/* Kernel-mapped buffers also sit in phys_root and hold an ioremap. */
	if (buf->vaddr) {
		remove_buffer_phys(node);
		iounmap(buf->vaddr);
	}

	remove_buffer(node);
	kfree(node);
	kfree(buf);

	return 0;
out:
	return -EINVAL;
}
EXPORT_SYMBOL(msm_subsystem_unmap_buffer);