1/*
2 * drivers/gpu/ion/ion.c
3 *
4 * Copyright (C) 2011 Google, Inc.
5 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/device.h>
19#include <linux/file.h>
20#include <linux/fs.h>
21#include <linux/anon_inodes.h>
22#include <linux/ion.h>
23#include <linux/list.h>
24#include <linux/miscdevice.h>
25#include <linux/mm.h>
26#include <linux/mm_types.h>
27#include <linux/rbtree.h>
28#include <linux/sched.h>
29#include <linux/slab.h>
30#include <linux/seq_file.h>
31#include <linux/uaccess.h>
32#include <linux/debugfs.h>
33
34#include <mach/iommu_domains.h>
35#include "ion_priv.h"
36#define DEBUG
37
38/**
39 * struct ion_device - the metadata of the ion device node
40 * @dev: the actual misc device
41 * @buffers: an rb tree of all the existing buffers
42 * @lock: lock protecting the buffers & heaps trees
43 * @heaps: list of all the heaps in the system
44 * @user_clients: list of all the clients created from userspace
45 */
46struct ion_device {
47 struct miscdevice dev;
48 struct rb_root buffers;
49 struct mutex lock;
50 struct rb_root heaps;
51 long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
52 unsigned long arg);
53 struct rb_root user_clients;
54 struct rb_root kernel_clients;
55 struct dentry *debug_root;
56};
57
58/**
59 * struct ion_client - a process/hw block local address space
60 * @ref: for reference counting the client
61 * @node: node in the tree of all clients
62 * @dev: backpointer to ion device
63 * @handles: an rb tree of all the handles in this client
64 * @lock: lock protecting the tree of handles
65 * @heap_mask: mask of all supported heaps
66 * @name: used for debugging
67 * @task: used for debugging
68 *
69 * A client represents a list of buffers this client may access.
70 * The mutex stored here is used to protect both the tree of handles
71 * and the handles themselves, and should be held while modifying either.
72 */
73struct ion_client {
74 struct kref ref;
75 struct rb_node node;
76 struct ion_device *dev;
77 struct rb_root handles;
78 struct mutex lock;
79 unsigned int heap_mask;
80 char *name;
81 struct task_struct *task;
82 pid_t pid;
83 struct dentry *debug_root;
84};
85
86/**
87 * ion_handle - a client local reference to a buffer
88 * @ref: reference count
89 * @client: back pointer to the client the buffer resides in
90 * @buffer: pointer to the buffer
91 * @node: node in the client's handle rbtree
92 * @kmap_cnt: count of times this client has mapped to kernel
93 * @dmap_cnt: count of times this client has mapped for dma
94 * @usermap_cnt: count of times this client has mapped for userspace
95 *
96 * Modifications to node, map_cnt or mapping should be protected by the
97 * lock in the client. Other fields are never changed after initialization.
98 */
99struct ion_handle {
100 struct kref ref;
101 struct ion_client *client;
102 struct ion_buffer *buffer;
103 struct rb_node node;
104 unsigned int kmap_cnt;
105 unsigned int dmap_cnt;
106 unsigned int usermap_cnt;
107 unsigned int iommu_map_cnt;
108};
109
110static int ion_validate_buffer_flags(struct ion_buffer *buffer,
111 unsigned long flags)
112{
113 if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt ||
114 buffer->iommu_map_cnt) {
115 if (buffer->flags != flags) {
116 pr_err("%s: buffer was already mapped with flags %lx,"
117 " cannot map with flags %lx\n", __func__,
118 buffer->flags, flags);
119 return 1;
120 }
121
122 } else {
123 buffer->flags = flags;
124 }
125 return 0;
126}
127
128/* this function should only be called while dev->lock is held */
129static void ion_buffer_add(struct ion_device *dev,
130 struct ion_buffer *buffer)
131{
132 struct rb_node **p = &dev->buffers.rb_node;
133 struct rb_node *parent = NULL;
134 struct ion_buffer *entry;
135
136 while (*p) {
137 parent = *p;
138 entry = rb_entry(parent, struct ion_buffer, node);
139
140 if (buffer < entry) {
141 p = &(*p)->rb_left;
142 } else if (buffer > entry) {
143 p = &(*p)->rb_right;
144 } else {
145 pr_err("%s: buffer already found.", __func__);
146 BUG();
147 }
148 }
149
150 rb_link_node(&buffer->node, parent, p);
151 rb_insert_color(&buffer->node, &dev->buffers);
152}
153
154static void ion_iommu_add(struct ion_buffer *buffer,
155 struct ion_iommu_map *iommu)
156{
157 struct rb_node **p = &buffer->iommu_maps.rb_node;
158 struct rb_node *parent = NULL;
159 struct ion_iommu_map *entry;
160
161 while (*p) {
162 parent = *p;
163 entry = rb_entry(parent, struct ion_iommu_map, node);
164
165 if (iommu->key < entry->key) {
166 p = &(*p)->rb_left;
167 } else if (iommu->key > entry->key) {
168 p = &(*p)->rb_right;
169 } else {
170 pr_err("%s: buffer %p already has mapping for domain %d"
171 " and partition %d\n", __func__,
172 buffer,
173 iommu_map_domain(iommu),
174 iommu_map_partition(iommu));
175 BUG();
176 }
177 }
178
179 rb_link_node(&iommu->node, parent, p);
180 rb_insert_color(&iommu->node, &buffer->iommu_maps);
181
182}
183
184static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
185 unsigned int domain_no,
186 unsigned int partition_no)
187{
188 struct rb_node **p = &buffer->iommu_maps.rb_node;
189 struct rb_node *parent = NULL;
190 struct ion_iommu_map *entry;
191 uint64_t key = domain_no;
192 key = key << 32 | partition_no;
193
194 while (*p) {
195 parent = *p;
196 entry = rb_entry(parent, struct ion_iommu_map, node);
197
198 if (key < entry->key)
199 p = &(*p)->rb_left;
200 else if (key > entry->key)
201 p = &(*p)->rb_right;
202 else
203 return entry;
204 }
205
206 return NULL;
207}
208
209/* this function should only be called while dev->lock is held */
210static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
211 struct ion_device *dev,
212 unsigned long len,
213 unsigned long align,
214 unsigned long flags)
215{
216 struct ion_buffer *buffer;
217 int ret;
218
219 buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
220 if (!buffer)
221 return ERR_PTR(-ENOMEM);
222
223 buffer->heap = heap;
224 kref_init(&buffer->ref);
225
226 ret = heap->ops->allocate(heap, buffer, len, align, flags);
227 if (ret) {
228 kfree(buffer);
229 return ERR_PTR(ret);
230 }
231 buffer->dev = dev;
232 buffer->size = len;
233 mutex_init(&buffer->lock);
234 ion_buffer_add(dev, buffer);
235 return buffer;
236}
237
238static void ion_buffer_destroy(struct kref *kref)
239{
240 struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
241 struct ion_device *dev = buffer->dev;
242
243 buffer->heap->ops->free(buffer);
244 mutex_lock(&dev->lock);
245 rb_erase(&buffer->node, &dev->buffers);
246 mutex_unlock(&dev->lock);
247 kfree(buffer);
248}
249
250static void ion_buffer_get(struct ion_buffer *buffer)
251{
252 kref_get(&buffer->ref);
253}
254
255static int ion_buffer_put(struct ion_buffer *buffer)
256{
257 return kref_put(&buffer->ref, ion_buffer_destroy);
258}
259
260static struct ion_handle *ion_handle_create(struct ion_client *client,
261 struct ion_buffer *buffer)
262{
263 struct ion_handle *handle;
264
265 handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
266 if (!handle)
267 return ERR_PTR(-ENOMEM);
268 kref_init(&handle->ref);
269 rb_init_node(&handle->node);
270 handle->client = client;
271 ion_buffer_get(buffer);
272 handle->buffer = buffer;
273
274 return handle;
275}
276
277/* Client lock must be held when calling */
278static void ion_handle_destroy(struct kref *kref)
279{
280 struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
281 /* XXX Can a handle be destroyed while its map count is non-zero?:
282 if (handle->map_cnt) unmap
283 */
284 WARN_ON(handle->kmap_cnt || handle->dmap_cnt || handle->usermap_cnt);
285 ion_buffer_put(handle->buffer);
286 if (!RB_EMPTY_NODE(&handle->node))
287 rb_erase(&handle->node, &handle->client->handles);
288 kfree(handle);
289}
290
291struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
292{
293 return handle->buffer;
294}
295
296static void ion_handle_get(struct ion_handle *handle)
297{
298 kref_get(&handle->ref);
299}
300
301static int ion_handle_put(struct ion_handle *handle)
302{
303 return kref_put(&handle->ref, ion_handle_destroy);
304}
305
306static struct ion_handle *ion_handle_lookup(struct ion_client *client,
307 struct ion_buffer *buffer)
308{
309 struct rb_node *n;
310
311 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
312 struct ion_handle *handle = rb_entry(n, struct ion_handle,
313 node);
314 if (handle->buffer == buffer)
315 return handle;
316 }
317 return NULL;
318}
319
320static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
321{
322 struct rb_node *n = client->handles.rb_node;
323
324 while (n) {
325 struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
326 node);
327 if (handle < handle_node)
328 n = n->rb_left;
329 else if (handle > handle_node)
330 n = n->rb_right;
331 else
332 return true;
333 }
334 return false;
335}
336
337static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
338{
339 struct rb_node **p = &client->handles.rb_node;
340 struct rb_node *parent = NULL;
341 struct ion_handle *entry;
342
343 while (*p) {
344 parent = *p;
345 entry = rb_entry(parent, struct ion_handle, node);
346
347 if (handle < entry)
348 p = &(*p)->rb_left;
349 else if (handle > entry)
350 p = &(*p)->rb_right;
351 else
352 WARN(1, "%s: handle already found.", __func__);
353 }
354
355 rb_link_node(&handle->node, parent, p);
356 rb_insert_color(&handle->node, &client->handles);
357}
358
359struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
360 size_t align, unsigned int flags)
361{
362 struct rb_node *n;
363 struct ion_handle *handle;
364 struct ion_device *dev = client->dev;
365 struct ion_buffer *buffer = NULL;
366 unsigned long secure_allocation = flags & ION_SECURE;
367 const unsigned int MAX_DBG_STR_LEN = 64;
368 char dbg_str[MAX_DBG_STR_LEN];
369 unsigned int dbg_str_idx = 0;
370
371 dbg_str[0] = '\0';
372
373 /*
374 * traverse the list of heaps available in this system in priority
375 * order. If the heap type is supported by the client, and matches the
376 * request of the caller allocate from it. Repeat until allocate has
377 * succeeded or all heaps have been tried
378 */
379 mutex_lock(&dev->lock);
380 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
381 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
382 /* if the client doesn't support this heap type */
383 if (!((1 << heap->type) & client->heap_mask))
384 continue;
385 /* if the caller didn't specify this heap type */
386 if (!((1 << heap->id) & flags))
387 continue;
388 /* Do not allow a non-secure heap if secure is specified */
389 if (secure_allocation && (heap->type != ION_HEAP_TYPE_CP))
390 continue;
391 buffer = ion_buffer_create(heap, dev, len, align, flags);
392 if (!IS_ERR_OR_NULL(buffer))
393 break;
394 if (dbg_str_idx < MAX_DBG_STR_LEN) {
395 unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
396 int ret_value = snprintf(&dbg_str[dbg_str_idx],
397 len_left, "%s ", heap->name);
398 if (ret_value >= len_left) {
399 /* overflow */
400 dbg_str[MAX_DBG_STR_LEN-1] = '\0';
401 dbg_str_idx = MAX_DBG_STR_LEN;
402 } else if (ret_value >= 0) {
403 dbg_str_idx += ret_value;
404 } else {
405 /* error */
406 dbg_str[MAX_DBG_STR_LEN-1] = '\0';
407 }
408 }
409 }
410 mutex_unlock(&dev->lock);
411
412 if (IS_ERR_OR_NULL(buffer)) {
413 pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
414 "0x%x) from heap(s) %sfor client %s with heap "
415 "mask 0x%x\n",
416 len, align, dbg_str, client->name, client->heap_mask);
417 return ERR_PTR(PTR_ERR(buffer));
418 }
419
420 handle = ion_handle_create(client, buffer);
421
422 if (IS_ERR_OR_NULL(handle))
423 goto end;
424
425 /*
426 * ion_buffer_create will create a buffer with a ref_cnt of 1,
427 * and ion_handle_create will take a second reference, drop one here
428 */
429 ion_buffer_put(buffer);
430
431 mutex_lock(&client->lock);
432 ion_handle_add(client, handle);
433 mutex_unlock(&client->lock);
434 return handle;
435
436end:
437 ion_buffer_put(buffer);
438 return handle;
439}
440EXPORT_SYMBOL(ion_alloc);
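/*
 * Illustrative usage sketch, not part of the driver: a kernel client might
 * allocate, map and release a buffer roughly as follows. The device pointer
 * "idev", the heap mask and the name "my_driver" are assumptions for this
 * example only; real heap ids come from the board file and msm_ion headers.
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *	void *vaddr;
 *
 *	client = ion_client_create(idev, 0xffffffff, "my_driver");
 *	handle = ion_alloc(client, SZ_4K, SZ_4K, ION_HEAP(ION_SF_HEAP_ID));
 *	if (!IS_ERR_OR_NULL(handle)) {
 *		vaddr = ion_map_kernel(client, handle, ION_SET_CACHE(CACHED));
 *		if (!IS_ERR_OR_NULL(vaddr)) {
 *			memset(vaddr, 0, SZ_4K);
 *			ion_unmap_kernel(client, handle);
 *		}
 *		ion_free(client, handle);
 *	}
 *	ion_client_destroy(client);
 */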
441
442void ion_free(struct ion_client *client, struct ion_handle *handle)
443{
444 bool valid_handle;
445
446 BUG_ON(client != handle->client);
447
448 mutex_lock(&client->lock);
449 valid_handle = ion_handle_validate(client, handle);
450 if (!valid_handle) {
451 mutex_unlock(&client->lock);
452 WARN(1, "%s: invalid handle passed to free.\n", __func__);
453 return;
454 }
455 ion_handle_put(handle);
456 mutex_unlock(&client->lock);
457}
458EXPORT_SYMBOL(ion_free);
459
460static void ion_client_get(struct ion_client *client);
461static int ion_client_put(struct ion_client *client);
462
463static bool _ion_map(int *buffer_cnt, int *handle_cnt)
464{
465 bool map;
466
467 BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0);
468
469 if (*buffer_cnt)
470 map = false;
471 else
472 map = true;
473 if (*handle_cnt == 0)
474 (*buffer_cnt)++;
475 (*handle_cnt)++;
476 return map;
477}
478
479static bool _ion_unmap(int *buffer_cnt, int *handle_cnt)
480{
481 BUG_ON(*handle_cnt == 0);
482 (*handle_cnt)--;
483 if (*handle_cnt != 0)
484 return false;
485 BUG_ON(*buffer_cnt == 0);
486 (*buffer_cnt)--;
487 if (*buffer_cnt == 0)
488 return true;
489 return false;
490}
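/*
 * Worked example of the counting above (illustrative): two handles map the
 * same buffer for the kernel. The first _ion_map() sees buffer_cnt == 0 and
 * returns true (do the real map), leaving buffer_cnt at 1 and that handle's
 * count at 1; the second handle's _ion_map() returns false and buffer_cnt
 * becomes 2. Unmapping the first handle returns false (buffer_cnt drops to
 * 1); only unmapping the last handle returns true (buffer_cnt reaches 0),
 * which is when the heap's real unmap operation is performed.
 */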
491
492int ion_phys(struct ion_client *client, struct ion_handle *handle,
493 ion_phys_addr_t *addr, size_t *len)
494{
495 struct ion_buffer *buffer;
496 int ret;
497
498 mutex_lock(&client->lock);
499 if (!ion_handle_validate(client, handle)) {
500 mutex_unlock(&client->lock);
501 return -EINVAL;
502 }
503
504 buffer = handle->buffer;
505
506 if (!buffer->heap->ops->phys) {
507 pr_err("%s: ion_phys is not implemented by this heap.\n",
508 __func__);
509 mutex_unlock(&client->lock);
510 return -ENODEV;
511 }
512 mutex_unlock(&client->lock);
513 ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
514 return ret;
515}
516EXPORT_SYMBOL(ion_phys);
517
518void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
519 unsigned long flags)
520{
521 struct ion_buffer *buffer;
522 void *vaddr;
523
524 mutex_lock(&client->lock);
525 if (!ion_handle_validate(client, handle)) {
526 pr_err("%s: invalid handle passed to map_kernel.\n",
527 __func__);
528 mutex_unlock(&client->lock);
529 return ERR_PTR(-EINVAL);
530 }
531
532 buffer = handle->buffer;
533 mutex_lock(&buffer->lock);
534
535 if (!handle->buffer->heap->ops->map_kernel) {
536 pr_err("%s: map_kernel is not implemented by this heap.\n",
537 __func__);
538 mutex_unlock(&buffer->lock);
539 mutex_unlock(&client->lock);
540 return ERR_PTR(-ENODEV);
541 }
542
543 if (ion_validate_buffer_flags(buffer, flags)) {
544 vaddr = ERR_PTR(-EEXIST);
545 goto out;
546 }
547
548 if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
549 vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer,
550 flags);
551 if (IS_ERR_OR_NULL(vaddr))
552 _ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
553 buffer->vaddr = vaddr;
554 } else {
555 vaddr = buffer->vaddr;
556 }
557
558out:
559 mutex_unlock(&buffer->lock);
560 mutex_unlock(&client->lock);
561 return vaddr;
562}
563EXPORT_SYMBOL(ion_map_kernel);
564
565static int __ion_iommu_map(struct ion_buffer *buffer,
566 int domain_num, int partition_num, unsigned long align,
567 unsigned long iova_length, unsigned long flags,
568 unsigned long *iova)
569{
570 struct ion_iommu_map *data;
571 int ret;
572
573 data = kmalloc(sizeof(*data), GFP_ATOMIC);
574
575 if (!data)
576 return -ENOMEM;
577
578 data->buffer = buffer;
579 iommu_map_domain(data) = domain_num;
580 iommu_map_partition(data) = partition_num;
581
582 ret = buffer->heap->ops->map_iommu(buffer, data,
583 domain_num,
584 partition_num,
585 align,
586 iova_length,
587 flags);
588
589 if (ret)
590 goto out;
591
592 kref_init(&data->ref);
593 *iova = data->iova_addr;
594
595 ion_iommu_add(buffer, data);
596
597 return 0;
598
599out:
600 msm_free_iova_address(data->iova_addr, domain_num, partition_num,
601 buffer->size);
602 kfree(data);
603 return ret;
604}
605
606int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
607 int domain_num, int partition_num, unsigned long align,
608 unsigned long iova_length, unsigned long *iova,
609 unsigned long *buffer_size,
610 unsigned long flags)
611{
612 struct ion_buffer *buffer;
613 struct ion_iommu_map *iommu_map;
614 int ret = 0;
615
616 if (ION_IS_CACHED(flags)) {
617 pr_err("%s: Cannot map iommu as cached.\n", __func__);
618 return -EINVAL;
619 }
620
621 mutex_lock(&client->lock);
622 if (!ion_handle_validate(client, handle)) {
623 pr_err("%s: invalid handle passed to map_iommu.\n",
624 __func__);
625 mutex_unlock(&client->lock);
626 return -EINVAL;
627 }
628
629 buffer = handle->buffer;
630 mutex_lock(&buffer->lock);
631
632 if (!handle->buffer->heap->ops->map_iommu) {
633 pr_err("%s: map_iommu is not implemented by this heap.\n",
634 __func__);
635 ret = -ENODEV;
636 goto out;
637 }
638
639 /*
640 * If clients don't want a custom iova length, just use whatever
641 * the buffer size is
642 */
643 if (!iova_length)
644 iova_length = buffer->size;
645
646 if (buffer->size > iova_length) {
647 pr_debug("%s: iova length %lx is not at least buffer size"
648 " %x\n", __func__, iova_length, buffer->size);
649 ret = -EINVAL;
650 goto out;
651 }
652
653 if (buffer->size & ~PAGE_MASK) {
654 pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
655 buffer->size, PAGE_SIZE);
656 ret = -EINVAL;
657 goto out;
658 }
659
660 if (iova_length & ~PAGE_MASK) {
661 pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
662 iova_length, PAGE_SIZE);
663 ret = -EINVAL;
664 goto out;
665 }
666
667 iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
668 if (_ion_map(&buffer->iommu_map_cnt, &handle->iommu_map_cnt) ||
669 !iommu_map) {
670 ret = __ion_iommu_map(buffer, domain_num, partition_num, align,
671 iova_length, flags, iova);
672 if (ret < 0)
673 _ion_unmap(&buffer->iommu_map_cnt,
674 &handle->iommu_map_cnt);
675 } else {
676 if (iommu_map->mapped_size != iova_length) {
677 pr_err("%s: handle %p is already mapped with length"
678 " %x, trying to map with length %lx\n",
679 __func__, handle, iommu_map->mapped_size,
680 iova_length);
681 _ion_unmap(&buffer->iommu_map_cnt,
682 &handle->iommu_map_cnt);
683 ret = -EINVAL;
684 } else {
685 kref_get(&iommu_map->ref);
686 *iova = iommu_map->iova_addr;
687 }
688 }
689 *buffer_size = buffer->size;
690out:
691 mutex_unlock(&buffer->lock);
692 mutex_unlock(&client->lock);
693 return ret;
694}
695EXPORT_SYMBOL(ion_map_iommu);
696
697static void ion_iommu_release(struct kref *kref)
698{
699 struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
700 ref);
701 struct ion_buffer *buffer = map->buffer;
702
703 rb_erase(&map->node, &buffer->iommu_maps);
704 buffer->heap->ops->unmap_iommu(map);
705 kfree(map);
706}
707
708void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
709 int domain_num, int partition_num)
710{
711 struct ion_iommu_map *iommu_map;
712 struct ion_buffer *buffer;
713
714 mutex_lock(&client->lock);
715 buffer = handle->buffer;
716
717 mutex_lock(&buffer->lock);
718
719 iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
720
721 if (!iommu_map) {
722 WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
723 domain_num, partition_num, buffer);
724 goto out;
725 }
726
727 _ion_unmap(&buffer->iommu_map_cnt, &handle->iommu_map_cnt);
728 kref_put(&iommu_map->ref, ion_iommu_release);
729
730out:
731 mutex_unlock(&buffer->lock);
732
733 mutex_unlock(&client->lock);
734
735}
736EXPORT_SYMBOL(ion_unmap_iommu);
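/*
 * Illustrative sketch only: mapping a buffer into an SMMU domain/partition
 * and releasing it again. The domain and partition numbers are placeholders;
 * real values are defined alongside <mach/iommu_domains.h>.
 *
 *	unsigned long iova, size;
 *	int ret;
 *
 *	ret = ion_map_iommu(client, handle, domain_num, partition_num,
 *			    SZ_4K, 0, &iova, &size, 0);
 *	if (!ret) {
 *		... program the hardware with iova ...
 *		ion_unmap_iommu(client, handle, domain_num, partition_num);
 *	}
 */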
737
738struct scatterlist *ion_map_dma(struct ion_client *client,
739 struct ion_handle *handle,
740 unsigned long flags)
741{
742 struct ion_buffer *buffer;
743 struct scatterlist *sglist;
744
745 mutex_lock(&client->lock);
746 if (!ion_handle_validate(client, handle)) {
747 pr_err("%s: invalid handle passed to map_dma.\n",
748 __func__);
749 mutex_unlock(&client->lock);
750 return ERR_PTR(-EINVAL);
751 }
752 buffer = handle->buffer;
753 mutex_lock(&buffer->lock);
754
755 if (!handle->buffer->heap->ops->map_dma) {
756 pr_err("%s: map_dma is not implemented by this heap.\n",
757 __func__);
758 mutex_unlock(&buffer->lock);
759 mutex_unlock(&client->lock);
760 return ERR_PTR(-ENODEV);
761 }
762
763 if (ion_validate_buffer_flags(buffer, flags)) {
764 sglist = ERR_PTR(-EEXIST);
765 goto out;
766 }
767
768 if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
769 sglist = buffer->heap->ops->map_dma(buffer->heap, buffer);
770 if (IS_ERR_OR_NULL(sglist))
771 _ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt);
772 buffer->sglist = sglist;
773 } else {
774 sglist = buffer->sglist;
775 }
776
777out:
778 mutex_unlock(&buffer->lock);
779 mutex_unlock(&client->lock);
780 return sglist;
781}
782EXPORT_SYMBOL(ion_map_dma);
783
784void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
785{
786 struct ion_buffer *buffer;
787
788 mutex_lock(&client->lock);
789 buffer = handle->buffer;
790 mutex_lock(&buffer->lock);
791 if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) {
792 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
793 buffer->vaddr = NULL;
794 }
795 mutex_unlock(&buffer->lock);
796 mutex_unlock(&client->lock);
797}
798EXPORT_SYMBOL(ion_unmap_kernel);
799
800void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
801{
802 struct ion_buffer *buffer;
803
804 mutex_lock(&client->lock);
805 buffer = handle->buffer;
806 mutex_lock(&buffer->lock);
807 if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) {
808 buffer->heap->ops->unmap_dma(buffer->heap, buffer);
809 buffer->sglist = NULL;
810 }
811 mutex_unlock(&buffer->lock);
812 mutex_unlock(&client->lock);
813}
814EXPORT_SYMBOL(ion_unmap_dma);
815
816struct ion_buffer *ion_share(struct ion_client *client,
817 struct ion_handle *handle)
818{
819 bool valid_handle;
820
821 mutex_lock(&client->lock);
822 valid_handle = ion_handle_validate(client, handle);
823 mutex_unlock(&client->lock);
824 if (!valid_handle) {
825 WARN(1, "%s: invalid handle passed to share.\n", __func__);
826 return ERR_PTR(-EINVAL);
827 }
828
829 /* do not take an extra reference here, the burden is on the caller
830 * to make sure the buffer doesn't go away while it's passing it
831 * to another client -- ion_free should not be called on this handle
832 * until the buffer has been imported into the other client
833 */
834 return handle->buffer;
835}
836EXPORT_SYMBOL(ion_share);
837
838struct ion_handle *ion_import(struct ion_client *client,
839 struct ion_buffer *buffer)
840{
841 struct ion_handle *handle = NULL;
842
843 mutex_lock(&client->lock);
844 /* if a handle exists for this buffer just take a reference to it */
845 handle = ion_handle_lookup(client, buffer);
846 if (!IS_ERR_OR_NULL(handle)) {
847 ion_handle_get(handle);
848 goto end;
849 }
850 handle = ion_handle_create(client, buffer);
851 if (IS_ERR_OR_NULL(handle))
852 goto end;
853 ion_handle_add(client, handle);
854end:
855 mutex_unlock(&client->lock);
856 return handle;
857}
858EXPORT_SYMBOL(ion_import);
859
860static int check_vaddr_bounds(unsigned long start, unsigned long end)
861{
862 struct mm_struct *mm = current->active_mm;
863 struct vm_area_struct *vma;
864 int ret = 1;
865
866 if (end < start)
867 goto out;
868
869 down_read(&mm->mmap_sem);
870 vma = find_vma(mm, start);
871 if (vma && vma->vm_start < end) {
872 if (start < vma->vm_start)
873 goto out_up;
874 if (end > vma->vm_end)
875 goto out_up;
876 ret = 0;
877 }
878
879out_up:
880 up_read(&mm->mmap_sem);
881out:
882 return ret;
883}
884
885static int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
886 void *uaddr, unsigned long offset, unsigned long len,
887 unsigned int cmd)
888{
889 struct ion_buffer *buffer;
Laura Abbottabcb6f72011-10-04 16:26:49 -0700890 int ret = -EINVAL;
891
892 mutex_lock(&client->lock);
893 if (!ion_handle_validate(client, handle)) {
894 pr_err("%s: invalid handle passed to do_cache_op.\n",
895 __func__);
896 mutex_unlock(&client->lock);
897 return -EINVAL;
898 }
899 buffer = handle->buffer;
900 mutex_lock(&buffer->lock);
901
902 if (!ION_IS_CACHED(buffer->flags)) {
903 ret = 0;
904 goto out;
905 }
906
907 if (!handle->buffer->heap->ops->cache_op) {
908 pr_err("%s: cache_op is not implemented by this heap.\n",
909 __func__);
910 ret = -ENODEV;
911 goto out;
912 }
913
914
915 ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
916 offset, len, cmd);
917
918out:
919 mutex_unlock(&buffer->lock);
920 mutex_unlock(&client->lock);
921 return ret;
922
923}
924
925static const struct file_operations ion_share_fops;
926
927struct ion_handle *ion_import_fd(struct ion_client *client, int fd)
928{
929 struct file *file = fget(fd);
930 struct ion_handle *handle;
931
932 if (!file) {
933 pr_err("%s: imported fd not found in file table.\n", __func__);
934 return ERR_PTR(-EINVAL);
935 }
936 if (file->f_op != &ion_share_fops) {
937 pr_err("%s: imported file %s is not a shared ion"
938 " file.", __func__, file->f_dentry->d_name.name);
939 handle = ERR_PTR(-EINVAL);
940 goto end;
941 }
942 handle = ion_import(client, file->private_data);
943end:
944 fput(file);
945 return handle;
946}
947EXPORT_SYMBOL(ion_import_fd);
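/*
 * Illustrative sketch: a driver importing a buffer that userspace shared
 * via ION_IOC_SHARE. "shared_fd" is assumed to have reached the driver
 * through its own ioctl.
 *
 *	struct ion_handle *handle = ion_import_fd(client, shared_fd);
 *	if (!IS_ERR_OR_NULL(handle)) {
 *		... use the buffer, e.g. ion_phys() or ion_map_iommu() ...
 *		ion_free(client, handle);
 *	}
 */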
948
949static int ion_debug_client_show(struct seq_file *s, void *unused)
950{
951 struct ion_client *client = s->private;
952 struct rb_node *n;
953
954 seq_printf(s, "%16.16s: %16.16s : %16.16s : %16.16s\n", "heap_name",
955 "size_in_bytes", "handle refcount", "buffer");
956 mutex_lock(&client->lock);
957 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
958 struct ion_handle *handle = rb_entry(n, struct ion_handle,
959 node);
960
961 seq_printf(s, "%16.16s: %16x : %16d : %16p\n",
962 handle->buffer->heap->name,
963 handle->buffer->size,
964 atomic_read(&handle->ref.refcount),
965 handle->buffer);
966 }
967
968 seq_printf(s, "%16.16s %d\n", "client refcount:",
969 atomic_read(&client->ref.refcount));
970 mutex_unlock(&client->lock);
971
972 return 0;
973}
974
975static int ion_debug_client_open(struct inode *inode, struct file *file)
976{
977 return single_open(file, ion_debug_client_show, inode->i_private);
978}
979
980static const struct file_operations debug_client_fops = {
981 .open = ion_debug_client_open,
982 .read = seq_read,
983 .llseek = seq_lseek,
984 .release = single_release,
985};
986
987static struct ion_client *ion_client_lookup(struct ion_device *dev,
988 struct task_struct *task)
989{
990 struct rb_node *n = dev->user_clients.rb_node;
991 struct ion_client *client;
992
993 mutex_lock(&dev->lock);
994 while (n) {
995 client = rb_entry(n, struct ion_client, node);
996 if (task == client->task) {
997 ion_client_get(client);
998 mutex_unlock(&dev->lock);
999 return client;
1000 } else if (task < client->task) {
1001 n = n->rb_left;
1002 } else if (task > client->task) {
1003 n = n->rb_right;
1004 }
1005 }
1006 mutex_unlock(&dev->lock);
1007 return NULL;
1008}
1009
1010struct ion_client *ion_client_create(struct ion_device *dev,
1011 unsigned int heap_mask,
1012 const char *name)
1013{
1014 struct ion_client *client;
1015 struct task_struct *task;
1016 struct rb_node **p;
1017 struct rb_node *parent = NULL;
1018 struct ion_client *entry;
1019 pid_t pid;
1020 unsigned int name_len = strnlen(name, 64);
1021
1022 get_task_struct(current->group_leader);
1023 task_lock(current->group_leader);
1024 pid = task_pid_nr(current->group_leader);
1025 /* don't bother to store task struct for kernel threads,
1026 they can't be killed anyway */
1027 if (current->group_leader->flags & PF_KTHREAD) {
1028 put_task_struct(current->group_leader);
1029 task = NULL;
1030 } else {
1031 task = current->group_leader;
1032 }
1033 task_unlock(current->group_leader);
1034
1035 /* if this isn't a kernel thread, see if a client already
1036 exists */
1037 if (task) {
1038 client = ion_client_lookup(dev, task);
1039 if (!IS_ERR_OR_NULL(client)) {
1040 put_task_struct(current->group_leader);
1041 return client;
1042 }
1043 }
1044
1045 client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
1046 if (!client) {
1047 put_task_struct(current->group_leader);
1048 return ERR_PTR(-ENOMEM);
1049 }
1050
1051 client->dev = dev;
1052 client->handles = RB_ROOT;
1053 mutex_init(&client->lock);
1054
1055 client->name = kzalloc(name_len+1, GFP_KERNEL);
1056 if (!client->name) {
1057 put_task_struct(current->group_leader);
1058 kfree(client);
1059 return ERR_PTR(-ENOMEM);
1060 } else {
1061 strlcpy(client->name, name, name_len+1);
1062 }
1063
1064 client->heap_mask = heap_mask;
1065 client->task = task;
1066 client->pid = pid;
1067 kref_init(&client->ref);
1068
1069 mutex_lock(&dev->lock);
1070 if (task) {
1071 p = &dev->user_clients.rb_node;
1072 while (*p) {
1073 parent = *p;
1074 entry = rb_entry(parent, struct ion_client, node);
1075
1076 if (task < entry->task)
1077 p = &(*p)->rb_left;
1078 else if (task > entry->task)
1079 p = &(*p)->rb_right;
1080 }
1081 rb_link_node(&client->node, parent, p);
1082 rb_insert_color(&client->node, &dev->user_clients);
1083 } else {
1084 p = &dev->kernel_clients.rb_node;
1085 while (*p) {
1086 parent = *p;
1087 entry = rb_entry(parent, struct ion_client, node);
1088
1089 if (client < entry)
1090 p = &(*p)->rb_left;
1091 else if (client > entry)
1092 p = &(*p)->rb_right;
1093 }
1094 rb_link_node(&client->node, parent, p);
1095 rb_insert_color(&client->node, &dev->kernel_clients);
1096 }
1097
1098
1099 client->debug_root = debugfs_create_file(name, 0664,
1100 dev->debug_root, client,
1101 &debug_client_fops);
1102 mutex_unlock(&dev->lock);
1103
1104 return client;
1105}
1106
1107static void _ion_client_destroy(struct kref *kref)
1108{
1109 struct ion_client *client = container_of(kref, struct ion_client, ref);
1110 struct ion_device *dev = client->dev;
1111 struct rb_node *n;
1112
1113 pr_debug("%s: %d\n", __func__, __LINE__);
1114 while ((n = rb_first(&client->handles))) {
1115 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1116 node);
1117 ion_handle_destroy(&handle->ref);
1118 }
1119 mutex_lock(&dev->lock);
1120 if (client->task) {
1121 rb_erase(&client->node, &dev->user_clients);
1122 put_task_struct(client->task);
1123 } else {
1124 rb_erase(&client->node, &dev->kernel_clients);
1125 }
1126 debugfs_remove_recursive(client->debug_root);
1127 mutex_unlock(&dev->lock);
1128
1129 kfree(client->name);
1130 kfree(client);
1131}
1132
1133static void ion_client_get(struct ion_client *client)
1134{
1135 kref_get(&client->ref);
1136}
1137
1138static int ion_client_put(struct ion_client *client)
1139{
1140 return kref_put(&client->ref, _ion_client_destroy);
1141}
1142
1143void ion_client_destroy(struct ion_client *client)
1144{
1145 if (client)
1146 ion_client_put(client);
1147}
1148EXPORT_SYMBOL(ion_client_destroy);
1149
1150int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
1151 unsigned long *flags)
1152{
1153 struct ion_buffer *buffer;
1154
1155 mutex_lock(&client->lock);
1156 if (!ion_handle_validate(client, handle)) {
1157 pr_err("%s: invalid handle passed to %s.\n",
1158 __func__, __func__);
1159 mutex_unlock(&client->lock);
1160 return -EINVAL;
1161 }
1162 buffer = handle->buffer;
1163 mutex_lock(&buffer->lock);
1164 *flags = buffer->flags;
1165 mutex_unlock(&buffer->lock);
1166 mutex_unlock(&client->lock);
1167
1168 return 0;
1169}
1170EXPORT_SYMBOL(ion_handle_get_flags);
1171
1172int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
1173 unsigned long *size)
1174{
1175 struct ion_buffer *buffer;
1176
1177 mutex_lock(&client->lock);
1178 if (!ion_handle_validate(client, handle)) {
1179 pr_err("%s: invalid handle passed to %s.\n",
1180 __func__, __func__);
1181 mutex_unlock(&client->lock);
1182 return -EINVAL;
1183 }
1184 buffer = handle->buffer;
1185 mutex_lock(&buffer->lock);
1186 *size = buffer->size;
1187 mutex_unlock(&buffer->lock);
1188 mutex_unlock(&client->lock);
1189
1190 return 0;
1191}
1192EXPORT_SYMBOL(ion_handle_get_size);
1193
1194static int ion_share_release(struct inode *inode, struct file *file)
1195{
1196 struct ion_buffer *buffer = file->private_data;
1197
1198 pr_debug("%s: %d\n", __func__, __LINE__);
1199 /* drop the reference to the buffer -- this prevents the
1200 buffer from going away because the client holding it exited
1201 while it was being passed */
1202 ion_buffer_put(buffer);
1203 return 0;
1204}
1205
1206static void ion_vma_open(struct vm_area_struct *vma)
1207{
1208
1209 struct ion_buffer *buffer = vma->vm_file->private_data;
1210 struct ion_handle *handle = vma->vm_private_data;
1211 struct ion_client *client;
1212
1213 pr_debug("%s: %d\n", __func__, __LINE__);
1214 /* check that the client still exists and take a reference so
1215 it can't go away until this vma is closed */
1216 client = ion_client_lookup(buffer->dev, current->group_leader);
1217 if (IS_ERR_OR_NULL(client)) {
1218 vma->vm_private_data = NULL;
1219 return;
1220 }
1221 ion_handle_get(handle);
1222 mutex_lock(&buffer->lock);
1223 buffer->umap_cnt++;
1224 mutex_unlock(&buffer->lock);
1225 pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
1226 __func__, __LINE__,
1227 atomic_read(&client->ref.refcount),
1228 atomic_read(&handle->ref.refcount),
1229 atomic_read(&buffer->ref.refcount));
1230}
1231
1232static void ion_vma_close(struct vm_area_struct *vma)
1233{
1234 struct ion_handle *handle = vma->vm_private_data;
1235 struct ion_buffer *buffer = vma->vm_file->private_data;
1236 struct ion_client *client;
1237
1238 pr_debug("%s: %d\n", __func__, __LINE__);
1239 /* this indicates the client is gone, nothing to do here */
1240 if (!handle)
1241 return;
1242 client = handle->client;
1243 mutex_lock(&buffer->lock);
1244 buffer->umap_cnt--;
1245 mutex_unlock(&buffer->lock);
1246
1247 if (buffer->heap->ops->unmap_user)
1248 buffer->heap->ops->unmap_user(buffer->heap, buffer);
1249
1250
1251 pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
1252 __func__, __LINE__,
1253 atomic_read(&client->ref.refcount),
1254 atomic_read(&handle->ref.refcount),
1255 atomic_read(&buffer->ref.refcount));
1256 mutex_lock(&client->lock);
1257 ion_handle_put(handle);
1258 mutex_unlock(&client->lock);
1259 ion_client_put(client);
1260 pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
1261 __func__, __LINE__,
1262 atomic_read(&client->ref.refcount),
1263 atomic_read(&handle->ref.refcount),
1264 atomic_read(&buffer->ref.refcount));
1265}
1266
1267static struct vm_operations_struct ion_vm_ops = {
1268 .open = ion_vma_open,
1269 .close = ion_vma_close,
1270};
1271
1272static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
1273{
1274 struct ion_buffer *buffer = file->private_data;
1275 unsigned long size = vma->vm_end - vma->vm_start;
1276 struct ion_client *client;
1277 struct ion_handle *handle;
1278 int ret;
1279 unsigned long flags = file->f_flags & O_DSYNC ?
1280 ION_SET_CACHE(UNCACHED) :
1281 ION_SET_CACHE(CACHED);
1282
1283
1284 pr_debug("%s: %d\n", __func__, __LINE__);
1285 /* make sure the client still exists, it's possible for the client to
1286 have gone away but the map/share fd still to be around, take
1287 a reference to it so it can't go away while this mapping exists */
1288 client = ion_client_lookup(buffer->dev, current->group_leader);
1289 if (IS_ERR_OR_NULL(client)) {
1290 pr_err("%s: trying to mmap an ion handle in a process with no "
1291 "ion client\n", __func__);
1292 return -EINVAL;
1293 }
1294
1295 if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) >
1296 buffer->size)) {
1297 pr_err("%s: trying to map larger area than handle has available"
1298 "\n", __func__);
1299 ret = -EINVAL;
1300 goto err;
1301 }
1302
1303 /* find the handle and take a reference to it */
1304 handle = ion_import(client, buffer);
1305 if (IS_ERR_OR_NULL(handle)) {
1306 ret = -EINVAL;
1307 goto err;
1308 }
1309
1310 if (!handle->buffer->heap->ops->map_user) {
1311 pr_err("%s: this heap does not define a method for mapping "
1312 "to userspace\n", __func__);
1313 ret = -EINVAL;
1314 goto err1;
1315 }
1316
1317 mutex_lock(&buffer->lock);
1318
1319 if (ion_validate_buffer_flags(buffer, flags)) {
1320 ret = -EEXIST;
1321 mutex_unlock(&buffer->lock);
1322 goto err1;
1323 }
1324
1325 /* now map it to userspace */
1326 ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma,
1327 flags);
1328
1329 buffer->umap_cnt++;
1330 if (ret) {
1331 pr_err("%s: failure mapping buffer to userspace\n",
1332 __func__);
1333 goto err2;
1334 }
1335 mutex_unlock(&buffer->lock);
1336
1337 vma->vm_ops = &ion_vm_ops;
1338 /* move the handle into the vm_private_data so we can access it from
1339 vma_open/close */
1340 vma->vm_private_data = handle;
1341 pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
1342 __func__, __LINE__,
1343 atomic_read(&client->ref.refcount),
1344 atomic_read(&handle->ref.refcount),
1345 atomic_read(&buffer->ref.refcount));
1346 return 0;
1347
1348err2:
1349 buffer->umap_cnt--;
1350 mutex_unlock(&buffer->lock);
1351 /* drop the reference to the handle */
1352err1:
1353 mutex_lock(&client->lock);
1354 ion_handle_put(handle);
1355 mutex_unlock(&client->lock);
1356err:
1357 /* drop the reference to the client */
1358 ion_client_put(client);
1359 return ret;
1360}
1361
1362static const struct file_operations ion_share_fops = {
1363 .owner = THIS_MODULE,
1364 .release = ion_share_release,
1365 .mmap = ion_share_mmap,
1366};
1367
1368static int ion_ioctl_share(struct file *parent, struct ion_client *client,
1369 struct ion_handle *handle)
1370{
1371 int fd = get_unused_fd();
1372 struct file *file;
1373
1374 if (fd < 0)
1375 return -ENFILE;
1376
1377 file = anon_inode_getfile("ion_share_fd", &ion_share_fops,
1378 handle->buffer, O_RDWR);
1379 if (IS_ERR_OR_NULL(file))
1380 goto err;
1381
1382 if (parent->f_flags & O_DSYNC)
1383 file->f_flags |= O_DSYNC;
1384
1385 ion_buffer_get(handle->buffer);
1386 fd_install(fd, file);
1387
1388 return fd;
1389
1390err:
1391 put_unused_fd(fd);
1392 return -ENFILE;
1393}
1394
1395static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1396{
1397 struct ion_client *client = filp->private_data;
1398
1399 switch (cmd) {
1400 case ION_IOC_ALLOC:
1401 {
1402 struct ion_allocation_data data;
1403
1404 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1405 return -EFAULT;
1406 data.handle = ion_alloc(client, data.len, data.align,
1407 data.flags);
1408
1409 if (IS_ERR_OR_NULL(data.handle))
1410 return -ENOMEM;
1411
1412 if (copy_to_user((void __user *)arg, &data, sizeof(data)))
1413 return -EFAULT;
1414 break;
1415 }
1416 case ION_IOC_FREE:
1417 {
1418 struct ion_handle_data data;
1419 bool valid;
1420
1421 if (copy_from_user(&data, (void __user *)arg,
1422 sizeof(struct ion_handle_data)))
1423 return -EFAULT;
1424 mutex_lock(&client->lock);
1425 valid = ion_handle_validate(client, data.handle);
1426 mutex_unlock(&client->lock);
1427 if (!valid)
1428 return -EINVAL;
1429 ion_free(client, data.handle);
1430 break;
1431 }
1432 case ION_IOC_MAP:
1433 case ION_IOC_SHARE:
1434 {
1435 struct ion_fd_data data;
1436
1437 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1438 return -EFAULT;
1439 mutex_lock(&client->lock);
1440 if (!ion_handle_validate(client, data.handle)) {
1441 pr_err("%s: invalid handle passed to share ioctl.\n",
1442 __func__);
1443 mutex_unlock(&client->lock);
1444 return -EINVAL;
1445 }
1446 data.fd = ion_ioctl_share(filp, client, data.handle);
1447 mutex_unlock(&client->lock);
1448 if (copy_to_user((void __user *)arg, &data, sizeof(data)))
1449 return -EFAULT;
1450 break;
1451 }
1452 case ION_IOC_IMPORT:
1453 {
1454 struct ion_fd_data data;
1455 if (copy_from_user(&data, (void __user *)arg,
1456 sizeof(struct ion_fd_data)))
1457 return -EFAULT;
1458
1459 data.handle = ion_import_fd(client, data.fd);
1460 if (IS_ERR(data.handle))
1461 data.handle = NULL;
1462 if (copy_to_user((void __user *)arg, &data,
1463 sizeof(struct ion_fd_data)))
1464 return -EFAULT;
1465 break;
1466 }
1467 case ION_IOC_CUSTOM:
1468 {
1469 struct ion_device *dev = client->dev;
1470 struct ion_custom_data data;
1471
1472 if (!dev->custom_ioctl)
1473 return -ENOTTY;
1474 if (copy_from_user(&data, (void __user *)arg,
1475 sizeof(struct ion_custom_data)))
1476 return -EFAULT;
1477 return dev->custom_ioctl(client, data.cmd, data.arg);
1478 }
1479 case ION_IOC_CLEAN_CACHES:
1480 case ION_IOC_INV_CACHES:
1481 case ION_IOC_CLEAN_INV_CACHES:
1482 {
1483 struct ion_flush_data data;
1484 unsigned long start, end;
1485 struct ion_handle *handle = NULL;
1486 int ret;
1487
1488 if (copy_from_user(&data, (void __user *)arg,
1489 sizeof(struct ion_flush_data)))
1490 return -EFAULT;
1491
1492 start = (unsigned long) data.vaddr;
1493 end = (unsigned long) data.vaddr + data.length;
1494
1495 if (check_vaddr_bounds(start, end)) {
1496 pr_err("%s: virtual address %p is out of bounds\n",
1497 __func__, data.vaddr);
1498 return -EINVAL;
1499 }
1500
1501 if (!data.handle) {
1502 handle = ion_import_fd(client, data.fd);
1503 if (IS_ERR_OR_NULL(handle)) {
1504 pr_info("%s: Could not import handle: %d\n",
1505 __func__, (int)handle);
1506 return -EINVAL;
1507 }
1508 }
1509
1510 ret = ion_do_cache_op(client,
1511 data.handle ? data.handle : handle,
1512 data.vaddr, data.offset, data.length,
1513 cmd);
1514
1515 if (!data.handle)
1516 ion_free(client, handle);
1517
1518 break;
1519
1520 }
1521 case ION_IOC_GET_FLAGS:
1522 {
1523 struct ion_flag_data data;
1524 int ret;
1525 if (copy_from_user(&data, (void __user *)arg,
1526 sizeof(struct ion_flag_data)))
1527 return -EFAULT;
1528
1529 ret = ion_handle_get_flags(client, data.handle, &data.flags);
1530 if (ret < 0)
1531 return ret;
1532 if (copy_to_user((void __user *)arg, &data,
1533 sizeof(struct ion_flag_data)))
1534 return -EFAULT;
1535 break;
1536 }
1537 default:
1538 return -ENOTTY;
1539 }
1540 return 0;
1541}
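/*
 * Illustrative userspace sketch of the ALLOC + MAP + mmap flow handled by
 * the ioctl above (the "/dev/ion" node name and the heap id are assumptions):
 *
 *	int ion_fd = open("/dev/ion", O_RDONLY);
 *	struct ion_allocation_data alloc = {
 *		.len = 4096, .align = 4096, .flags = ION_HEAP(ION_SF_HEAP_ID),
 *	};
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *
 *	struct ion_fd_data req = { .handle = alloc.handle };
 *	ioctl(ion_fd, ION_IOC_MAP, &req);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       req.fd, 0);
 *
 *	... use p ...
 *
 *	munmap(p, 4096);
 *	close(req.fd);
 *	struct ion_handle_data hd = { .handle = alloc.handle };
 *	ioctl(ion_fd, ION_IOC_FREE, &hd);
 *	close(ion_fd);
 */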
1542
1543static int ion_release(struct inode *inode, struct file *file)
1544{
1545 struct ion_client *client = file->private_data;
1546
1547 pr_debug("%s: %d\n", __func__, __LINE__);
1548 ion_client_put(client);
1549 return 0;
1550}
1551
1552static int ion_open(struct inode *inode, struct file *file)
1553{
1554 struct miscdevice *miscdev = file->private_data;
1555 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1556 struct ion_client *client;
1557 char debug_name[64];
1558
1559 pr_debug("%s: %d\n", __func__, __LINE__);
1560 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1561 client = ion_client_create(dev, -1, debug_name);
1562 if (IS_ERR_OR_NULL(client))
1563 return PTR_ERR(client);
1564 file->private_data = client;
1565
1566 return 0;
1567}
1568
1569static const struct file_operations ion_fops = {
1570 .owner = THIS_MODULE,
1571 .open = ion_open,
1572 .release = ion_release,
1573 .unlocked_ioctl = ion_ioctl,
1574};
1575
1576static size_t ion_debug_heap_total(struct ion_client *client,
1577 enum ion_heap_ids id)
1578{
1579 size_t size = 0;
1580 struct rb_node *n;
1581
1582 mutex_lock(&client->lock);
1583 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1584 struct ion_handle *handle = rb_entry(n,
1585 struct ion_handle,
1586 node);
1587 if (handle->buffer->heap->id == id)
1588 size += handle->buffer->size;
1589 }
1590 mutex_unlock(&client->lock);
1591 return size;
1592}
1593
1594static int ion_debug_heap_show(struct seq_file *s, void *unused)
1595{
1596 struct ion_heap *heap = s->private;
1597 struct ion_device *dev = heap->dev;
1598 struct rb_node *n;
1599
1600 seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
1601 for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
1602 struct ion_client *client = rb_entry(n, struct ion_client,
1603 node);
1604 char task_comm[TASK_COMM_LEN];
1605 size_t size = ion_debug_heap_total(client, heap->id);
1606 if (!size)
1607 continue;
1608
1609 get_task_comm(task_comm, client->task);
1610 seq_printf(s, "%16.s %16u %16x\n", task_comm, client->pid,
1611 size);
1612 }
1613
1614 for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
1615 struct ion_client *client = rb_entry(n, struct ion_client,
1616 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001617 size_t size = ion_debug_heap_total(client, heap->id);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001618 if (!size)
1619 continue;
Laura Abbott8747bbe2011-10-31 14:18:13 -07001620 seq_printf(s, "%16s %16u %16zu\n", client->name, client->pid,
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001621 size);
1622 }
Olav Haugan3d4fe1a2012-01-13 11:42:15 -08001623 if (heap->ops->print_debug)
1624 heap->ops->print_debug(heap, s);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001625 return 0;
1626}
1627
1628static int ion_debug_heap_open(struct inode *inode, struct file *file)
1629{
1630 return single_open(file, ion_debug_heap_show, inode->i_private);
1631}
1632
1633static const struct file_operations debug_heap_fops = {
1634 .open = ion_debug_heap_open,
1635 .read = seq_read,
1636 .llseek = seq_lseek,
1637 .release = single_release,
1638};
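
/*
 * Example (sketch): where the per-heap statistics produced by
 * ion_debug_heap_show() end up.  ion_device_add_heap() below creates one
 * debugfs file per heap, named after heap->name, under the "ion"
 * directory set up in ion_device_create(), so with debugfs mounted in
 * the usual place the data can be read from:
 *
 *	/sys/kernel/debug/ion/<heap name>
 *
 * Each line is "client pid size" (kernel clients print their name in the
 * client column), followed by any heap-specific print_debug() output.
 */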
1639
1640void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1641{
1642 struct rb_node **p = &dev->heaps.rb_node;
1643 struct rb_node *parent = NULL;
1644 struct ion_heap *entry;
1645
1646 heap->dev = dev;
1647 mutex_lock(&dev->lock);
1648 while (*p) {
1649 parent = *p;
1650 entry = rb_entry(parent, struct ion_heap, node);
1651
Rebecca Schultz Zavine6ee1242011-06-30 12:19:55 -07001652 if (heap->id < entry->id) {
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001653 p = &(*p)->rb_left;
Rebecca Schultz Zavine6ee1242011-06-30 12:19:55 -07001654 } else if (heap->id > entry->id) {
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001655 p = &(*p)->rb_right;
1656 } else {
1657 pr_err("%s: cannot insert multiple heaps with id %d\n",
Rebecca Schultz Zavine6ee1242011-06-30 12:19:55 -07001658 __func__, heap->id);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001659 goto end;
1660 }
1661 }
1662
1663 rb_link_node(&heap->node, parent, p);
1664 rb_insert_color(&heap->node, &dev->heaps);
1665 debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1666 &debug_heap_fops);
1667end:
1668 mutex_unlock(&dev->lock);
1669}
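
/*
 * Example (sketch): the expected registration pattern for a board or
 * platform file.  Only ion_device_create()/ion_device_add_heap() and the
 * requirement that every heap carry a unique id come from this file;
 * my_heap_data, nr_my_heaps and my_create_heap() are hypothetical
 * stand-ins for the platform data and the heap-type-specific
 * constructors provided elsewhere in the driver.
 *
 *	struct ion_device *idev = ion_device_create(NULL);
 *	int i;
 *
 *	if (IS_ERR_OR_NULL(idev))
 *		return PTR_ERR(idev);
 *
 *	for (i = 0; i < nr_my_heaps; i++) {
 *		struct ion_heap *heap = my_create_heap(&my_heap_data[i]);
 *
 *		if (IS_ERR_OR_NULL(heap))
 *			continue;
 *		heap->name = my_heap_data[i].name;
 *		heap->id = my_heap_data[i].id;
 *		ion_device_add_heap(idev, heap);
 *	}
 */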
1670
Olav Haugan0a852512012-01-09 10:20:55 -08001671int ion_secure_heap(struct ion_device *dev, int heap_id)
1672{
1673 struct rb_node *n;
1674 int ret_val = 0;
1675
1676 /*
1677 * traverse the list of heaps available in this system
1678 * and find the heap that is specified.
1679 */
1680 mutex_lock(&dev->lock);
1681 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
1682 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
1683 if (heap->type != ION_HEAP_TYPE_CP)
1684 continue;
1685 if (ION_HEAP(heap->id) != heap_id)
1686 continue;
1687 if (heap->ops->secure_heap)
1688 ret_val = heap->ops->secure_heap(heap);
1689 else
1690 ret_val = -EINVAL;
1691 break;
1692 }
1693 mutex_unlock(&dev->lock);
1694 return ret_val;
1695}
Olav Haugan0a852512012-01-09 10:20:55 -08001696
1697int ion_unsecure_heap(struct ion_device *dev, int heap_id)
1698{
1699 struct rb_node *n;
1700 int ret_val = 0;
1701
1702 /*
1703 * traverse the list of heaps available in this system
1704 * and find the heap that is specified.
1705 */
1706 mutex_lock(&dev->lock);
1707 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
1708 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
1709 if (heap->type != ION_HEAP_TYPE_CP)
1710 continue;
1711 if (ION_HEAP(heap->id) != heap_id)
1712 continue;
1713 if (heap->ops->unsecure_heap)
1714 ret_val = heap->ops->unsecure_heap(heap);
1715 else
1716 ret_val = -EINVAL;
1717 break;
1718 }
1719 mutex_unlock(&dev->lock);
1720 return ret_val;
1721}
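
/*
 * Example (sketch): pairing the two helpers above around use of
 * content-protected memory.  The heap_id argument is matched against
 * ION_HEAP(heap->id), i.e. callers pass the ION_HEAP()-encoded form of a
 * heap id, and only heaps of type ION_HEAP_TYPE_CP are considered.
 * ION_CP_MM_HEAP_ID and use_protected_buffers() are assumed names for
 * the board's CP heap id and the caller's own work with it.
 *
 *	int ret = ion_secure_heap(idev, ION_HEAP(ION_CP_MM_HEAP_ID));
 *
 *	if (ret)
 *		return ret;
 *
 *	use_protected_buffers();
 *
 *	ion_unsecure_heap(idev, ION_HEAP(ION_CP_MM_HEAP_ID));
 */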
Olav Haugan0a852512012-01-09 10:20:55 -08001722
Laura Abbott404f8242011-10-31 14:22:53 -07001723static int ion_debug_leak_show(struct seq_file *s, void *unused)
1724{
1725 struct ion_device *dev = s->private;
1726 struct rb_node *n;
1727 struct rb_node *n2;
1728
1729 /* mark all buffers as 1 */
1730 seq_printf(s, "%16s %16s %16s %16s\n", "buffer", "heap", "size",
1731 "ref cnt");
1732 mutex_lock(&dev->lock);
1733 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1734 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
1735 node);
1736
1737 buf->marked = 1;
1738 }
1739
1740 /* now see which buffers we can access */
1741 for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
1742 struct ion_client *client = rb_entry(n, struct ion_client,
1743 node);
1744
1745 mutex_lock(&client->lock);
1746 for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
1747 struct ion_handle *handle = rb_entry(n2,
1748 struct ion_handle, node);
1749
1750 handle->buffer->marked = 0;
1751
1752 }
1753 mutex_unlock(&client->lock);
1754
1755 }
1756
1757 for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
1758 struct ion_client *client = rb_entry(n, struct ion_client,
1759 node);
1760
1761 mutex_lock(&client->lock);
1762 for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
1763 struct ion_handle *handle = rb_entry(n2,
1764 struct ion_handle, node);
1765
1766 handle->buffer->marked = 0;
1767
1768 }
1769 mutex_unlock(&client->lock);
1770
1771 }
1772 /* anything still marked 1 has no client handle: the buffer has leaked */
1773 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1774 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
1775 node);
1776
1777 if (buf->marked == 1)
1778 seq_printf(s, "%16p %16s %16zu %16d\n",
1779 buf, buf->heap->name, buf->size,
1780 atomic_read(&buf->ref.refcount));
1781 }
1782 mutex_unlock(&dev->lock);
1783 return 0;
1784}
1785
1786static int ion_debug_leak_open(struct inode *inode, struct file *file)
1787{
1788 return single_open(file, ion_debug_leak_show, inode->i_private);
1789}
1790
1791static const struct file_operations debug_leak_fops = {
1792 .open = ion_debug_leak_open,
1793 .read = seq_read,
1794 .llseek = seq_lseek,
1795 .release = single_release,
1796};
1797
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001800struct ion_device *ion_device_create(long (*custom_ioctl)
1801 (struct ion_client *client,
1802 unsigned int cmd,
1803 unsigned long arg))
1804{
1805 struct ion_device *idev;
1806 int ret;
1807
1808 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1809 if (!idev)
1810 return ERR_PTR(-ENOMEM);
1811
1812 idev->dev.minor = MISC_DYNAMIC_MINOR;
1813 idev->dev.name = "ion";
1814 idev->dev.fops = &ion_fops;
1815 idev->dev.parent = NULL;
1816 ret = misc_register(&idev->dev);
1817 if (ret) {
1818 pr_err("ion: failed to register misc device.\n");
kfree(idev);
1819 return ERR_PTR(ret);
1820 }
1821
1822 idev->debug_root = debugfs_create_dir("ion", NULL);
1823 if (IS_ERR_OR_NULL(idev->debug_root))
1824 pr_err("ion: failed to create debug files.\n");
1825
1826 idev->custom_ioctl = custom_ioctl;
1827 idev->buffers = RB_ROOT;
1828 mutex_init(&idev->lock);
1829 idev->heaps = RB_ROOT;
1830 idev->user_clients = RB_ROOT;
1831 idev->kernel_clients = RB_ROOT;
Laura Abbott404f8242011-10-31 14:22:53 -07001832 debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
1833 &debug_leak_fops);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001834 return idev;
1835}
1836
1837void ion_device_destroy(struct ion_device *dev)
1838{
1839 misc_deregister(&dev->dev);
1840 /* XXX need to free the heaps and clients ? */
1841 kfree(dev);
1842}
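
/*
 * Example (sketch): driver-lifetime pairing for the entry points above.
 * ion_device_create() registers the /dev/ion misc device and the debugfs
 * root, so it belongs in module/platform init; ion_device_destroy() only
 * deregisters the misc device and frees the struct (per the XXX note it
 * does not tear down heaps or clients), so callers must make sure none
 * remain.  my_custom_ioctl() is a hypothetical hook; pass NULL if no
 * device-specific ioctls are needed.
 *
 *	static struct ion_device *idev;
 *
 *	static int __init my_ion_init(void)
 *	{
 *		idev = ion_device_create(my_custom_ioctl);
 *		if (IS_ERR_OR_NULL(idev))
 *			return PTR_ERR(idev);
 *		return 0;
 *	}
 *
 *	static void __exit my_ion_exit(void)
 *	{
 *		ion_device_destroy(idev);
 *	}
 */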