/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include "ion_priv.h"
#define DEBUG

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	optional device-private ioctl handler, may be NULL
 * @user_clients:	list of all the clients created from userspace
 * @kernel_clients:	list of all the clients created from the kernel
 * @debug_root:		root dentry of this device's debugfs entries
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root user_clients;
	struct rb_root kernel_clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @ref:		for reference counting the client
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client's group leader, used for debugging
 * @debug_root:		this client's entry in debugfs
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves; it should be held while modifying either.
 */
struct ion_client {
	struct kref ref;
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @dmap_cnt:		count of times this client has mapped for dma
 * @usermap_cnt:	count of times this client has mapped for userspace
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int dmap_cnt;
	unsigned int usermap_cnt;
};

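/*
 * Illustrative sketch of the kernel-side client API implemented below.
 * The ion_device (idev) is assumed to come from the platform's
 * ion_device_create() call, and heap ids are platform specific, so
 * SOME_HEAP_ID is only a placeholder.  Error handling is omitted.
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *	void *vaddr;
 *
 *	client = ion_client_create(idev, 1 << ION_HEAP_TYPE_SYSTEM, "example");
 *	handle = ion_alloc(client, SZ_4K, SZ_4K, 1 << SOME_HEAP_ID);
 *	vaddr = ion_map_kernel(client, handle, ION_SET_CACHE(CACHED));
 *	...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */
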
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	int ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}
	buffer->dev = dev;
	buffer->size = len;
	mutex_init(&buffer->lock);
	ion_buffer_add(dev, buffer);
	return buffer;
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

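/*
 * Buffer lifetime: ion_buffer_create() hands back a buffer with a single
 * reference and links it into dev->buffers; each handle created for it
 * takes a further reference via ion_buffer_get().  When the last
 * reference is dropped, ion_buffer_destroy() releases the heap memory and
 * unlinks the buffer from the device tree under dev->lock.
 */
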
static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	/* XXX Can a handle be destroyed while its map count is non-zero?:
	   if (handle->map_cnt) unmap
	 */
	ion_buffer_put(handle->buffer);
	mutex_lock(&handle->client->lock);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &handle->client->handles);
	mutex_unlock(&handle->client->lock);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & flags))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	mutex_unlock(&dev->lock);

	if (IS_ERR_OR_NULL(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	if (IS_ERR_OR_NULL(handle))
		goto end;

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	mutex_lock(&client->lock);
	ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	return handle;

end:
	ion_buffer_put(buffer);
	return handle;
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
}

static void ion_client_get(struct ion_client *client);
static int ion_client_put(struct ion_client *client);

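/*
 * _ion_map()/_ion_unmap() keep two counters in step: a per-handle count
 * (how many times this client has mapped the buffer) and a per-buffer
 * count (how many handles currently have it mapped).  _ion_map() returns
 * true only when the underlying heap mapping still has to be created, and
 * _ion_unmap() returns true only when the last mapping has gone away and
 * the heap mapping can be torn down.  Callers hold the client and buffer
 * locks around these helpers.
 */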
static bool _ion_map(int *buffer_cnt, int *handle_cnt)
{
	bool map;

	BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0);

	if (*buffer_cnt)
		map = false;
	else
		map = true;
	if (*handle_cnt == 0)
		(*buffer_cnt)++;
	(*handle_cnt)++;
	return map;
}

static bool _ion_unmap(int *buffer_cnt, int *handle_cnt)
{
	BUG_ON(*handle_cnt == 0);
	(*handle_cnt)--;
	if (*handle_cnt != 0)
		return false;
	BUG_ON(*buffer_cnt == 0);
	(*buffer_cnt)--;
	if (*buffer_cnt == 0)
		return true;
	return false;
}

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
		     unsigned long flags)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
		if (buffer->flags != flags) {
			pr_err("%s: buffer was already mapped with flags %lx,"
			       " cannot map with flags %lx\n", __func__,
			       buffer->flags, flags);
			vaddr = ERR_PTR(-EEXIST);
			goto out;
		}
	} else {
		buffer->flags = flags;
	}

	if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer,
						      flags);
		if (IS_ERR_OR_NULL(vaddr))
			_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
		buffer->vaddr = vaddr;
	} else {
		vaddr = buffer->vaddr;
	}

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}

struct scatterlist *ion_map_dma(struct ion_client *client,
				struct ion_handle *handle,
				unsigned long flags)
{
	struct ion_buffer *buffer;
	struct scatterlist *sglist;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_dma) {
		pr_err("%s: map_dma is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
		if (buffer->flags != flags) {
			pr_err("%s: buffer was already mapped with flags %lx,"
			       " cannot map with flags %lx\n", __func__,
			       buffer->flags, flags);
			sglist = ERR_PTR(-EEXIST);
			goto out;
		}
	} else {
		buffer->flags = flags;
	}

	if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
		sglist = buffer->heap->ops->map_dma(buffer->heap, buffer);
		if (IS_ERR_OR_NULL(sglist))
			_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt);
		buffer->sglist = sglist;
	} else {
		sglist = buffer->sglist;
	}

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return sglist;
}

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}

void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) {
		buffer->heap->ops->unmap_dma(buffer->heap, buffer);
		buffer->sglist = NULL;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}

struct ion_buffer *ion_share(struct ion_client *client,
			     struct ion_handle *handle)
{
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/* do not take an extra reference here, the burden is on the caller
	 * to make sure the buffer doesn't go away while it's passing it
	 * to another client -- ion_free should not be called on this handle
	 * until the buffer has been imported into the other client
	 */
	return handle->buffer;
}

struct ion_handle *ion_import(struct ion_client *client,
			      struct ion_buffer *buffer)
{
	struct ion_handle *handle = NULL;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	return handle;
}

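/*
 * Returns 0 when the range [start, end) lies entirely within a single VMA
 * of the current mm, and non-zero otherwise.  Used to sanity check the
 * user address handed to the cache maintenance ioctls before it is passed
 * on to the heap's cache_op.
 */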
static int check_vaddr_bounds(unsigned long start, unsigned long end)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *vma;
	int ret = 1;

	if (end < start)
		goto out;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, start);
	if (vma && vma->vm_start < end) {
		if (start < vma->vm_start)
			goto out_up;
		if (end > vma->vm_end)
			goto out_up;
		ret = 0;
	}

out_up:
	up_read(&mm->mmap_sem);
out:
	return ret;
}

int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
		    void *uaddr, unsigned long offset, unsigned long len,
		    unsigned int cmd)
{
	struct ion_buffer *buffer;
	unsigned long start, end;
	int ret = -EINVAL;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to do_cache_op.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	/* uncached buffers have nothing in the cache to maintain */
	if (!ION_IS_CACHED(buffer->flags)) {
		ret = 0;
		goto out;
	}

	if (!handle->buffer->heap->ops->cache_op) {
		pr_err("%s: cache_op is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	start = (unsigned long) uaddr;
	end = (unsigned long) uaddr + len;

	if (check_vaddr_bounds(start, end)) {
		pr_err("%s: virtual address %p is out of bounds\n",
		       __func__, uaddr);
		goto out;
	}

	ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
					  offset, len, cmd);

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}

static const struct file_operations ion_share_fops;

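/*
 * Import a buffer that was previously exported with ION_IOC_SHARE.  The
 * fd is only accepted if its file really is an ion share file, which is
 * checked by comparing file->f_op against ion_share_fops (forward
 * declared above); the shared buffer itself lives in file->private_data.
 */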
struct ion_handle *ion_import_fd(struct ion_client *client, int fd)
{
	struct file *file = fget(fd);
	struct ion_handle *handle;

	if (!file) {
		pr_err("%s: imported fd not found in file table.\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	if (file->f_op != &ion_share_fops) {
		pr_err("%s: imported file is not a shared ion file.\n",
		       __func__);
		handle = ERR_PTR(-EINVAL);
		goto end;
	}
	handle = ion_import(client, file->private_data);
end:
	fput(file);
	return handle;
}

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAPS] = {0};
	const char *names[ION_NUM_HEAPS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		if (!names[type])
			names[type] = handle->buffer->heap->name;
		sizes[type] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAPS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16u %d\n", names[i], sizes[i],
			   atomic_read(&client->ref.refcount));
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

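/*
 * Find the ion client belonging to a userspace task, keyed by the task's
 * group leader.  On success a reference is taken with ion_client_get(),
 * so callers must balance it with ion_client_put().
 */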
static struct ion_client *ion_client_lookup(struct ion_device *dev,
					    struct task_struct *task)
{
	struct rb_node *n = dev->user_clients.rb_node;
	struct ion_client *client;

	mutex_lock(&dev->lock);
	while (n) {
		client = rb_entry(n, struct ion_client, node);
		if (task == client->task) {
			ion_client_get(client);
			mutex_unlock(&dev->lock);
			return client;
		} else if (task < client->task) {
			n = n->rb_left;
		} else if (task > client->task) {
			n = n->rb_right;
		}
	}
	mutex_unlock(&dev->lock);
	return NULL;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	/* if this isn't a kernel thread, see if a client already
	   exists */
	if (task) {
		client = ion_client_lookup(dev, task);
		if (!IS_ERR_OR_NULL(client)) {
			put_task_struct(current->group_leader);
			return client;
		}
	}

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;
	kref_init(&client->ref);

	mutex_lock(&dev->lock);
	if (task) {
		p = &dev->user_clients.rb_node;
		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct ion_client, node);

			if (task < entry->task)
				p = &(*p)->rb_left;
			else if (task > entry->task)
				p = &(*p)->rb_right;
		}
		rb_link_node(&client->node, parent, p);
		rb_insert_color(&client->node, &dev->user_clients);
	} else {
		p = &dev->kernel_clients.rb_node;
		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct ion_client, node);

			if (client < entry)
				p = &(*p)->rb_left;
			else if (client > entry)
				p = &(*p)->rb_right;
		}
		rb_link_node(&client->node, parent, p);
		rb_insert_color(&client->node, &dev->kernel_clients);
	}

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

static void _ion_client_destroy(struct kref *kref)
{
	struct ion_client *client = container_of(kref, struct ion_client, ref);
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task) {
		rb_erase(&client->node, &dev->user_clients);
		put_task_struct(client->task);
	} else {
		rb_erase(&client->node, &dev->kernel_clients);
	}
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client);
}

static void ion_client_get(struct ion_client *client)
{
	kref_get(&client->ref);
}

static int ion_client_put(struct ion_client *client)
{
	return kref_put(&client->ref, _ion_client_destroy);
}

void ion_client_destroy(struct ion_client *client)
{
	ion_client_put(client);
}

static int ion_share_release(struct inode *inode, struct file *file)
{
	struct ion_buffer *buffer = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	mutex_lock(&buffer->lock);
	buffer->umap_cnt--;
	mutex_unlock(&buffer->lock);
	/* drop the reference to the buffer -- this prevents the
	   buffer from going away because the client holding it exited
	   while it was being passed */
	ion_buffer_put(buffer);
	return 0;
}

static void ion_vma_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_file->private_data;
	struct ion_handle *handle = vma->vm_private_data;
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* check that the client still exists and take a reference so
	   it can't go away until this vma is closed */
	client = ion_client_lookup(buffer->dev, current->group_leader);
	if (IS_ERR_OR_NULL(client)) {
		vma->vm_private_data = NULL;
		return;
	}
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
}

static void ion_vma_close(struct vm_area_struct *vma)
{
	struct ion_handle *handle = vma->vm_private_data;
	struct ion_buffer *buffer = vma->vm_file->private_data;
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* this indicates the client is gone, nothing to do here */
	if (!handle)
		return;
	client = handle->client;
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
	ion_handle_put(handle);
	ion_client_put(client);
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
}

static struct vm_operations_struct ion_vm_ops = {
	.open = ion_vma_open,
	.close = ion_vma_close,
};

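/*
 * mmap() handler for the fd produced by ION_IOC_SHARE/ION_IOC_MAP.  The
 * caller's ion client is looked up via its group leader, the buffer is
 * imported into that client to obtain a handle, and the cache behaviour
 * of the mapping is derived from the fd's O_DSYNC flag (O_DSYNC selects
 * an uncached mapping).  The handle is stashed in vm_private_data so that
 * ion_vma_open()/ion_vma_close() can track the mapping's lifetime.
 */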
static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = file->private_data;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct ion_client *client;
	struct ion_handle *handle;
	int ret;
	unsigned long flags = file->f_flags & O_DSYNC ?
			ION_SET_CACHE(UNCACHED) :
			ION_SET_CACHE(CACHED);

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* make sure the client still exists, it's possible for the client to
	   have gone away but the map/share fd still to be around, take
	   a reference to it so it can't go away while this mapping exists */
	client = ion_client_lookup(buffer->dev, current->group_leader);
	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: trying to mmap an ion handle in a process with no "
		       "ion client\n", __func__);
		return -EINVAL;
	}

	if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) >
				     buffer->size)) {
		pr_err("%s: trying to map larger area than handle has available"
		       "\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* find the handle and take a reference to it */
	handle = ion_import(client, buffer);
	if (IS_ERR_OR_NULL(handle)) {
		ret = -EINVAL;
		goto err;
	}

	if (!handle->buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		ret = -EINVAL;
		goto err1;
	}

	mutex_lock(&buffer->lock);
	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
		if (buffer->flags != flags) {
			pr_err("%s: buffer was already mapped with flags %lx,"
			       " cannot map with flags %lx\n", __func__,
			       buffer->flags, flags);
			ret = -EEXIST;
			mutex_unlock(&buffer->lock);
			goto err1;
		}
	} else {
		buffer->flags = flags;
	}
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma,
					  flags);
	buffer->umap_cnt++;
	mutex_unlock(&buffer->lock);
	if (ret) {
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);
		goto err2;
	}

	vma->vm_ops = &ion_vm_ops;
	/* move the handle into the vm_private_data so we can access it from
	   vma_open/close */
	vma->vm_private_data = handle;
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
	return 0;

err2:
	buffer->umap_cnt--;
	/* drop the reference to the handle */
err1:
	ion_handle_put(handle);
err:
	/* drop the reference to the client */
	ion_client_put(client);
	return ret;
}

static const struct file_operations ion_share_fops = {
	.owner = THIS_MODULE,
	.release = ion_share_release,
	.mmap = ion_share_mmap,
};

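/*
 * Back-end for ION_IOC_SHARE/ION_IOC_MAP: wrap the handle's buffer in an
 * anonymous inode file backed by ion_share_fops and install it in a new
 * fd.  A buffer reference is taken here and dropped in ion_share_release()
 * when the last user of the fd closes it.
 */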
static int ion_ioctl_share(struct file *parent, struct ion_client *client,
			   struct ion_handle *handle)
{
	int fd = get_unused_fd();
	struct file *file;

	if (fd < 0)
		return -ENFILE;

	file = anon_inode_getfile("ion_share_fd", &ion_share_fops,
				  handle->buffer, O_RDWR);
	if (IS_ERR_OR_NULL(file))
		goto err;

	if (parent->f_flags & O_DSYNC)
		file->f_flags |= O_DSYNC;

	ion_buffer_get(handle->buffer);
	fd_install(fd, file);

	return fd;

err:
	put_unused_fd(fd);
	return -ENFILE;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.flags);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_MAP:
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		if (!ion_handle_validate(client, data.handle)) {
			pr_err("%s: invalid handle passed to share ioctl.\n",
			       __func__);
			mutex_unlock(&client->lock);
			return -EINVAL;
		}
		data.fd = ion_ioctl_share(filp, client, data.handle);
		mutex_unlock(&client->lock);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;

		data.handle = ion_import_fd(client, data.fd);
		if (IS_ERR(data.handle))
			data.handle = NULL;
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	case ION_IOC_CLEAN_CACHES:
	case ION_IOC_INV_CACHES:
	case ION_IOC_CLEAN_INV_CACHES:
	{
		struct ion_flush_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_flush_data)))
			return -EFAULT;

		return ion_do_cache_op(client, data.handle, data.vaddr,
				       data.offset, data.length, cmd);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_put(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, -1, "user");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;

	seq_printf(s, "%16.16s %16.16s %16.16s\n", "client", "pid", "size");
	for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		char task_comm[TASK_COMM_LEN];
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;

		get_task_comm(task_comm, client->task);
		seq_printf(s, "%16.16s %16u %16u\n", task_comm, client->pid,
			   size);
	}

	for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;
		seq_printf(s, "%16.16s %16u %16u\n", client->name, client->pid,
			   size);
	}
	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

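/*
 * Heaps are kept in dev->heaps in an rbtree ordered by heap->id.  Since
 * ion_alloc() walks the tree with rb_first()/rb_next(), a lower id means
 * the heap is tried earlier, i.e. the id doubles as the allocation
 * priority.
 */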
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: can not insert multiple heaps with "
			       "id %d\n", __func__, heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->user_clients = RB_ROOT;
	idev->kernel_clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}