/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include "ion_priv.h"
#define DEBUG

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	device-specific ioctl handler, may be NULL
 * @user_clients:	list of all the clients created from userspace
 * @kernel_clients:	list of all the clients created from the kernel
 * @debug_root:		debugfs root directory for this device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root user_clients;
	struct rb_root kernel_clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @ref:		for reference counting the client
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the task that created this client
 * @debug_root:		debugfs entry for this client
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct kref ref;
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @dmap_cnt:		count of times this client has mapped for dma
 * @usermap_cnt:	count of times this client has mapped for userspace
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int dmap_cnt;
	unsigned int usermap_cnt;
};

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	int ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}
	buffer->dev = dev;
	buffer->size = len;
	mutex_init(&buffer->lock);
	ion_buffer_add(dev, buffer);
	return buffer;
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	/* XXX Can a handle be destroyed while its map count is non-zero?:
	   if (handle->map_cnt) unmap
	 */
	ion_buffer_put(handle->buffer);
	mutex_lock(&handle->client->lock);
	rb_erase(&handle->node, &handle->client->handles);
	mutex_unlock(&handle->client->lock);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: handle already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

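/**
 * ion_alloc - allocate a buffer and return a new handle to it
 * @client:	the client the returned handle will belong to
 * @len:	requested size of the allocation in bytes
 * @align:	requested alignment, passed through to the heap's allocate op
 * @flags:	mask of heap ids to allocate from; heaps are tried in id order
 *		and the first heap that is both present in @flags and allowed
 *		by the client's heap_mask satisfies the request
 *
 * Returns a handle on success or an ERR_PTR on failure.
 */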
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & flags))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	mutex_unlock(&dev->lock);

	if (IS_ERR_OR_NULL(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	if (IS_ERR_OR_NULL(handle))
		goto end;

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	mutex_lock(&client->lock);
	ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	return handle;

end:
	ion_buffer_put(buffer);
	return handle;
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	ion_handle_put(handle);
}

static void ion_client_get(struct ion_client *client);
static int ion_client_put(struct ion_client *client);

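/*
 * _ion_map and _ion_unmap keep two map counts in step: handle_cnt tracks how
 * many times this client has mapped the buffer through a given handle, and
 * buffer_cnt tracks how many handles currently have the buffer mapped.
 * _ion_map returns true only when buffer_cnt goes from zero to non-zero,
 * i.e. when the caller must actually create the underlying mapping;
 * _ion_unmap returns true when the last mapping is dropped and the caller
 * must tear the mapping down.
 */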
bool _ion_map(int *buffer_cnt, int *handle_cnt)
{
	bool map;

	BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0);

	if (*buffer_cnt)
		map = false;
	else
		map = true;
	if (*handle_cnt == 0)
		(*buffer_cnt)++;
	(*handle_cnt)++;
	return map;
}

bool _ion_unmap(int *buffer_cnt, int *handle_cnt)
{
	BUG_ON(*handle_cnt == 0);
	(*handle_cnt)--;
	if (*handle_cnt != 0)
		return false;
	BUG_ON(*buffer_cnt == 0);
	(*buffer_cnt)--;
	if (*buffer_cnt == 0)
		return true;
	return false;
}

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to ion_phys.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
		if (IS_ERR_OR_NULL(vaddr))
			_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
		buffer->vaddr = vaddr;
	} else {
		vaddr = buffer->vaddr;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}

struct scatterlist *ion_map_dma(struct ion_client *client,
				struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct scatterlist *sglist;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_dma) {
		pr_err("%s: map_dma is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}
	if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
		sglist = buffer->heap->ops->map_dma(buffer->heap, buffer);
		if (IS_ERR_OR_NULL(sglist))
			_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt);
		buffer->sglist = sglist;
	} else {
		sglist = buffer->sglist;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return sglist;
}

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}

void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) {
		buffer->heap->ops->unmap_dma(buffer->heap, buffer);
		buffer->sglist = NULL;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}


struct ion_buffer *ion_share(struct ion_client *client,
			     struct ion_handle *handle)
{
	/* do not take an extra reference here; the burden is on the caller
	 * to make sure the buffer doesn't go away while it's passing it
	 * to another client -- ion_free should not be called on this handle
	 * until the buffer has been imported into the other client
	 */
	return handle->buffer;
}

struct ion_handle *ion_import(struct ion_client *client,
			      struct ion_buffer *buffer)
{
	struct ion_handle *handle = NULL;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	return handle;
}

static const struct file_operations ion_share_fops;

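/*
 * Resolve a file descriptor created by ION_IOC_SHARE/ION_IOC_MAP back into a
 * local handle.  Such an fd wraps an anonymous inode whose private_data
 * points at the underlying ion_buffer (see ion_ioctl_share() below), so the
 * buffer can simply be imported into this client.
 */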
struct ion_handle *ion_import_fd(struct ion_client *client, int fd)
{
	struct file *file = fget(fd);
	struct ion_handle *handle;

	if (!file) {
		pr_err("%s: imported fd not found in file table.\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	if (file->f_op != &ion_share_fops) {
		pr_err("%s: imported file is not a shared ion file.\n",
		       __func__);
		handle = ERR_PTR(-EINVAL);
		goto end;
	}
	handle = ion_import(client, file->private_data);
end:
	fput(file);
	return handle;
}

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAPS] = {0};
	const char *names[ION_NUM_HEAPS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		if (!names[type])
			names[type] = handle->buffer->heap->name;
		sizes[type] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAPS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16u %d\n", names[i], sizes[i],
			   atomic_read(&client->ref.refcount));
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

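/**
 * ion_client_create - create a client and register it with the device
 * @dev:	the ion device
 * @heap_mask:	mask of heap types this client may allocate from
 * @name:	used for debugging
 *
 * Clients created on behalf of a user process are keyed in the device's
 * user_clients tree by the process's group leader, so a later
 * ion_client_lookup() from the same process returns the same client;
 * clients created from the kernel live in the kernel_clients tree instead.
 */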
struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);
	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	client->pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);
	client->task = task;
	kref_init(&client->ref);

	mutex_lock(&dev->lock);
	if (task) {
		p = &dev->user_clients.rb_node;
		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct ion_client, node);

			if (task < entry->task)
				p = &(*p)->rb_left;
			else if (task > entry->task)
				p = &(*p)->rb_right;
		}
		rb_link_node(&client->node, parent, p);
		rb_insert_color(&client->node, &dev->user_clients);
	} else {
		p = &dev->kernel_clients.rb_node;
		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct ion_client, node);

			if (client < entry)
				p = &(*p)->rb_left;
			else if (client > entry)
				p = &(*p)->rb_right;
		}
		rb_link_node(&client->node, parent, p);
		rb_insert_color(&client->node, &dev->kernel_clients);
	}

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task) {
		rb_erase(&client->node, &dev->user_clients);
		put_task_struct(client->task);
	} else {
		rb_erase(&client->node, &dev->kernel_clients);
	}
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client);
}

static struct ion_client *ion_client_lookup(struct ion_device *dev,
					    struct task_struct *task)
{
	struct rb_node *n = dev->user_clients.rb_node;
	struct ion_client *client;

	mutex_lock(&dev->lock);
	while (n) {
		client = rb_entry(n, struct ion_client, node);
		if (task == client->task) {
			ion_client_get(client);
			mutex_unlock(&dev->lock);
			return client;
		} else if (task < client->task) {
			n = n->rb_left;
		} else if (task > client->task) {
			n = n->rb_right;
		}
	}
	mutex_unlock(&dev->lock);
	return NULL;
}

static void _ion_client_destroy(struct kref *kref)
{
	struct ion_client *client = container_of(kref, struct ion_client, ref);
	ion_client_destroy(client);
}

static void ion_client_get(struct ion_client *client)
{
	kref_get(&client->ref);
}

static int ion_client_put(struct ion_client *client)
{
	return kref_put(&client->ref, _ion_client_destroy);
}

static int ion_share_release(struct inode *inode, struct file *file)
{
	struct ion_buffer *buffer = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* drop the reference to the buffer -- this prevents the
	   buffer from going away because the client holding it exited
	   while it was being passed */
	ion_buffer_put(buffer);
	return 0;
}

static void ion_vma_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_file->private_data;
	struct ion_handle *handle = vma->vm_private_data;
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* check that the client still exists and take a reference so
	   it can't go away until this vma is closed */
	client = ion_client_lookup(buffer->dev, current->group_leader);
	if (IS_ERR_OR_NULL(client)) {
		vma->vm_private_data = NULL;
		return;
	}
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
}

static void ion_vma_close(struct vm_area_struct *vma)
{
	struct ion_handle *handle = vma->vm_private_data;
	struct ion_buffer *buffer = vma->vm_file->private_data;
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* this indicates the client is gone, nothing to do here */
	if (!handle)
		return;
	client = handle->client;
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
	ion_handle_put(handle);
	ion_client_put(client);
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
}

static struct vm_operations_struct ion_vm_ops = {
	.open = ion_vma_open,
	.close = ion_vma_close,
};

static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = file->private_data;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct ion_client *client;
	struct ion_handle *handle;
	int ret;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* make sure the client still exists, it's possible for the client to
	   have gone away but the map/share fd still to be around, take
	   a reference to it so it can't go away while this mapping exists */
	client = ion_client_lookup(buffer->dev, current->group_leader);
	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: trying to mmap an ion handle in a process with no "
		       "ion client\n", __func__);
		return -EINVAL;
	}

	if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) >
				      buffer->size)) {
		pr_err("%s: trying to map larger area than handle has available"
		       "\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* find the handle and take a reference to it */
	handle = ion_import(client, buffer);
	if (IS_ERR_OR_NULL(handle)) {
		ret = -EINVAL;
		goto err;
	}

	if (!handle->buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		ret = -EINVAL;
		goto err1;
	}

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);
	if (ret) {
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);
		goto err1;
	}

	vma->vm_ops = &ion_vm_ops;
	/* move the handle into the vm_private_data so we can access it from
	   vma_open/close */
	vma->vm_private_data = handle;
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
	return 0;

err1:
	/* drop the reference to the handle */
	ion_handle_put(handle);
err:
	/* drop the reference to the client */
	ion_client_put(client);
	return ret;
}

static const struct file_operations ion_share_fops = {
	.owner = THIS_MODULE,
	.release = ion_share_release,
	.mmap = ion_share_mmap,
};

static int ion_ioctl_share(struct file *parent, struct ion_client *client,
			   struct ion_handle *handle)
{
	int fd = get_unused_fd();
	struct file *file;

	if (fd < 0)
		return -ENFILE;

	file = anon_inode_getfile("ion_share_fd", &ion_share_fops,
				  handle->buffer, O_RDWR);
	if (IS_ERR_OR_NULL(file))
		goto err;
	ion_buffer_get(handle->buffer);
	fd_install(fd, file);

	return fd;

err:
	put_unused_fd(fd);
	return -ENFILE;
}

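/*
 * Rough sketch of the expected userspace sequence (illustrative only; the
 * ioctl numbers and data structures come from the ion uapi header, the
 * device node name follows from the misc device registered in
 * ion_device_create() below, and length/alignment/heap_id_mask are
 * placeholder values):
 *
 *	int ion_fd = open("/dev/ion", O_RDWR);
 *	struct ion_allocation_data alloc_data = {
 *		.len = length, .align = alignment, .flags = heap_id_mask,
 *	};
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data);   (sets alloc_data.handle)
 *	struct ion_fd_data share_data = { .handle = alloc_data.handle };
 *	ioctl(ion_fd, ION_IOC_SHARE, &share_data);   (sets share_data.fd)
 *	void *p = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       share_data.fd, 0);
 *	...
 *	struct ion_handle_data free_data = { .handle = alloc_data.handle };
 *	ioctl(ion_fd, ION_IOC_FREE, &free_data);
 */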
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.flags);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_MAP:
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		if (!ion_handle_validate(client, data.handle)) {
			pr_err("%s: invalid handle passed to share ioctl.\n",
			       __func__);
			mutex_unlock(&client->lock);
			return -EINVAL;
		}
		data.fd = ion_ioctl_share(filp, client, data.handle);
		mutex_unlock(&client->lock);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;

		data.handle = ion_import_fd(client, data.fd);
		if (IS_ERR(data.handle))
			data.handle = NULL;
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_put(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_lookup(dev, current->group_leader);
	if (IS_ERR_OR_NULL(client)) {
		/* XXX: consider replacing "user" with cmdline */
		client = ion_client_create(dev, -1, "user");
		if (IS_ERR_OR_NULL(client))
			return PTR_ERR(client);
	}
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		char task_comm[TASK_COMM_LEN];
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;

		get_task_comm(task_comm, client->task);
		seq_printf(s, "%16s %16u %16zu\n", task_comm, client->pid,
			   size);
	}

	for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;
		seq_printf(s, "%16s %16u %16zu\n", client->name, client->pid,
			   size);
	}
	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

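/*
 * Heaps are kept in an rbtree keyed by heap->id.  Because ion_alloc() walks
 * this tree from rb_first() onwards, a heap's id also acts as its allocation
 * priority: heaps with lower ids are tried first.
 */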
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: can not insert multiple heaps with "
			       "id %d\n", __func__, heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->user_clients = RB_ROOT;
	idev->kernel_clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}