/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/* define DEBUG before the printk headers are pulled in so pr_debug() is
   compiled in rather than compiled out */
#define DEBUG

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev: the actual misc device
 * @buffers: an rb tree of all the existing buffers
 * @lock: lock protecting the buffers & heaps trees
 * @heaps: list of all the heaps in the system
 * @custom_ioctl: device specific ioctl handler, may be NULL
 * @user_clients: list of all the clients created from userspace
 * @kernel_clients: list of all the clients created from the kernel
 * @debug_root: root dentry for this device's debugfs files
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root user_clients;
	struct rb_root kernel_clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @ref: for reference counting the client
 * @node: node in the tree of all clients
 * @dev: backpointer to ion device
 * @handles: an rb tree of all the handles in this client
 * @lock: lock protecting the tree of handles
 * @heap_mask: mask of all supported heaps
 * @name: used for debugging
 * @task: used for debugging
 * @pid: pid of the task that created this client, used for debugging
 * @debug_root: this client's entry in debugfs
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect the handles tree as well as
 * the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct kref ref;
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};
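
/*
 * Illustrative sketch (not part of the driver): how a kernel-side user of
 * this API might create a client, allocate a buffer and map it.  The ion
 * device pointer (idev), the heap mask and the allocation flags below are
 * assumptions for the example; they must match heaps actually registered
 * with ion_device_add_heap().
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *	void *vaddr;
 *
 *	client = ion_client_create(idev, 0xffffffff, "example");
 *	if (IS_ERR_OR_NULL(client))
 *		return PTR_ERR(client);
 *	handle = ion_alloc(client, SZ_4K, SZ_4K, 0x1);
 *	if (IS_ERR_OR_NULL(handle))
 *		goto out_client;
 *	vaddr = ion_map_kernel(client, handle);
 *	if (!IS_ERR_OR_NULL(vaddr))
 *		memset(vaddr, 0, SZ_4K);
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 * out_client:
 *	ion_client_destroy(client);
 */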

/**
 * ion_handle - a client local reference to a buffer
 * @ref: reference count
 * @client: back pointer to the client the buffer resides in
 * @buffer: pointer to the buffer
 * @node: node in the client's handle rbtree
 * @kmap_cnt: count of times this client has mapped the buffer to the kernel
 * @dmap_cnt: count of times this client has mapped the buffer for dma
 * @usermap_cnt: count of times this client has mapped the buffer to userspace
 *
 * Modifications to node or the map counts should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int dmap_cnt;
	unsigned int usermap_cnt;
};

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry)
			p = &(*p)->rb_left;
		else if (buffer > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	int ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}
	buffer->dev = dev;
	buffer->size = len;
	mutex_init(&buffer->lock);
	ion_buffer_add(dev, buffer);
	return buffer;
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	/* XXX Can a handle be destroyed while its map count is non-zero?:
	   if (handle->map_cnt) unmap
	 */
	ion_buffer_put(handle->buffer);
	mutex_lock(&handle->client->lock);
	rb_erase(&handle->node, &handle->client->handles);
	mutex_unlock(&handle->client->lock);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: handle already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches
	 * the request of the caller, allocate from it.  Repeat until the
	 * allocation has succeeded or all heaps have been tried.
	 */
	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap */
		if (!((1 << heap->prio) & flags))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);
	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	if (IS_ERR_OR_NULL(handle))
		goto end;

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	mutex_lock(&client->lock);
	ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	return handle;

end:
	ion_buffer_put(buffer);
	return handle;
}
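
/*
 * Illustrative sketch (not part of the driver): how the heap selection above
 * is driven from a caller.  A heap is only considered when its type bit is
 * set in client->heap_mask *and* its priority bit is set in the flags
 * argument, and heaps are walked in ascending priority order, so the first
 * matching heap wins.  The priority value used below (0) is an assumption
 * for the example.
 *
 *	// allocate 1 MB, 4 KB aligned, only from the heap registered at prio 0
 *	handle = ion_alloc(client, SZ_1M, SZ_4K, 1 << 0);
 */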

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	ion_handle_put(handle);
}

static void ion_client_get(struct ion_client *client);
static int ion_client_put(struct ion_client *client);

bool _ion_map(int *buffer_cnt, int *handle_cnt)
{
	bool map;

	BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0);

	if (*buffer_cnt)
		map = false;
	else
		map = true;
	if (*handle_cnt == 0)
		(*buffer_cnt)++;
	(*handle_cnt)++;
	return map;
}

bool _ion_unmap(int *buffer_cnt, int *handle_cnt)
{
	BUG_ON(*handle_cnt == 0);
	(*handle_cnt)--;
	if (*handle_cnt != 0)
		return false;
	BUG_ON(*buffer_cnt == 0);
	(*buffer_cnt)--;
	if (*buffer_cnt == 0)
		return true;
	return false;
}
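
/*
 * Illustrative walkthrough (not part of the driver) of the two counters
 * above for one buffer mapped through two handles, h1 and h2:
 *
 *	_ion_map(&buf_cnt, &h1_cnt)   -> true   (buf_cnt 1, h1_cnt 1: do the map)
 *	_ion_map(&buf_cnt, &h2_cnt)   -> false  (buf_cnt 2, h2_cnt 1: reuse it)
 *	_ion_unmap(&buf_cnt, &h2_cnt) -> false  (buf_cnt 1, h2_cnt 0)
 *	_ion_unmap(&buf_cnt, &h1_cnt) -> true   (buf_cnt 0, h1_cnt 0: do the unmap)
 *
 * i.e. the heap's map/unmap ops only run on the first map and the last unmap
 * of the underlying buffer.
 */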

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to ion_phys.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
		if (IS_ERR_OR_NULL(vaddr))
			_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
		buffer->vaddr = vaddr;
	} else {
		vaddr = buffer->vaddr;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}

struct scatterlist *ion_map_dma(struct ion_client *client,
				struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct scatterlist *sglist;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_dma) {
		pr_err("%s: map_dma is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}
	if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
		sglist = buffer->heap->ops->map_dma(buffer->heap, buffer);
		if (IS_ERR_OR_NULL(sglist))
			_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt);
		buffer->sglist = sglist;
	} else {
		sglist = buffer->sglist;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return sglist;
}

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}

void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) {
		buffer->heap->ops->unmap_dma(buffer->heap, buffer);
		buffer->sglist = NULL;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
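
/*
 * Illustrative sketch (not part of the driver): pairing the dma map/unmap
 * calls from a kernel user.  Walking the returned scatterlist with
 * sg_dma_address()/sg_dma_len()/sg_next() and the program_hw() callback are
 * assumptions about how a caller would consume the list; this file only
 * hands the list back from the heap.
 *
 *	struct scatterlist *sg = ion_map_dma(client, handle);
 *
 *	if (!IS_ERR_OR_NULL(sg)) {
 *		for (; sg; sg = sg_next(sg))
 *			program_hw(sg_dma_address(sg), sg_dma_len(sg));
 *		ion_unmap_dma(client, handle);
 *	}
 */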

struct ion_buffer *ion_share(struct ion_client *client,
			     struct ion_handle *handle)
{
	/* do not take an extra reference here, the burden is on the caller
	 * to make sure the buffer doesn't go away while it's being passed
	 * to another client -- ion_free should not be called on this handle
	 * until the buffer has been imported into the other client
	 */
	return handle->buffer;
}

struct ion_handle *ion_import(struct ion_client *client,
			      struct ion_buffer *buffer)
{
	struct ion_handle *handle = NULL;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	return handle;
}

static const struct file_operations ion_share_fops;

struct ion_handle *ion_import_fd(struct ion_client *client, int fd)
{
	struct file *file = fget(fd);
	struct ion_handle *handle;

	if (!file) {
		pr_err("%s: imported fd not found in file table.\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	if (file->f_op != &ion_share_fops) {
		pr_err("%s: imported file is not a shared ion file.\n",
		       __func__);
		handle = ERR_PTR(-EINVAL);
		goto end;
	}
	handle = ion_import(client, file->private_data);
end:
	fput(file);
	return handle;
}
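
/*
 * Illustrative sketch (not part of the driver): passing a buffer between two
 * clients.  Client A obtains an fd for the buffer through the ION_IOC_SHARE
 * ioctl (see ion_ioctl_share() below) and must not free its handle until the
 * import has happened; client B then turns the fd back into a local handle.
 * The client/handle/fd variable names are assumptions for the example.
 *
 *	struct ion_handle *handle_b;
 *
 *	handle_b = ion_import_fd(client_b, shared_fd);
 *	if (IS_ERR_OR_NULL(handle_b))
 *		return -EINVAL;
 *	// handle_b now references the same ion_buffer as client A's handle
 */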

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAPS] = {0};
	const char *names[ION_NUM_HEAPS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		if (!names[type])
			names[type] = handle->buffer->heap->name;
		sizes[type] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAPS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu %d\n", names[i], sizes[i],
			   atomic_read(&client->ref.refcount));
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);
	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	client->pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);
	client->task = task;
	kref_init(&client->ref);

	mutex_lock(&dev->lock);
	if (task) {
		p = &dev->user_clients.rb_node;
		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct ion_client, node);

			if (task < entry->task)
				p = &(*p)->rb_left;
			else if (task > entry->task)
				p = &(*p)->rb_right;
		}
		rb_link_node(&client->node, parent, p);
		rb_insert_color(&client->node, &dev->user_clients);
	} else {
		p = &dev->kernel_clients.rb_node;
		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct ion_client, node);

			if (client < entry)
				p = &(*p)->rb_left;
			else if (client > entry)
				p = &(*p)->rb_right;
		}
		rb_link_node(&client->node, parent, p);
		rb_insert_color(&client->node, &dev->kernel_clients);
	}

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task) {
		rb_erase(&client->node, &dev->user_clients);
		put_task_struct(client->task);
	} else {
		rb_erase(&client->node, &dev->kernel_clients);
	}
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client);
}

static struct ion_client *ion_client_lookup(struct ion_device *dev,
					    struct task_struct *task)
{
	struct rb_node *n = dev->user_clients.rb_node;
	struct ion_client *client;

	mutex_lock(&dev->lock);
	while (n) {
		client = rb_entry(n, struct ion_client, node);
		if (task == client->task) {
			ion_client_get(client);
			mutex_unlock(&dev->lock);
			return client;
		} else if (task < client->task) {
			n = n->rb_left;
		} else if (task > client->task) {
			n = n->rb_right;
		}
	}
	mutex_unlock(&dev->lock);
	return NULL;
}

static void _ion_client_destroy(struct kref *kref)
{
	struct ion_client *client = container_of(kref, struct ion_client, ref);
	ion_client_destroy(client);
}

static void ion_client_get(struct ion_client *client)
{
	kref_get(&client->ref);
}

static int ion_client_put(struct ion_client *client)
{
	return kref_put(&client->ref, _ion_client_destroy);
}

static int ion_share_release(struct inode *inode, struct file *file)
{
	struct ion_buffer *buffer = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* drop the reference to the buffer -- this prevents the
	   buffer from going away because the client holding it exited
	   while it was being passed */
	ion_buffer_put(buffer);
	return 0;
}

static void ion_vma_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_file->private_data;
	struct ion_handle *handle = vma->vm_private_data;
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* check that the client still exists and take a reference so
	   it can't go away until this vma is closed */
	client = ion_client_lookup(buffer->dev, current->group_leader);
	if (IS_ERR_OR_NULL(client)) {
		vma->vm_private_data = NULL;
		return;
	}
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
}

static void ion_vma_close(struct vm_area_struct *vma)
{
	struct ion_handle *handle = vma->vm_private_data;
	struct ion_buffer *buffer = vma->vm_file->private_data;
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* this indicates the client is gone, nothing to do here */
	if (!handle)
		return;
	client = handle->client;
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
	ion_handle_put(handle);
	ion_client_put(client);
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
}

#if 0
static int ion_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_handle *handle = vma->vm_private_data;
	struct ion_buffer *buffer = vma->vm_file->private_data;
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* this indicates the client is gone, nothing to do here */
	if (!handle)
		return;
	client = handle->client;
}
#endif

static struct vm_operations_struct ion_vm_ops = {
	.open = ion_vma_open,
	.close = ion_vma_close,
};

static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = file->private_data;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct ion_client *client;
	struct ion_handle *handle;
	int ret;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* make sure the client still exists, it's possible for the client to
	   have gone away but the map/share fd still to be around, take
	   a reference to it so it can't go away while this mapping exists */
	client = ion_client_lookup(buffer->dev, current->group_leader);
	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: trying to mmap an ion handle in a process with no "
		       "ion client\n", __func__);
		return -EINVAL;
	}

	if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) >
				      buffer->size)) {
		pr_err("%s: trying to map larger area than handle has available"
		       "\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* find the handle and take a reference to it */
	handle = ion_import(client, buffer);
	if (IS_ERR_OR_NULL(handle)) {
		ret = -EINVAL;
		goto err;
	}

	if (!handle->buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		ret = -EINVAL;
		goto err1;
	}

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	if (ret) {
		mutex_unlock(&buffer->lock);
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);
		goto err1;
	}
	mutex_unlock(&buffer->lock);

	vma->vm_ops = &ion_vm_ops;
	/* move the handle into the vm_private_data so we can access it from
	   vma_open/close */
	vma->vm_private_data = handle;
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
	return 0;

err1:
	/* drop the reference to the handle */
	ion_handle_put(handle);
err:
	/* drop the reference to the client */
	ion_client_put(client);
	return ret;
}

static const struct file_operations ion_share_fops = {
	.owner = THIS_MODULE,
	.release = ion_share_release,
	.mmap = ion_share_mmap,
};

static int ion_ioctl_share(struct file *parent, struct ion_client *client,
			   struct ion_handle *handle)
{
	int fd = get_unused_fd();
	struct file *file;

	if (fd < 0)
		return -ENFILE;

	file = anon_inode_getfile("ion_share_fd", &ion_share_fops,
				  handle->buffer, O_RDWR);
	if (IS_ERR_OR_NULL(file))
		goto err;
	ion_buffer_get(handle->buffer);
	fd_install(fd, file);

	return fd;

err:
	put_unused_fd(fd);
	return -ENFILE;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.flags);
		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		if (!ion_handle_validate(client, data.handle)) {
			mutex_unlock(&client->lock);
			return -EINVAL;
		}
		mutex_unlock(&client->lock);
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_MAP:
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		if (!ion_handle_validate(client, data.handle)) {
			pr_err("%s: invalid handle passed to share ioctl.\n",
			       __func__);
			mutex_unlock(&client->lock);
			return -EINVAL;
		}
		data.fd = ion_ioctl_share(filp, client, data.handle);
		mutex_unlock(&client->lock);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;

		data.handle = ion_import_fd(client, data.fd);
		if (IS_ERR(data.handle))
			data.handle = NULL;
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}
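
/*
 * Illustrative userspace sketch (not part of the driver) of the ioctl flow
 * above: allocate a buffer, get an fd for it with ION_IOC_SHARE and mmap
 * that fd.  The "/dev/ion" path follows from the misc device name registered
 * in ion_device_create(); the sizes and flags are assumptions and error
 * checking is omitted for brevity.
 *
 *	struct ion_allocation_data alloc = {
 *		.len = 4096, .align = 4096, .flags = 1,
 *	};
 *	struct ion_fd_data share;
 *	struct ion_handle_data free_data;
 *	int ion_fd = open("/dev/ion", O_RDWR);
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *	share.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);
 *	void *ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 share.fd, 0);
 *	// ... use the buffer ...
 *	munmap(ptr, 4096);
 *	free_data.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_FREE, &free_data);
 */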

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_put(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_lookup(dev, current->group_leader);
	if (IS_ERR_OR_NULL(client)) {
		/* XXX: consider replacing "user" with cmdline */
		client = ion_client_create(dev, -1, "user");
		if (IS_ERR_OR_NULL(client))
			return PTR_ERR(client);
	}
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		char task_comm[TASK_COMM_LEN];
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;

		get_task_comm(task_comm, client->task);
		seq_printf(s, "%16s %16u %16zu\n", task_comm, client->pid,
			   size);
	}

	for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;
		seq_printf(s, "%16s %16u %16zu\n", client->name, client->pid,
			   size);
	}
	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->prio < entry->prio) {
			p = &(*p)->rb_left;
		} else if (heap->prio > entry->prio) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: can not insert multiple heaps with "
			       "priority %d\n", __func__, heap->prio);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->user_clients = RB_ROOT;
	idev->kernel_clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}