/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include "ion_priv.h"
#define DEBUG

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		an rb tree of all the heaps in the system, sorted by id
 * @custom_ioctl:	driver specific hook invoked for ION_IOC_CUSTOM
 * @user_clients:	an rb tree of all the clients created from userspace
 * @kernel_clients:	an rb tree of all the clients created from the kernel
 * @debug_root:		root dentry of ion's debugfs directory
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root user_clients;
	struct rb_root kernel_clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @ref:		for reference counting the client
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client, used for debugging
 * @debug_root:		debugfs entry for this client
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves; it should be held while modifying either.
 */
struct ion_client {
	struct kref ref;
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @dmap_cnt:		count of times this client has mapped for dma
 * @usermap_cnt:	count of times this client has mapped for userspace
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int dmap_cnt;
	unsigned int usermap_cnt;
};

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	int ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}
	buffer->dev = dev;
	buffer->size = len;
	mutex_init(&buffer->lock);
	ion_buffer_add(dev, buffer);
	return buffer;
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	/* XXX Can a handle be destroyed while its map count is non-zero?:
	   if (handle->map_cnt) unmap
	 */
	WARN_ON(handle->kmap_cnt || handle->dmap_cnt || handle->usermap_cnt);
	ion_buffer_put(handle->buffer);
	mutex_lock(&handle->client->lock);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &handle->client->handles);
	mutex_unlock(&handle->client->lock);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

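/* walk the client's handle tree looking for an existing handle that
   references this buffer; callers hold client->lock */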
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

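/* returns true if the handle is present in the client's handle tree;
   callers hold client->lock */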
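/* insert the handle into the client's handle rbtree, keyed by the handle
   pointer itself; callers hold client->lock */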
static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: handle already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & flags))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	mutex_unlock(&dev->lock);

	if (IS_ERR_OR_NULL(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	if (IS_ERR_OR_NULL(handle))
		goto end;

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	mutex_lock(&client->lock);
	ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	return handle;

end:
	ion_buffer_put(buffer);
	return handle;
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
}

static void ion_client_get(struct ion_client *client);
static int ion_client_put(struct ion_client *client);

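/* bump the per-handle and per-buffer map counts; returns true when this is
   the first mapping of the buffer and the heap's map op should be called */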
static bool _ion_map(int *buffer_cnt, int *handle_cnt)
{
	bool map;

	BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0);

	if (*buffer_cnt)
		map = false;
	else
		map = true;
	if (*handle_cnt == 0)
		(*buffer_cnt)++;
	(*handle_cnt)++;
	return map;
}

static bool _ion_unmap(int *buffer_cnt, int *handle_cnt)
{
	BUG_ON(*handle_cnt == 0);
	(*handle_cnt)--;
	if (*handle_cnt != 0)
		return false;
	BUG_ON(*buffer_cnt == 0);
	(*buffer_cnt)--;
	if (*buffer_cnt == 0)
		return true;
	return false;
}

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}

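/* drop the per-handle and per-buffer map counts; returns true when the last
   mapping of the buffer goes away and the heap's unmap op should be called */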
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
		     unsigned long flags)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
		if (buffer->flags != flags) {
			pr_err("%s: buffer was already mapped with flags %lx,"
			       " cannot map with flags %lx\n", __func__,
			       buffer->flags, flags);
			vaddr = ERR_PTR(-EEXIST);
			goto out;
		}
	} else {
		buffer->flags = flags;
	}

	if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer,
						      flags);
		if (IS_ERR_OR_NULL(vaddr))
			_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
		buffer->vaddr = vaddr;
	} else {
		vaddr = buffer->vaddr;
	}

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}

struct scatterlist *ion_map_dma(struct ion_client *client,
				struct ion_handle *handle,
				unsigned long flags)
{
	struct ion_buffer *buffer;
	struct scatterlist *sglist;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_dma) {
		pr_err("%s: map_dma is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
		if (buffer->flags != flags) {
			pr_err("%s: buffer was already mapped with flags %lx,"
			       " cannot map with flags %lx\n", __func__,
			       buffer->flags, flags);
			sglist = ERR_PTR(-EEXIST);
			goto out;
		}
	} else {
		buffer->flags = flags;
	}

	if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
		sglist = buffer->heap->ops->map_dma(buffer->heap, buffer);
		if (IS_ERR_OR_NULL(sglist))
			_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt);
		buffer->sglist = sglist;
	} else {
		sglist = buffer->sglist;
	}

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return sglist;
}

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}

void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) {
		buffer->heap->ops->unmap_dma(buffer->heap, buffer);
		buffer->sglist = NULL;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}

struct ion_buffer *ion_share(struct ion_client *client,
			     struct ion_handle *handle)
{
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/* do not take an extra reference here, the burden is on the caller
	 * to make sure the buffer doesn't go away while it's passing it
	 * to another client -- ion_free should not be called on this handle
	 * until the buffer has been imported into the other client
	 */
	return handle->buffer;
}

struct ion_handle *ion_import(struct ion_client *client,
			      struct ion_buffer *buffer)
{
	struct ion_handle *handle = NULL;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	return handle;
}

static int check_vaddr_bounds(unsigned long start, unsigned long end)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *vma;
	int ret = 1;

	if (end < start)
		goto out;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, start);
	if (vma && vma->vm_start < end) {
		if (start < vma->vm_start)
			goto out_up;
		if (end > vma->vm_end)
			goto out_up;
		ret = 0;
	}

out_up:
	up_read(&mm->mmap_sem);
out:
	return ret;
}

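/* returns 0 if [start, end) lies within a single vma of the current
   address space, non-zero otherwise */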
int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
		    void *uaddr, unsigned long offset, unsigned long len,
		    unsigned int cmd)
{
	struct ion_buffer *buffer;
	int ret = -EINVAL;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to do_cache_op.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!ION_IS_CACHED(buffer->flags)) {
		ret = 0;
		goto out;
	}

	if (!handle->buffer->heap->ops->cache_op) {
		pr_err("%s: cache_op is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
					  offset, len, cmd);

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}

static const struct file_operations ion_share_fops;

struct ion_handle *ion_import_fd(struct ion_client *client, int fd)
{
	struct file *file = fget(fd);
	struct ion_handle *handle;

	if (!file) {
		pr_err("%s: imported fd not found in file table.\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	if (file->f_op != &ion_share_fops) {
		pr_err("%s: imported file %s is not a shared ion"
		       " file.", __func__, file->f_dentry->d_name.name);
		handle = ERR_PTR(-EINVAL);
		goto end;
	}
	handle = ion_import(client, file->private_data);
end:
	fput(file);
	return handle;
}

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;

	seq_printf(s, "%16.16s: %16.16s : %16.16s : %16.16s\n", "heap_name",
		   "size_in_bytes", "handle refcount", "buffer");
	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);

		seq_printf(s, "%16.16s: %16x : %16d : %16p\n",
			   handle->buffer->heap->name,
			   handle->buffer->size,
			   atomic_read(&handle->ref.refcount),
			   handle->buffer);
	}

	seq_printf(s, "%16.16s %d\n", "client refcount:",
		   atomic_read(&client->ref.refcount));
	mutex_unlock(&client->lock);

	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

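/* look up the client already created for this task, if any, taking a
   reference on it; returns NULL if no client exists for the task */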
static struct ion_client *ion_client_lookup(struct ion_device *dev,
					    struct task_struct *task)
{
	struct rb_node *n = dev->user_clients.rb_node;
	struct ion_client *client;

	mutex_lock(&dev->lock);
	while (n) {
		client = rb_entry(n, struct ion_client, node);
		if (task == client->task) {
			ion_client_get(client);
			mutex_unlock(&dev->lock);
			return client;
		} else if (task < client->task) {
			n = n->rb_left;
		} else if (task > client->task) {
			n = n->rb_right;
		}
	}
	mutex_unlock(&dev->lock);
	return NULL;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	/* if this isn't a kernel thread, see if a client already
	   exists */
	if (task) {
		client = ion_client_lookup(dev, task);
		if (!IS_ERR_OR_NULL(client)) {
			put_task_struct(current->group_leader);
			return client;
		}
	}

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;
	kref_init(&client->ref);

	mutex_lock(&dev->lock);
	if (task) {
		p = &dev->user_clients.rb_node;
		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct ion_client, node);

			if (task < entry->task)
				p = &(*p)->rb_left;
			else if (task > entry->task)
				p = &(*p)->rb_right;
		}
		rb_link_node(&client->node, parent, p);
		rb_insert_color(&client->node, &dev->user_clients);
	} else {
		p = &dev->kernel_clients.rb_node;
		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct ion_client, node);

			if (client < entry)
				p = &(*p)->rb_left;
			else if (client > entry)
				p = &(*p)->rb_right;
		}
		rb_link_node(&client->node, parent, p);
		rb_insert_color(&client->node, &dev->kernel_clients);
	}

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

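/* kref release callback: destroy all of the client's handles, unlink the
   client from the device and free it */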
static void _ion_client_destroy(struct kref *kref)
{
	struct ion_client *client = container_of(kref, struct ion_client, ref);
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task) {
		rb_erase(&client->node, &dev->user_clients);
		put_task_struct(client->task);
	} else {
		rb_erase(&client->node, &dev->kernel_clients);
	}
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client);
}

static void ion_client_get(struct ion_client *client)
{
	kref_get(&client->ref);
}

static int ion_client_put(struct ion_client *client)
{
	return kref_put(&client->ref, _ion_client_destroy);
}

void ion_client_destroy(struct ion_client *client)
{
	if (client)
		ion_client_put(client);
}

int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
			 unsigned long *flags)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*flags = buffer->flags;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_flags);

static int ion_share_release(struct inode *inode, struct file *file)
{
	struct ion_buffer *buffer = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	mutex_lock(&buffer->lock);
	buffer->umap_cnt--;
	mutex_unlock(&buffer->lock);
	/* drop the reference to the buffer -- this prevents the
	   buffer from going away because the client holding it exited
	   while it was being passed */
	ion_buffer_put(buffer);
	return 0;
}

static void ion_vma_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_file->private_data;
	struct ion_handle *handle = vma->vm_private_data;
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* check that the client still exists and take a reference so
	   it can't go away until this vma is closed */
	client = ion_client_lookup(buffer->dev, current->group_leader);
	if (IS_ERR_OR_NULL(client)) {
		vma->vm_private_data = NULL;
		return;
	}
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
}

static void ion_vma_close(struct vm_area_struct *vma)
{
	struct ion_handle *handle = vma->vm_private_data;
	struct ion_buffer *buffer = vma->vm_file->private_data;
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* this indicates the client is gone, nothing to do here */
	if (!handle)
		return;
	client = handle->client;
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
	ion_handle_put(handle);
	ion_client_put(client);
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
}

static struct vm_operations_struct ion_vm_ops = {
	.open = ion_vma_open,
	.close = ion_vma_close,
};

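/* mmap handler for a shared buffer fd: look up the caller's client, take a
   handle reference and ask the heap to map the buffer into userspace */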
static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = file->private_data;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct ion_client *client;
	struct ion_handle *handle;
	int ret;
	unsigned long flags = file->f_flags & O_DSYNC ?
			ION_SET_CACHE(UNCACHED) :
			ION_SET_CACHE(CACHED);

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* make sure the client still exists, it's possible for the client to
	   have gone away but the map/share fd still to be around, take
	   a reference to it so it can't go away while this mapping exists */
	client = ion_client_lookup(buffer->dev, current->group_leader);
	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: trying to mmap an ion handle in a process with no "
		       "ion client\n", __func__);
		return -EINVAL;
	}

	if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) >
				      buffer->size)) {
		pr_err("%s: trying to map larger area than handle has available"
		       "\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* find the handle and take a reference to it */
	handle = ion_import(client, buffer);
	if (IS_ERR_OR_NULL(handle)) {
		ret = -EINVAL;
		goto err;
	}

	if (!handle->buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		ret = -EINVAL;
		goto err1;
	}

	mutex_lock(&buffer->lock);
	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
		if (buffer->flags != flags) {
			pr_err("%s: buffer was already mapped with flags %lx,"
			       " cannot map with flags %lx\n", __func__,
			       buffer->flags, flags);
			ret = -EEXIST;
			mutex_unlock(&buffer->lock);
			goto err1;
		}
	} else {
		buffer->flags = flags;
	}
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma,
					  flags);
	buffer->umap_cnt++;
	mutex_unlock(&buffer->lock);
	if (ret) {
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);
		goto err2;
	}

	vma->vm_ops = &ion_vm_ops;
	/* move the handle into the vm_private_data so we can access it from
	   vma_open/close */
	vma->vm_private_data = handle;
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
	return 0;

err2:
	buffer->umap_cnt--;
	/* drop the reference to the handle */
err1:
	ion_handle_put(handle);
err:
	/* drop the reference to the client */
	ion_client_put(client);
	return ret;
}

static const struct file_operations ion_share_fops = {
	.owner = THIS_MODULE,
	.release = ion_share_release,
	.mmap = ion_share_mmap,
};

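/* wrap the handle's buffer in an anonymous inode file and install it in a
   new fd so it can be passed to another process */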
static int ion_ioctl_share(struct file *parent, struct ion_client *client,
			   struct ion_handle *handle)
{
	int fd = get_unused_fd();
	struct file *file;

	if (fd < 0)
		return -ENFILE;

	file = anon_inode_getfile("ion_share_fd", &ion_share_fops,
				  handle->buffer, O_RDWR);
	if (IS_ERR_OR_NULL(file))
		goto err;

	if (parent->f_flags & O_DSYNC)
		file->f_flags |= O_DSYNC;

	ion_buffer_get(handle->buffer);
	fd_install(fd, file);

	return fd;

err:
	put_unused_fd(fd);
	return -ENFILE;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.flags);

		if (IS_ERR_OR_NULL(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_MAP:
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		if (!ion_handle_validate(client, data.handle)) {
			pr_err("%s: invalid handle passed to share ioctl.\n",
			       __func__);
			mutex_unlock(&client->lock);
			return -EINVAL;
		}
		data.fd = ion_ioctl_share(filp, client, data.handle);
		mutex_unlock(&client->lock);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;

		data.handle = ion_import_fd(client, data.fd);
		if (IS_ERR(data.handle))
			data.handle = NULL;
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	case ION_IOC_CLEAN_CACHES:
	case ION_IOC_INV_CACHES:
	case ION_IOC_CLEAN_INV_CACHES:
	{
		struct ion_flush_data data;
		unsigned long start, end;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_flush_data)))
			return -EFAULT;

		start = (unsigned long) data.vaddr;
		end = (unsigned long) data.vaddr + data.length;

		if (check_vaddr_bounds(start, end)) {
			pr_err("%s: virtual address %p is out of bounds\n",
			       __func__, data.vaddr);
			return -EINVAL;
		}

		return ion_do_cache_op(client, data.handle, data.vaddr,
				       data.offset, data.length, cmd);
	}
	case ION_IOC_GET_FLAGS:
	{
		struct ion_flag_data data;
		int ret;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_flag_data)))
			return -EFAULT;

		ret = ion_handle_get_flags(client, data.handle, &data.flags);
		if (ret < 0)
			return ret;
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_flag_data)))
			return -EFAULT;
		break;
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_put(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, -1, "user");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

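/* sum up the sizes of all buffers this client holds on the given heap */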
static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_ids id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;

	seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
	for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		char task_comm[TASK_COMM_LEN];
		size_t size = ion_debug_heap_total(client, heap->id);
		if (!size)
			continue;

		get_task_comm(task_comm, client->task);
		seq_printf(s, "%16.s %16u %16x\n", task_comm, client->pid,
			   size);
	}

	for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);
		if (!size)
			continue;
		seq_printf(s, "%16.s %16u %16x\n", client->name, client->pid,
			   size);
	}
	if (heap->ops->get_allocated) {
		seq_printf(s, "total bytes currently allocated: %lx\n",
			   heap->ops->get_allocated(heap));
	}
	if (heap->ops->get_total) {
		seq_printf(s, "total heap size: %lx\n",
			   heap->ops->get_total(heap));
	}
	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: can not insert multiple heaps with "
			       "id %d\n", __func__, heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}

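/* debugfs helper: mark every buffer, clear the mark on buffers reachable
   from some client, and report any still-marked buffers as leaked */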
static int ion_debug_leak_show(struct seq_file *s, void *unused)
{
	struct ion_device *dev = s->private;
	struct rb_node *n;
	struct rb_node *n2;

	/* mark all buffers as 1 */
	seq_printf(s, "%16.s %16.s %16.s %16.s\n", "buffer", "heap", "size",
		   "ref cnt");
	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		buf->marked = 1;
	}

	/* now see which buffers we can access */
	for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);

		mutex_lock(&client->lock);
		for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
			struct ion_handle *handle = rb_entry(n2,
						struct ion_handle, node);

			handle->buffer->marked = 0;
		}
		mutex_unlock(&client->lock);
	}

	for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);

		mutex_lock(&client->lock);
		for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
			struct ion_handle *handle = rb_entry(n2,
						struct ion_handle, node);

			handle->buffer->marked = 0;
		}
		mutex_unlock(&client->lock);
	}
	/* And anyone still marked as a 1 means a leaked handle somewhere */
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		if (buf->marked == 1)
			seq_printf(s, "%16.x %16.s %16.x %16.d\n",
				   (int)buf, buf->heap->name, buf->size,
				   atomic_read(&buf->ref.refcount));
	}
	mutex_unlock(&dev->lock);
	return 0;
}

static int ion_debug_leak_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_leak_show, inode->i_private);
}

static const struct file_operations debug_leak_fops = {
	.open = ion_debug_leak_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->user_clients = RB_ROOT;
	idev->kernel_clients = RB_ROOT;
	debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
			    &debug_leak_fops);
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}