/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * DEBUG must be defined before the printk helpers are pulled in,
 * otherwise the pr_debug() calls in this file compile away to nothing.
 */
#define DEBUG

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>

#include "ion.h"
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:	the actual misc device
 * @buffers:	an rb tree of all the existing buffers
 * @lock:	lock protecting the buffers and heaps trees
 * @heaps:	an rb tree of all the heaps in the system
 * @custom_ioctl: arch-specific ioctl hook, may be NULL
 * @clients:	an rb tree of all the clients of this device
 * @debug_root:	root dentry of this device's debugfs tree
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:	node in the tree of all clients
 * @dev:	back pointer to the ion device
 * @handles:	an rb tree of all the handles in this client
 * @lock:	lock protecting the tree of handles
 * @heap_mask:	mask of all supported heaps
 * @name:	used for debugging
 * @task:	used for debugging
 * @pid:	pid of the client, used for debugging
 * @debug_root:	this client's entry in debugfs
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here protects both the tree of handles and the
 * handles themselves; it must be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:	reference count
 * @client:	back pointer to the client the buffer resides in
 * @buffer:	pointer to the buffer
 * @node:	node in the client's handle rbtree
 * @kmap_cnt:	count of times this client has mapped to kernel
 *
 * Modifications to node and kmap_cnt must be protected by the lock in
 * the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
};

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.\n", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	int ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	table = buffer->heap->ops->map_dma(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;

	buffer->dev = dev;
	buffer->size = len;
	mutex_init(&buffer->lock);
	ion_buffer_add(dev, buffer);
	return buffer;
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);

	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&client->lock);

	mutex_lock(&buffer->lock);
	/* drop only this handle's kernel mappings, not the buffer's */
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);
	mutex_unlock(&client->lock);

	ion_buffer_put(buffer);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: handle already found.\n", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	/*
	 * Traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches
	 * the request of the caller, allocate from it.  Repeat until the
	 * allocation has succeeded or all heaps have been tried.
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & flags))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
}
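
/*
 * Example (an illustrative sketch, not part of the driver): a kernel
 * client allocating and freeing a buffer.  `idev` is assumed to be the
 * ion_device created by the platform's ion driver, and the system heap
 * is assumed to be registered with id ION_HEAP_TYPE_SYSTEM (the common
 * convention), so ION_HEAP_SYSTEM_MASK serves both as the client's heap
 * type mask and as the allocation's heap id mask.
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *
 *	client = ion_client_create(idev, ION_HEAP_SYSTEM_MASK, "example");
 *	if (IS_ERR_OR_NULL(client))
 *		return PTR_ERR(client);
 *
 *	handle = ion_alloc(client, SZ_4K, PAGE_SIZE, ION_HEAP_SYSTEM_MASK);
 *	if (IS_ERR(handle)) {
 *		ion_client_destroy(client);
 *		return PTR_ERR(handle);
 *	}
 *
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */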

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
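
/*
 * Example (an illustrative sketch): querying the physical address of a
 * buffer from a physically contiguous heap such as a carveout heap.
 * program_device_dma() is a hypothetical helper standing in for
 * whatever the caller does with the address.
 *
 *	ion_phys_addr_t paddr;
 *	size_t len;
 *
 *	if (!ion_phys(client, handle, &paddr, &len))
 *		program_device_dma(paddr, len);
 */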

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
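
/*
 * Example (an illustrative sketch): CPU access through a kernel mapping,
 * continuing the client/handle example above.  SZ_4K matches the
 * allocation size used there.
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR_OR_NULL(vaddr))
 *		return vaddr ? PTR_ERR(vaddr) : -ENOMEM;
 *	memset(vaddr, 0, SZ_4K);
 *	ion_unmap_kernel(client, handle);
 *
 * Kernel mappings are refcounted per handle and per buffer, so repeated
 * map/unmap pairs on the same handle are cheap after the first map.
 */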

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAPS] = {0};
	const char *names[ION_NUM_HEAPS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		if (!names[type])
			names[type] = handle->buffer->heap->name;
		sizes[type] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAPS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	mutex_lock(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client);
}

struct sg_table *ion_map_dma(struct ion_client *client,
			     struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}

void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
{
	/*
	 * The sg_table belongs to the buffer and lives for the buffer's
	 * whole lifetime, so there is nothing to undo here.
	 */
}
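
/*
 * Example (an illustrative sketch): walking the buffer's scatterlist to
 * program a device's DMA engine.  `dev` is assumed to be the struct
 * device performing the DMA.
 *
 *	struct sg_table *table = ion_map_dma(client, handle);
 *	struct scatterlist *sg;
 *	int i, nents;
 *
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 *	nents = dma_map_sg(dev, table->sgl, table->nents, DMA_TO_DEVICE);
 *	for_each_sg(table->sgl, sg, nents, i)
 *		; // program sg_dma_address(sg) / sg_dma_len(sg)
 */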

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	/* the table was created by the heap when the buffer was allocated */
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
	/* nothing to do; the table is freed when the buffer is destroyed */
}

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	/* the dma-buf kmap interface passes offset as a page number */
	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

static const struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0) {
		dma_buf_put(dmabuf);
		ion_buffer_put(buffer);
	}
	return fd;
}
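
/*
 * Example (an illustrative sketch): exporting a buffer as a dma-buf fd
 * that can be passed to userspace or to another driver.  `client` and
 * `handle` are assumed valid, as in the examples above.
 *
 *	int fd = ion_share_dma_buf(client, handle);
 *
 *	if (fd < 0)
 *		return fd;
 *
 * The fd holds its own reference to the underlying buffer (taken via
 * ion_buffer_get() above), so the handle may be released with
 * ion_free() without tearing the buffer down.
 */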

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
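
/*
 * Example (an illustrative sketch): importing a dma-buf fd that was
 * exported by ion, e.g. one received from another process.  `client` is
 * assumed valid and `fd` is assumed to wrap an ion buffer.
 *
 *	struct ion_handle *handle = ion_import_dma_buf(client, fd);
 *
 *	if (IS_ERR_OR_NULL(handle))
 *		return handle ? PTR_ERR(handle) : -EINVAL;
 *
 * Importing the same buffer into the same client twice returns the
 * existing handle with an extra reference instead of a duplicate.
 */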

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle))
			data.handle = NULL;
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, -1, "user");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->type);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: can not insert multiple heaps with id %d\n",
			       __func__, heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->clients = RB_ROOT;
	return idev;
}
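
/*
 * Example (an illustrative sketch): a platform ion driver typically
 * creates the device once at probe time and then registers its heaps.
 * `num_heaps` and `heaps[]` stand in for whatever the platform built
 * from its ion_platform_data; `custom_ioctl` may be NULL, as here.
 *
 *	int i;
 *
 *	idev = ion_device_create(NULL);
 *	if (IS_ERR_OR_NULL(idev))
 *		return PTR_ERR(idev);
 *	for (i = 0; i < num_heaps; i++)
 *		ion_device_add_heap(idev, heaps[i]);
 */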

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i, ret;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;
		ret = memblock_reserve(data->heaps[i].base,
				       data->heaps[i].size);
		if (ret)
			pr_err("memblock reserve of %zx@%lx failed\n",
			       data->heaps[i].size,
			       data->heaps[i].base);
	}
}
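
/*
 * Example (an illustrative sketch): board code is expected to call
 * ion_reserve() early in boot, e.g. from the machine's .reserve
 * callback, before the page allocator takes ownership of the memory:
 *
 *	static void __init example_board_reserve(void)
 *	{
 *		ion_reserve(&example_ion_pdata);
 *	}
 *
 * where example_ion_pdata is the board's ion_platform_data with base
 * and size filled in for each statically placed heap.  Heaps with
 * size == 0 are skipped, so dynamically placed heaps can be listed
 * without reserving memory here.
 */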