/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>

#include "ion.h"
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		an rb tree of all the heaps in the system
 * @custom_ioctl:	device-specific handler invoked for ION_IOC_CUSTOM
 * @clients:		an rb tree of all the clients (kernel and userspace)
 * @debug_root:		debugfs root directory for ion
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client task, used for debugging
 * @debug_root:		debugfs entry for this client
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * as well as the handles themselves, and should be held while modifying
 * either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 *
 * Modifications to node and kmap_cnt should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
};

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}
128
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -0800129static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);
130
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -0800131/* this function should only be called while dev->lock is held */
132static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
133 struct ion_device *dev,
134 unsigned long len,
135 unsigned long align,
136 unsigned long flags)
137{
138 struct ion_buffer *buffer;
Rebecca Schultz Zavin29ae6bc2013-12-13 14:23:43 -0800139 struct sg_table *table;
Rebecca Schultz Zavina46b6b22013-12-13 14:23:46 -0800140 struct scatterlist *sg;
141 int i, ret;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -0800142
143 buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
144 if (!buffer)
145 return ERR_PTR(-ENOMEM);
146
147 buffer->heap = heap;
148 kref_init(&buffer->ref);
149
150 ret = heap->ops->allocate(heap, buffer, len, align, flags);
151 if (ret) {
152 kfree(buffer);
153 return ERR_PTR(ret);
154 }
Rebecca Schultz Zavin29ae6bc2013-12-13 14:23:43 -0800155
Greg Hackmann056be392013-12-13 14:23:45 -0800156 buffer->dev = dev;
157 buffer->size = len;
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -0800158 buffer->flags = flags;
Greg Hackmann056be392013-12-13 14:23:45 -0800159
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -0800160 table = heap->ops->map_dma(heap, buffer);
Rebecca Schultz Zavin29ae6bc2013-12-13 14:23:43 -0800161 if (IS_ERR_OR_NULL(table)) {
162 heap->ops->free(buffer);
163 kfree(buffer);
164 return ERR_PTR(PTR_ERR(table));
165 }
166 buffer->sg_table = table;
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -0800167 if (buffer->flags & ION_FLAG_CACHED)
168 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
169 i) {
170 if (sg_dma_len(sg) == PAGE_SIZE)
171 continue;
172 pr_err("%s: cached mappings must have pagewise "
173 "sg_lists\n", __func__);
174 heap->ops->unmap_dma(heap, buffer);
175 kfree(buffer);
176 return ERR_PTR(-EINVAL);
177 }
Rebecca Schultz Zavin29ae6bc2013-12-13 14:23:43 -0800178
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -0800179 ret = ion_buffer_alloc_dirty(buffer);
180 if (ret) {
181 heap->ops->unmap_dma(heap, buffer);
182 heap->ops->free(buffer);
183 kfree(buffer);
184 return ERR_PTR(ret);
185 }
186
187 buffer->dev = dev;
188 buffer->size = len;
189 INIT_LIST_HEAD(&buffer->vmas);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -0800190 mutex_init(&buffer->lock);
Rebecca Schultz Zavina46b6b22013-12-13 14:23:46 -0800191 /* this will set up dma addresses for the sglist -- it is not
192 technically correct as per the dma api -- a specific
193 device isn't really taking ownership here. However, in practice on
194 our systems the only dma_address space is physical addresses.
195 Additionally, we can't afford the overhead of invalidating every
196 allocation via dma_map_sg. The implicit contract here is that
197 memory comming from the heaps is ready for dma, ie if it has a
198 cached mapping that mapping has been invalidated */
199 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
200 sg_dma_address(sg) = sg_phys(sg);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -0800201 ion_buffer_add(dev, buffer);
202 return buffer;
203}
204
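/*
 * Called by kref_put() when the last reference to a buffer is dropped:
 * tears down any leftover kernel mapping, releases the heap allocation,
 * and removes the buffer from the device's buffer rbtree.
 */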
static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);

	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->dev->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->dev->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->dev->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->dev->lock);
}

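/*
 * Create a client-local handle for @buffer.  The handle takes its own
 * reference on the buffer and bumps the buffer's handle_count; the node
 * is linked into the client's rbtree later by ion_handle_add().
 */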
static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

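/*
 * Return true if @handle is present in @client's handle rbtree.  Callers
 * are expected to hold client->lock around the lookup.
 */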
static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	pr_debug("%s: len %zu align %zu heap_mask %u flags %x\n", __func__,
		 len, align, heap_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches
	 * the request of the caller, allocate from it.  Repeat until
	 * allocate has succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
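
/*
 * Rough in-kernel usage sketch (illustrative only -- the heap mask, sizes
 * and error handling depend on the platform's ion configuration):
 *
 *	struct ion_client *client = ion_client_create(idev, -1, "my-driver");
 *	struct ion_handle *handle = ion_alloc(client, SZ_1M, PAGE_SIZE,
 *					      ION_HEAP_SYSTEM_MASK, 0);
 *	void *cpu_addr = ion_map_kernel(client, handle);
 *	...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */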

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

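/*
 * Kernel mapping refcounting: a buffer holds a single map_kernel() mapping
 * shared by all handles; kmap_cnt is tracked at both the buffer and the
 * handle level so the mapping is torn down only when the last user goes.
 */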
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

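/* debugfs: per-client file listing the bytes held in each heap type */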
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAPS] = {0};
	const char *names[ION_NUM_HEAPS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		if (!names[type])
			names[type] = handle->buffer->heap->name;
		sizes[type] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAPS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	mutex_lock(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
	unsigned long pages = buffer->sg_table->nents;
	unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;

	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
	if (!buffer->dirty)
		return -ENOMEM;
	return 0;
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

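/*
 * Cache maintenance for ION_FLAG_CACHED buffers: userspace mappings are
 * faulted in one page at a time and each faulted page is marked dirty in
 * the buffer's bitmap.  Before a device maps the buffer, the dirty pages
 * are synced for the device and the userspace mappings are zapped so the
 * next CPU access faults and re-marks the page.
 */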
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;
	struct ion_vma_list *vma_list;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!(buffer->flags & ION_FLAG_CACHED))
		return;

	mutex_lock(&buffer->lock);
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (!test_bit(i, buffer->dirty))
			continue;
		dma_sync_sg_for_device(dev, sg, 1, dir);
		clear_bit(i, buffer->dirty);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct scatterlist *sg;
	int i;

	mutex_lock(&buffer->lock);
	set_bit(vmf->pgoff, buffer->dirty);

	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (i != vmf->pgoff)
			continue;
		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			       sg_page(sg));
		break;
	}
	mutex_unlock(&buffer->lock);
	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

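/*
 * dma-buf mmap: cached buffers are mapped lazily through the fault handler
 * above so dirty pages can be tracked; uncached buffers are mapped directly
 * by the heap with a writecombined pgprot.
 */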
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (buffer->flags & ION_FLAG_CACHED) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
	} else {
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		mutex_lock(&buffer->lock);
		/* now map it to userspace */
		ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
		mutex_unlock(&buffer->lock);
	}

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

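/*
 * Export @handle as a dma-buf and return an fd for it.  The dma-buf holds
 * its own buffer reference, dropped in ion_dma_buf_release().
 */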
int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);

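/*
 * Turn an ion dma-buf fd back into a handle for @client, reusing an
 * existing handle for the same buffer if there is one.  dma-bufs from
 * other exporters are rejected.
 */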
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

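/*
 * Backend for ION_IOC_SYNC: sync the dirty pages of the buffer behind @fd
 * for device access (bidirectional, no specific device).
 */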
static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;
	ion_buffer_sync_for_device(buffer, NULL, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

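/*
 * ioctl interface for /dev/ion: ION_IOC_ALLOC, ION_IOC_FREE, ION_IOC_SHARE,
 * ION_IOC_IMPORT, ION_IOC_SYNC and ION_IOC_CUSTOM.
 */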
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.heap_mask, data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		int ret = 0;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle)) {
			ret = PTR_ERR(data.handle);
			data.handle = NULL;
		}
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_SYNC:
	{
		struct ion_fd_data data;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		ion_sync_for_device(client, data.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, -1, "user");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_printf(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->type == heap->type)
			total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu\n", buffer->task_comm,
				   buffer->pid, buffer->size);
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->lock);
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

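/*
 * Register @heap with @dev.  Heaps are kept in an rbtree sorted by id,
 * which is also the order ion_alloc() tries them in.
 */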
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: can not insert multiple heaps with id %d\n",
			       __func__, heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

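/*
 * Reserve the memblock regions described by the platform data at boot so
 * heaps with a fixed base and non-zero size have their memory set aside.
 */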
void __init ion_reserve(struct ion_platform_data *data)
{
	int i, ret;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;
		ret = memblock_reserve(data->heaps[i].base,
				       data->heaps[i].size);
		if (ret)
			pr_err("memblock reserve of %x@%lx failed\n",
			       data->heaps[i].size,
			       data->heaps[i].base);
	}
}