/*
 *
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/msm_ion.h>
#include <trace/events/kmem.h>

#include <mach/iommu_domains.h>
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @clients:		an rb tree of all the clients attached to this device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_type_mask;
	char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @iommu_map_cnt:	count of times this client has mapped to an iommu
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int iommu_map_cnt;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return ((buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

static bool ion_heap_drain_freelist(struct ion_heap *heap);
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_drain_freelist(heap);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
			    i) {
			if (sg_dma_len(sg) == PAGE_SIZE)
				continue;
			pr_err("%s: cached mappings that will be faulted in "
			       "must have pagewise sg_lists\n", __func__);
			ret = -EINVAL;
			goto err;
		}

		ret = ion_buffer_alloc_dirty(buffer);
		if (ret)
			goto err;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here.  However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg. The implicit contract here is that
	   memory coming from the heaps is ready for dma, i.e. if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (sg_dma_address(sg) == 0)
			sg_dma_address(sg) = sg_phys(sg);
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

static void ion_delayed_unsecure(struct ion_buffer *buffer)
{
	if (buffer->heap->ops->unsecure_buffer)
		buffer->heap->ops->unsecure_buffer(buffer, 1);
}

static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);

	ion_delayed_unsecure(buffer);
	buffer->heap->ops->free(buffer);
	if (buffer->flags & ION_FLAG_CACHED)
		kfree(buffer->dirty);
	kfree(buffer);
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
		rt_mutex_lock(&heap->lock);
		list_add(&buffer->list, &heap->free_list);
		rt_mutex_unlock(&heap->lock);
		wake_up(&heap->waitqueue);
		return;
	}
	_ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

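/* must be called with client->lock held */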
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

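/* must be called with client->lock held */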
static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	unsigned long secure_allocation = flags & ION_FLAG_SECURE;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	/*
	 * For now, we don't want to fault in pages individually since
	 * clients are already doing manual cache maintenance. In
	 * other words, the implicit caching infrastructure is in
	 * place (in code) but should not be used.
	 */
	flags |= ION_FLAG_CACHED_NEEDS_SYNC;

	pr_debug("%s: len %d align %d heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		/* Do not allow un-secure heap if secure is specified */
		if (secure_allocation &&
		    !ion_heap_allow_secure_allocation(heap->type))
			continue;
		trace_ion_alloc_buffer_start(client->name, heap->name, len,
					     heap_id_mask, flags);
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		trace_ion_alloc_buffer_end(client->name, heap->name, len,
					   heap_id_mask, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;

		trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
						heap_id_mask, flags,
						PTR_ERR(buffer));
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
			int ret_value = snprintf(&dbg_str[dbg_str_idx],
						len_left, "%s ", heap->name);
			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
			}
		}
	}
	up_read(&dev->lock);

	if (buffer == NULL) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_id_mask, flags, -ENODEV);
		return ERR_PTR(-ENODEV);
	}

	if (IS_ERR(buffer)) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_id_mask, flags,
					    PTR_ERR(buffer));
		pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
			"0x%x) from heap(s) %sfor client %s\n",
			len, align, dbg_str, client->name);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		mutex_unlock(&client->lock);
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

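/* must be called with buffer->lock held */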
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;

	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
			"heap_name", "size_in_bytes", "handle refcount",
			"buffer", "physical", "[domain,partition] - virt");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);

		enum ion_heap_type type = handle->buffer->heap->type;

		seq_printf(s, "%16.16s: %16x : %16d : %12p",
			   handle->buffer->heap->name,
			   handle->buffer->size,
			   atomic_read(&handle->ref.refcount),
			   handle->buffer);

		if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
			type == ION_HEAP_TYPE_CARVEOUT ||
			type == (enum ion_heap_type) ION_HEAP_TYPE_CP)
			seq_printf(s, " : %12pa", &handle->buffer->priv_phys);
		else
			seq_printf(s, " : %12s", "N/A");

		seq_printf(s, "\n");
	}
	mutex_unlock(&client->lock);
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;
	unsigned int name_len;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	name_len = strnlen(name, 64);

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);

	client->name = kzalloc(name_len+1, GFP_KERNEL);
	if (!client->name) {
		put_task_struct(current->group_leader);
		kfree(client);
		return ERR_PTR(-ENOMEM);
	} else {
		strlcpy(client->name, name, name_len+1);
	}

	client->task = task;
	client->pid = pid;

	down_write(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	up_write(&dev->lock);

	return client;
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);

	up_write(&dev->lock);

	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
			unsigned long *flags)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*flags = buffer->flags;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_flags);

int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
			unsigned long *size)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*size = buffer->size;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_size);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

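/*
 * Build an sg_table describing a physically contiguous region starting at
 * buffer_base as a series of fixed-size chunks; only the dma address and
 * length of each entry are filled in.
 */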
struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
					size_t chunk_size, size_t total_size)
{
	struct sg_table *table;
	int i, n_chunks, ret;
	struct scatterlist *sg;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	n_chunks = DIV_ROUND_UP(total_size, chunk_size);
	pr_debug("creating sg_table with %d chunks\n", n_chunks);

	ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
	if (ret)
		goto err0;

	for_each_sg(table->sgl, sg, table->nents, i) {
		dma_addr_t addr = buffer_base + i * chunk_size;
		sg_dma_address(sg) = addr;
		sg_dma_len(sg) = chunk_size;
	}

	return table;
err0:
	kfree(table);
	return ERR_PTR(ret);
}

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
	unsigned long pages = buffer->sg_table->nents;
	unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;

	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
	if (!buffer->dirty)
		return -ENOMEM;
	return 0;
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;
	struct ion_vma_list *vma_list;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (!test_bit(i, buffer->dirty))
			continue;
		dma_sync_sg_for_device(dev, sg, 1, dir);
		clear_bit(i, buffer->dirty);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

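/*
 * Page fault handler for cached buffers that are faulted in page by page:
 * mark the page dirty, sync it for the CPU and insert it into the faulting
 * vma.
 */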
int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct scatterlist *sg;
	int i;

	mutex_lock(&buffer->lock);
	set_bit(vmf->pgoff, buffer->dirty);

	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (i != vmf->pgoff)
			continue;
		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			       sg_page(sg));
		break;
	}
	mutex_unlock(&buffer->lock);
	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);

	if (buffer->heap->ops->unmap_user)
		buffer->heap->ops->unmap_user(buffer->heap, buffer);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

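/*
 * dma-buf mmap op: cached buffers that use fault-in mappings get ion_vma_ops
 * installed, everything else is mapped up front through the heap's map_user
 * (write-combined when the buffer is uncached).
 */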
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
			"to userspace\n", __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		vma->vm_flags |= VM_MIXEDMAP;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
						struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

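/*
 * Back end for ION_IOC_SYNC: flush the whole buffer behind the given dma-buf
 * fd towards the device so it sees the CPU's latest writes.
 */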
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001227static int ion_sync_for_device(struct ion_client *client, int fd)
1228{
1229 struct dma_buf *dmabuf;
1230 struct ion_buffer *buffer;
1231
1232 dmabuf = dma_buf_get(fd);
1233 if (IS_ERR_OR_NULL(dmabuf))
1234 return PTR_ERR(dmabuf);
1235
1236 /* if this memory came from ion */
1237 if (dmabuf->ops != &dma_buf_ops) {
1238 pr_err("%s: can not sync dmabuf from another exporter\n",
1239 __func__);
1240 dma_buf_put(dmabuf);
1241 return -EINVAL;
1242 }
1243 buffer = dmabuf->priv;
Rebecca Schultz Zavin3edb9002012-09-19 23:31:05 -07001244
1245 dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1246 buffer->sg_table->nents, DMA_BIDIRECTIONAL);
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001247 dma_buf_put(dmabuf);
1248 return 0;
1249}
1250
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001251static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1252{
1253 struct ion_client *client = filp->private_data;
1254
1255 switch (cmd) {
1256 case ION_IOC_ALLOC:
1257 {
1258 struct ion_allocation_data data;
1259
1260 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1261 return -EFAULT;
1262 data.handle = ion_alloc(client, data.len, data.align,
Hanumant Singh7d72bad2012-08-29 18:39:44 -07001263 data.heap_mask, data.flags);
KyongHo Cho9ae7e012011-09-07 11:27:07 +09001264
Laura Abbottb14ed962012-01-30 14:18:08 -08001265 if (IS_ERR(data.handle))
1266 return PTR_ERR(data.handle);
KyongHo Cho9ae7e012011-09-07 11:27:07 +09001267
Laura Abbottb14ed962012-01-30 14:18:08 -08001268 if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
1269 ion_free(client, data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001270 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001271 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001272 break;
1273 }
1274 case ION_IOC_FREE:
1275 {
1276 struct ion_handle_data data;
1277 bool valid;
1278
1279 if (copy_from_user(&data, (void __user *)arg,
1280 sizeof(struct ion_handle_data)))
1281 return -EFAULT;
1282 mutex_lock(&client->lock);
1283 valid = ion_handle_validate(client, data.handle);
1284 mutex_unlock(&client->lock);
1285 if (!valid)
1286 return -EINVAL;
1287 ion_free(client, data.handle);
1288 break;
1289 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001290 case ION_IOC_MAP:
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001291 case ION_IOC_SHARE:
1292 {
1293 struct ion_fd_data data;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001294 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1295 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001296
Johan Mossberg748c11d2013-01-11 13:38:13 +01001297 data.fd = ion_share_dma_buf_fd(client, data.handle);
1298
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001299 if (copy_to_user((void __user *)arg, &data, sizeof(data)))
1300 return -EFAULT;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001301 if (data.fd < 0)
1302 return data.fd;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001303 break;
1304 }
1305 case ION_IOC_IMPORT:
1306 {
1307 struct ion_fd_data data;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001308 int ret = 0;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001309 if (copy_from_user(&data, (void __user *)arg,
1310 sizeof(struct ion_fd_data)))
1311 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001312 data.handle = ion_import_dma_buf(client, data.fd);
Olav Haugan865e97f2012-05-15 14:40:11 -07001313 if (IS_ERR(data.handle)) {
1314 ret = PTR_ERR(data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001315 data.handle = NULL;
Olav Haugan865e97f2012-05-15 14:40:11 -07001316 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001317 if (copy_to_user((void __user *)arg, &data,
1318 sizeof(struct ion_fd_data)))
1319 return -EFAULT;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001320 if (ret < 0)
1321 return ret;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001322 break;
1323 }
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001324 case ION_IOC_SYNC:
1325 {
1326 struct ion_fd_data data;
1327 if (copy_from_user(&data, (void __user *)arg,
1328 sizeof(struct ion_fd_data)))
1329 return -EFAULT;
1330 ion_sync_for_device(client, data.fd);
1331 break;
1332 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001333 case ION_IOC_CUSTOM:
1334 {
1335 struct ion_device *dev = client->dev;
1336 struct ion_custom_data data;
1337
1338 if (!dev->custom_ioctl)
1339 return -ENOTTY;
1340 if (copy_from_user(&data, (void __user *)arg,
1341 sizeof(struct ion_custom_data)))
1342 return -EFAULT;
1343 return dev->custom_ioctl(client, data.cmd, data.arg);
1344 }
Laura Abbottabcb6f72011-10-04 16:26:49 -07001345 case ION_IOC_CLEAN_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001346 return client->dev->custom_ioctl(client,
1347 ION_IOC_CLEAN_CACHES, arg);
Laura Abbottabcb6f72011-10-04 16:26:49 -07001348 case ION_IOC_INV_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001349 return client->dev->custom_ioctl(client,
1350 ION_IOC_INV_CACHES, arg);
Laura Abbottabcb6f72011-10-04 16:26:49 -07001351 case ION_IOC_CLEAN_INV_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001352 return client->dev->custom_ioctl(client,
1353 ION_IOC_CLEAN_INV_CACHES, arg);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001354 default:
1355 return -ENOTTY;
1356 }
1357 return 0;
1358}
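/*
 * Illustrative sketch (not part of the driver): a typical userspace
 * sequence against the ioctls handled above.  Error handling is omitted;
 * ION_SYSTEM_HEAP_ID is an assumed heap id - real clients pass whichever
 * heap ids and cache flags their platform defines.
 *
 *	int ion_fd = open("/dev/ion", O_RDONLY);
 *
 *	struct ion_allocation_data alloc_data = {
 *		.len = 4096,
 *		.align = 4096,
 *		.heap_mask = ION_HEAP(ION_SYSTEM_HEAP_ID),
 *		.flags = 0,
 *	};
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data);
 *
 *	struct ion_fd_data fd_data = { .handle = alloc_data.handle };
 *	ioctl(ion_fd, ION_IOC_SHARE, &fd_data);
 *	void *ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd_data.fd, 0);
 *
 *	struct ion_handle_data free_data = { .handle = alloc_data.handle };
 *	ioctl(ion_fd, ION_IOC_FREE, &free_data);
 */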
1359
1360static int ion_release(struct inode *inode, struct file *file)
1361{
1362 struct ion_client *client = file->private_data;
1363
1364 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbottb14ed962012-01-30 14:18:08 -08001365 ion_client_destroy(client);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001366 return 0;
1367}
1368
1369static int ion_open(struct inode *inode, struct file *file)
1370{
1371 struct miscdevice *miscdev = file->private_data;
1372 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1373 struct ion_client *client;
Laura Abbotteed86032011-12-05 15:32:36 -08001374 char debug_name[64];
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001375
1376 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbotteed86032011-12-05 15:32:36 -08001377 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
Rebecca Schultz Zavin75aec5b2012-12-11 15:23:14 -08001378 client = ion_client_create(dev, debug_name);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001379 if (IS_ERR_OR_NULL(client))
1380 return PTR_ERR(client);
1381 file->private_data = client;
1382
1383 return 0;
1384}
1385
1386static const struct file_operations ion_fops = {
1387 .owner = THIS_MODULE,
1388 .open = ion_open,
1389 .release = ion_release,
1390 .unlocked_ioctl = ion_ioctl,
1391};
1392
1393static size_t ion_debug_heap_total(struct ion_client *client,
Laura Abbott3647ac32011-10-31 14:09:53 -07001394 enum ion_heap_ids id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001395{
1396 size_t size = 0;
1397 struct rb_node *n;
1398
1399 mutex_lock(&client->lock);
1400 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1401 struct ion_handle *handle = rb_entry(n,
1402 struct ion_handle,
1403 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001404 if (handle->buffer->heap->id == id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001405 size += handle->buffer->size;
1406 }
1407 mutex_unlock(&client->lock);
1408 return size;
1409}
1410
Olav Haugan0671b9a2012-05-25 11:58:56 -07001411/**
 1412 * Searches through a client's handles to determine whether the buffer is
 1413 * owned by this client. Used for debug output.
1414 * @param client pointer to candidate owner of buffer
1415 * @param buf pointer to buffer that we are trying to find the owner of
1416 * @return 1 if found, 0 otherwise
1417 */
1418static int ion_debug_find_buffer_owner(const struct ion_client *client,
1419 const struct ion_buffer *buf)
1420{
1421 struct rb_node *n;
1422
1423 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1424 const struct ion_handle *handle = rb_entry(n,
1425 const struct ion_handle,
1426 node);
1427 if (handle->buffer == buf)
1428 return 1;
1429 }
1430 return 0;
1431}
1432
1433/**
 1434 * Adds a mem_map_data entry to the mem_map tree.
1435 * Used for debug output.
1436 * @param mem_map The mem_map tree
1437 * @param data The new data to add to the tree
1438 */
1439static void ion_debug_mem_map_add(struct rb_root *mem_map,
1440 struct mem_map_data *data)
1441{
1442 struct rb_node **p = &mem_map->rb_node;
1443 struct rb_node *parent = NULL;
1444 struct mem_map_data *entry;
1445
1446 while (*p) {
1447 parent = *p;
1448 entry = rb_entry(parent, struct mem_map_data, node);
1449
1450 if (data->addr < entry->addr) {
1451 p = &(*p)->rb_left;
1452 } else if (data->addr > entry->addr) {
1453 p = &(*p)->rb_right;
1454 } else {
 1455 pr_err("%s: mem_map_data already found.\n", __func__);
1456 BUG();
1457 }
1458 }
1459 rb_link_node(&data->node, parent, p);
1460 rb_insert_color(&data->node, mem_map);
1461}
1462
1463/**
1464 * Search for an owner of a buffer by iterating over all ION clients.
1465 * @param dev ion device containing pointers to all the clients.
1466 * @param buffer pointer to buffer we are trying to find the owner of.
1467 * @return name of owner.
1468 */
1469const char *ion_debug_locate_owner(const struct ion_device *dev,
1470 const struct ion_buffer *buffer)
1471{
1472 struct rb_node *j;
1473 const char *client_name = NULL;
1474
Laura Abbottb14ed962012-01-30 14:18:08 -08001475 for (j = rb_first(&dev->clients); j && !client_name;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001476 j = rb_next(j)) {
1477 struct ion_client *client = rb_entry(j, struct ion_client,
1478 node);
1479 if (ion_debug_find_buffer_owner(client, buffer))
1480 client_name = client->name;
1481 }
1482 return client_name;
1483}
1484
1485/**
1486 * Create a mem_map of the heap.
1487 * @param s seq_file to log error message to.
1488 * @param heap The heap to create mem_map for.
1489 * @param mem_map The mem map to be created.
1490 */
1491void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
1492 struct rb_root *mem_map)
1493{
1494 struct ion_device *dev = heap->dev;
1495 struct rb_node *n;
Chintan Pandyadaf75622013-01-29 19:40:01 +05301496 size_t size;
1497
1498 if (!heap->ops->phys)
1499 return;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001500
1501 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1502 struct ion_buffer *buffer =
1503 rb_entry(n, struct ion_buffer, node);
1504 if (buffer->heap->id == heap->id) {
1505 struct mem_map_data *data =
1506 kzalloc(sizeof(*data), GFP_KERNEL);
1507 if (!data) {
1508 seq_printf(s, "ERROR: out of memory. "
1509 "Part of memory map will not be logged\n");
1510 break;
1511 }
Chintan Pandyadaf75622013-01-29 19:40:01 +05301512
1513 buffer->heap->ops->phys(buffer->heap, buffer,
1514 &(data->addr), &size);
1515 data->size = (unsigned long) size;
1516 data->addr_end = data->addr + data->size - 1;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001517 data->client_name = ion_debug_locate_owner(dev, buffer);
1518 ion_debug_mem_map_add(mem_map, data);
1519 }
1520 }
1521}
1522
1523/**
1524 * Free the memory allocated by ion_debug_mem_map_create
1525 * @param mem_map The mem map to free.
1526 */
1527static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
1528{
1529 if (mem_map) {
1530 struct rb_node *n;
1531 while ((n = rb_first(mem_map)) != 0) {
1532 struct mem_map_data *data =
1533 rb_entry(n, struct mem_map_data, node);
1534 rb_erase(&data->node, mem_map);
1535 kfree(data);
1536 }
1537 }
1538}
1539
1540/**
1541 * Print heap debug information.
1542 * @param s seq_file to log message to.
1543 * @param heap pointer to heap that we will print debug information for.
1544 */
1545static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
1546{
1547 if (heap->ops->print_debug) {
1548 struct rb_root mem_map = RB_ROOT;
1549 ion_debug_mem_map_create(s, heap, &mem_map);
1550 heap->ops->print_debug(heap, s, &mem_map);
1551 ion_debug_mem_map_destroy(&mem_map);
1552 }
1553}
1554
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001555static int ion_debug_heap_show(struct seq_file *s, void *unused)
1556{
1557 struct ion_heap *heap = s->private;
1558 struct ion_device *dev = heap->dev;
1559 struct rb_node *n;
Rebecca Schultz Zavindb70ae62012-08-28 17:27:22 -07001560 size_t total_size = 0;
1561 size_t total_orphaned_size = 0;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001562
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001563 mutex_lock(&dev->buffer_lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001564 seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
Rebecca Schultz Zavindb70ae62012-08-28 17:27:22 -07001565 seq_printf(s, "----------------------------------------------------\n");
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001566
Laura Abbottb14ed962012-01-30 14:18:08 -08001567 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001568 struct ion_client *client = rb_entry(n, struct ion_client,
1569 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001570 size_t size = ion_debug_heap_total(client, heap->id);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001571 if (!size)
1572 continue;
Laura Abbottb14ed962012-01-30 14:18:08 -08001573 if (client->task) {
1574 char task_comm[TASK_COMM_LEN];
1575
1576 get_task_comm(task_comm, client->task);
 1577 seq_printf(s, "%16s %16u %16u\n", task_comm,
1578 client->pid, size);
1579 } else {
 1580 seq_printf(s, "%16s %16u %16u\n", client->name,
1581 client->pid, size);
1582 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001583 }
Rebecca Schultz Zavindb70ae62012-08-28 17:27:22 -07001584 seq_printf(s, "----------------------------------------------------\n");
1585 seq_printf(s, "orphaned allocations (info is from last known client):"
1586 "\n");
1587 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1588 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1589 node);
1590 if (buffer->heap->type == heap->type)
1591 total_size += buffer->size;
1592 if (!buffer->handle_count) {
 1593 seq_printf(s, "%16s %16u %16u\n", buffer->task_comm,
1594 buffer->pid, buffer->size);
1595 total_orphaned_size += buffer->size;
1596 }
1597 }
1598 seq_printf(s, "----------------------------------------------------\n");
 1599 seq_printf(s, "%16s %16u\n", "total orphaned",
1600 total_orphaned_size);
 1601 seq_printf(s, "%16s %16u\n", "total ", total_size);
1602 seq_printf(s, "----------------------------------------------------\n");
1603
Olav Haugan0671b9a2012-05-25 11:58:56 -07001604 ion_heap_print_debug(s, heap);
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001605 mutex_unlock(&dev->buffer_lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001606 return 0;
1607}
1608
1609static int ion_debug_heap_open(struct inode *inode, struct file *file)
1610{
1611 return single_open(file, ion_debug_heap_show, inode->i_private);
1612}
1613
1614static const struct file_operations debug_heap_fops = {
1615 .open = ion_debug_heap_open,
1616 .read = seq_read,
1617 .llseek = seq_lseek,
1618 .release = single_release,
1619};
1620
Rebecca Schultz Zavin618d6be2013-02-13 14:48:11 -08001621static bool ion_heap_free_list_is_empty(struct ion_heap *heap)
1622{
1623 bool is_empty;
1624
1625 rt_mutex_lock(&heap->lock);
1626 is_empty = list_empty(&heap->free_list);
1627 rt_mutex_unlock(&heap->lock);
1628
1629 return is_empty;
1630}
1631
1632static int ion_heap_deferred_free(void *data)
1633{
1634 struct ion_heap *heap = data;
1635
1636 while (true) {
1637 struct ion_buffer *buffer;
1638
1639 wait_event_freezable(heap->waitqueue,
1640 !ion_heap_free_list_is_empty(heap));
1641
1642 rt_mutex_lock(&heap->lock);
1643 if (list_empty(&heap->free_list)) {
1644 rt_mutex_unlock(&heap->lock);
1645 continue;
1646 }
1647 buffer = list_first_entry(&heap->free_list, struct ion_buffer,
1648 list);
1649 list_del(&buffer->list);
1650 rt_mutex_unlock(&heap->lock);
1651 _ion_buffer_destroy(buffer);
1652 }
1653
1654 return 0;
1655}
1656
1657static bool ion_heap_drain_freelist(struct ion_heap *heap)
1658{
1659 struct ion_buffer *buffer, *tmp;
1660
1661 if (ion_heap_free_list_is_empty(heap))
1662 return false;
1663 rt_mutex_lock(&heap->lock);
1664 list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
 1665 list_del(&buffer->list);
 1666 _ion_buffer_destroy(buffer);
1667 }
1668 BUG_ON(!list_empty(&heap->free_list));
1669 rt_mutex_unlock(&heap->lock);
1670
1671
1672 return true;
1673}
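/*
 * Sketch of the enqueue side that pairs with ion_heap_deferred_free()
 * above.  The real producer is the buffer release path elsewhere in this
 * file; this simplified version only shows the lock/list/wakeup pattern
 * the kthread waits on (the field names match those used above).
 *
 *	static void ion_heap_queue_free(struct ion_heap *heap,
 *					struct ion_buffer *buffer)
 *	{
 *		rt_mutex_lock(&heap->lock);
 *		list_add(&buffer->list, &heap->free_list);
 *		rt_mutex_unlock(&heap->lock);
 *		wake_up(&heap->waitqueue);
 *	}
 */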
1674
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001675void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1676{
Rebecca Schultz Zavin618d6be2013-02-13 14:48:11 -08001677 struct sched_param param = { .sched_priority = 0 };
1678
Laura Abbottb14ed962012-01-30 14:18:08 -08001679 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1680 !heap->ops->unmap_dma)
1681 pr_err("%s: can not add heap with invalid ops struct.\n",
1682 __func__);
1683
Rebecca Schultz Zavin618d6be2013-02-13 14:48:11 -08001684 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
1685 INIT_LIST_HEAD(&heap->free_list);
1686 rt_mutex_init(&heap->lock);
1687 init_waitqueue_head(&heap->waitqueue);
1688 heap->task = kthread_run(ion_heap_deferred_free, heap,
1689 "%s", heap->name);
 1690 if (IS_ERR(heap->task))
 1691 pr_err("%s: creating thread for deferred free failed\n",
 1692 __func__);
 1693 else
 sched_setscheduler(heap->task, SCHED_IDLE, &param);
1694 }
1695
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001696 heap->dev = dev;
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001697 down_write(&dev->lock);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001698 /* use negative heap->id to reverse the priority -- when traversing
1699 the list later attempt higher id numbers first */
1700 plist_node_init(&heap->node, -heap->id);
1701 plist_add(&heap->node, &dev->heaps);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001702 debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1703 &debug_heap_fops);
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001704 up_write(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001705}
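/*
 * Sketch of how a heap implementation is handed to the core.  The ops
 * and id below are placeholders; the msm_ion probe path normally builds
 * the heap via ion_heap_create() before calling in here.  A heap must
 * supply at least allocate/free/map_dma/unmap_dma or the check above
 * complains.
 *
 *	static struct ion_heap_ops example_heap_ops = {
 *		.allocate = example_allocate,
 *		.free = example_free,
 *		.map_dma = example_map_dma,
 *		.unmap_dma = example_unmap_dma,
 *	};
 *
 *	heap->ops = &example_heap_ops;
 *	heap->id = ION_SYSTEM_HEAP_ID;
 *	heap->name = "example";
 *	heap->flags = ION_HEAP_FLAG_DEFER_FREE;
 *	ion_device_add_heap(idev, heap);
 */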
1706
Laura Abbott93619302012-10-11 11:51:40 -07001707int ion_secure_handle(struct ion_client *client, struct ion_handle *handle,
1708 int version, void *data, int flags)
1709{
1710 int ret = -EINVAL;
1711 struct ion_heap *heap;
1712 struct ion_buffer *buffer;
1713
1714 mutex_lock(&client->lock);
1715 if (!ion_handle_validate(client, handle)) {
1716 WARN(1, "%s: invalid handle passed to secure.\n", __func__);
1717 goto out_unlock;
1718 }
1719
1720 buffer = handle->buffer;
1721 heap = buffer->heap;
1722
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001723 if (!ion_heap_allow_handle_secure(heap->type)) {
Laura Abbott93619302012-10-11 11:51:40 -07001724 pr_err("%s: cannot secure buffer from non secure heap\n",
1725 __func__);
1726 goto out_unlock;
1727 }
1728
1729 BUG_ON(!buffer->heap->ops->secure_buffer);
1730 /*
1731 * Protect the handle via the client lock to ensure we aren't
1732 * racing with free
1733 */
1734 ret = buffer->heap->ops->secure_buffer(buffer, version, data, flags);
1735
1736out_unlock:
1737 mutex_unlock(&client->lock);
1738 return ret;
1739}
1740
1741int ion_unsecure_handle(struct ion_client *client, struct ion_handle *handle)
1742{
1743 int ret = -EINVAL;
1744 struct ion_heap *heap;
1745 struct ion_buffer *buffer;
1746
1747 mutex_lock(&client->lock);
1748 if (!ion_handle_validate(client, handle)) {
 1749 WARN(1, "%s: invalid handle passed to unsecure.\n", __func__);
1750 goto out_unlock;
1751 }
1752
1753 buffer = handle->buffer;
1754 heap = buffer->heap;
1755
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001756 if (!ion_heap_allow_handle_secure(heap->type)) {
Laura Abbott93619302012-10-11 11:51:40 -07001757 pr_err("%s: cannot unsecure buffer from non secure heap\n",
1758 __func__);
1759 goto out_unlock;
1760 }
1761
1762 BUG_ON(!buffer->heap->ops->unsecure_buffer);
1763 /*
1764 * Protect the handle via the client lock to ensure we aren't
1765 * racing with free
1766 */
1767 ret = buffer->heap->ops->unsecure_buffer(buffer, 0);
1768
1769out_unlock:
1770 mutex_unlock(&client->lock);
1771 return ret;
1772}
1773
Laura Abbott7e446482012-06-13 15:59:39 -07001774int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
1775 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001776{
Olav Haugan0a852512012-01-09 10:20:55 -08001777 int ret_val = 0;
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001778 struct ion_heap *heap;
Olav Haugan0a852512012-01-09 10:20:55 -08001779
1780 /*
1781 * traverse the list of heaps available in this system
1782 * and find the heap that is specified.
1783 */
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001784 down_write(&dev->lock);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001785 plist_for_each_entry(heap, &dev->heaps, node) {
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001786 if (!ion_heap_allow_heap_secure(heap->type))
Olav Haugan0a852512012-01-09 10:20:55 -08001787 continue;
1788 if (ION_HEAP(heap->id) != heap_id)
1789 continue;
1790 if (heap->ops->secure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001791 ret_val = heap->ops->secure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001792 else
1793 ret_val = -EINVAL;
1794 break;
1795 }
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001796 up_write(&dev->lock);
Olav Haugan0a852512012-01-09 10:20:55 -08001797 return ret_val;
1798}
Olav Hauganbd453a92012-07-05 14:21:34 -07001799EXPORT_SYMBOL(ion_secure_heap);
Olav Haugan0a852512012-01-09 10:20:55 -08001800
Laura Abbott7e446482012-06-13 15:59:39 -07001801int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
1802 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001803{
Olav Haugan0a852512012-01-09 10:20:55 -08001804 int ret_val = 0;
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001805 struct ion_heap *heap;
Olav Haugan0a852512012-01-09 10:20:55 -08001806
1807 /*
1808 * traverse the list of heaps available in this system
1809 * and find the heap that is specified.
1810 */
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001811 down_write(&dev->lock);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001812 plist_for_each_entry(heap, &dev->heaps, node) {
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001813 if (!ion_heap_allow_heap_secure(heap->type))
Olav Haugan0a852512012-01-09 10:20:55 -08001814 continue;
1815 if (ION_HEAP(heap->id) != heap_id)
1816 continue;
 1817 if (heap->ops->unsecure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001818 ret_val = heap->ops->unsecure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001819 else
1820 ret_val = -EINVAL;
1821 break;
1822 }
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001823 up_write(&dev->lock);
Olav Haugan0a852512012-01-09 10:20:55 -08001824 return ret_val;
1825}
Olav Hauganbd453a92012-07-05 14:21:34 -07001826EXPORT_SYMBOL(ion_unsecure_heap);
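/*
 * Sketch of a platform caller driving the two exported helpers above,
 * e.g. when a content-protection session starts and stops.  The heap id,
 * version and data values are placeholders; note the id is passed as a
 * mask because the lookup above compares against ION_HEAP(heap->id).
 *
 *	ret = ion_secure_heap(idev, ION_HEAP(ION_CP_MM_HEAP_ID),
 *			      version, data);
 *	if (ret)
 *		return ret;
 *	...protected playback runs...
 *	ion_unsecure_heap(idev, ION_HEAP(ION_CP_MM_HEAP_ID), version, data);
 */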
Olav Haugan0a852512012-01-09 10:20:55 -08001827
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001828struct ion_device *ion_device_create(long (*custom_ioctl)
1829 (struct ion_client *client,
1830 unsigned int cmd,
1831 unsigned long arg))
1832{
1833 struct ion_device *idev;
1834 int ret;
1835
1836 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1837 if (!idev)
1838 return ERR_PTR(-ENOMEM);
1839
1840 idev->dev.minor = MISC_DYNAMIC_MINOR;
1841 idev->dev.name = "ion";
1842 idev->dev.fops = &ion_fops;
1843 idev->dev.parent = NULL;
1844 ret = misc_register(&idev->dev);
1845 if (ret) {
 1846 pr_err("ion: failed to register misc device.\n");
 kfree(idev);
 1847 return ERR_PTR(ret);
1848 }
1849
1850 idev->debug_root = debugfs_create_dir("ion", NULL);
1851 if (IS_ERR_OR_NULL(idev->debug_root))
1852 pr_err("ion: failed to create debug files.\n");
1853
1854 idev->custom_ioctl = custom_ioctl;
1855 idev->buffers = RB_ROOT;
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001856 mutex_init(&idev->buffer_lock);
1857 init_rwsem(&idev->lock);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001858 plist_head_init(&idev->heaps);
Laura Abbottb14ed962012-01-30 14:18:08 -08001859 idev->clients = RB_ROOT;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001860 return idev;
1861}
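/*
 * Sketch of the expected probe-time flow for a platform driver built on
 * this core (msm_ion does roughly this).  Error handling is trimmed and
 * my_custom_ioctl is a placeholder; ion_heap_create() is the helper
 * declared in ion_priv.h.
 *
 *	idev = ion_device_create(my_custom_ioctl);
 *	if (IS_ERR_OR_NULL(idev))
 *		return PTR_ERR(idev);
 *
 *	for (i = 0; i < pdata->nr; i++) {
 *		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);
 *		if (!IS_ERR_OR_NULL(heap))
 *			ion_device_add_heap(idev, heap);
 *	}
 */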
1862
1863void ion_device_destroy(struct ion_device *dev)
1864{
1865 misc_deregister(&dev->dev);
1866 /* XXX need to free the heaps and clients ? */
1867 kfree(dev);
1868}
Laura Abbottb14ed962012-01-30 14:18:08 -08001869
1870void __init ion_reserve(struct ion_platform_data *data)
1871{
Rebecca Schultz Zavinfbce1512012-11-15 10:31:02 -08001872 int i;
Laura Abbottb14ed962012-01-30 14:18:08 -08001873
1874 for (i = 0; i < data->nr; i++) {
1875 if (data->heaps[i].size == 0)
1876 continue;
Rebecca Schultz Zavinfbce1512012-11-15 10:31:02 -08001877
1878 if (data->heaps[i].base == 0) {
1879 phys_addr_t paddr;
1880 paddr = memblock_alloc_base(data->heaps[i].size,
1881 data->heaps[i].align,
1882 MEMBLOCK_ALLOC_ANYWHERE);
1883 if (!paddr) {
1884 pr_err("%s: error allocating memblock for "
1885 "heap %d\n",
1886 __func__, i);
1887 continue;
1888 }
1889 data->heaps[i].base = paddr;
1890 } else {
1891 int ret = memblock_reserve(data->heaps[i].base,
1892 data->heaps[i].size);
1893 if (ret)
 1894 pr_err("memblock reserve of %zx@%pa failed\n",
1895 data->heaps[i].size,
1896 &data->heaps[i].base);
1897 }
 1898 pr_info("%s: %s reserved base %pa size %zu\n", __func__,
1899 data->heaps[i].name,
1900 &data->heaps[i].base,
1901 data->heaps[i].size);
Laura Abbottb14ed962012-01-30 14:18:08 -08001902 }
1903}
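/*
 * Sketch of board-level usage of ion_reserve(): a static heap table is
 * passed in from the machine's .reserve callback so carveout memory is
 * claimed before the page allocator takes it.  The id, size and
 * alignment below are placeholders; a base of zero asks ion_reserve()
 * to pick an address via memblock, as in the code above.
 *
 *	static struct ion_platform_heap example_heaps[] = {
 *		{
 *			.id = ION_CP_MM_HEAP_ID,
 *			.type = ION_HEAP_TYPE_CARVEOUT,
 *			.name = "mm",
 *			.size = SZ_64M,
 *			.align = SZ_1M,
 *			.base = 0,
 *		},
 *	};
 *	static struct ion_platform_data example_ion_pdata = {
 *		.nr = ARRAY_SIZE(example_heaps),
 *		.heaps = example_heaps,
 *	};
 *
 *	ion_reserve(&example_ion_pdata);
 */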