/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/msm_ion.h>
#include <trace/events/kmem.h>


#include <mach/iommu_domains.h>
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	hook for device-specific ioctls
 * @clients:		an rb tree of all the clients created against this device
 * @debug_root:		debugfs root dentry for the device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both handles tree
 * as well as the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @dmap_cnt:		count of times this client has mapped for dma
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int iommu_map_cnt;
};

static void ion_iommu_release(struct kref *kref);

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static void ion_iommu_add(struct ion_buffer *buffer,
			  struct ion_iommu_map *iommu)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (iommu->key < entry->key) {
			p = &(*p)->rb_left;
		} else if (iommu->key > entry->key) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer %p already has mapping for domain %d"
				" and partition %d\n", __func__,
				buffer,
				iommu_map_domain(iommu),
				iommu_map_partition(iommu));
			BUG();
		}
	}

	rb_link_node(&iommu->node, parent, p);
	rb_insert_color(&iommu->node, &buffer->iommu_maps);

}

static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
						unsigned int domain_no,
						unsigned int partition_no)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;
	uint64_t key = domain_no;
	key = key << 32 | partition_no;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (key < entry->key)
			p = &(*p)->rb_left;
		else if (key > entry->key)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	return NULL;
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;
	buffer->flags = flags;

	table = heap->ops->map_dma(heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (buffer->flags & ION_FLAG_CACHED)
		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
			    i) {
			if (sg_dma_len(sg) == PAGE_SIZE)
				continue;
			pr_err("%s: cached mappings must have pagewise "
			       "sg_lists\n", __func__);
			heap->ops->unmap_dma(heap, buffer);
			kfree(buffer);
			return ERR_PTR(-EINVAL);
		}

	ret = ion_buffer_alloc_dirty(buffer);
	if (ret) {
		heap->ops->unmap_dma(heap, buffer);
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * This will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific device
	 * isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg.  The implicit contract here is that
	 * memory coming from the heaps is ready for dma, i.e. if it has a
	 * cached mapping that mapping has been invalidated.
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	ion_buffer_add(dev, buffer);
	return buffer;
}

/**
 * Check for delayed IOMMU unmapping. Also unmap any outstanding
 * mappings which would otherwise have been leaked.
 */
static void ion_iommu_delayed_unmap(struct ion_buffer *buffer)
{
	struct ion_iommu_map *iommu_map;
	struct rb_node *node;
	const struct rb_root *rb = &(buffer->iommu_maps);
	unsigned long ref_count;
	unsigned int delayed_unmap;

	mutex_lock(&buffer->lock);

	while ((node = rb_first(rb)) != 0) {
		iommu_map = rb_entry(node, struct ion_iommu_map, node);
		ref_count = atomic_read(&iommu_map->ref.refcount);
		delayed_unmap = iommu_map->flags & ION_IOMMU_UNMAP_DELAYED;

		if ((delayed_unmap && ref_count > 1) || !delayed_unmap) {
			pr_err("%s: Virtual memory address leak in domain %u, partition %u\n",
				__func__, iommu_map->domain_info[DI_DOMAIN_NUM],
				iommu_map->domain_info[DI_PARTITION_NUM]);
		}
		/* set ref count to 1 to force release */
		kref_init(&iommu_map->ref);
		kref_put(&iommu_map->ref, ion_iommu_release);
	}

	mutex_unlock(&buffer->lock);
}

static void ion_delayed_unsecure(struct ion_buffer *buffer)
{
	if (buffer->heap->ops->unsecure_buffer)
		buffer->heap->ops->unsecure_buffer(buffer, 1);
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);

	buffer->heap->ops->unmap_dma(buffer->heap, buffer);

	ion_delayed_unsecure(buffer);
	ion_iommu_delayed_unmap(buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_put(buffer);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	unsigned long secure_allocation = flags & ION_SECURE;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	pr_debug("%s: len %d align %d heap_mask %u flags %x\n", __func__, len,
		 align, heap_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap type */
		if (!((1 << heap->id) & heap_mask))
			continue;
		/* Do not allow un-secure heap if secure is specified */
		if (secure_allocation &&
		    !ion_heap_allow_secure_allocation(heap->type))
			continue;
		trace_ion_alloc_buffer_start(client->name, heap->name, len,
					     heap_mask, flags);
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		trace_ion_alloc_buffer_end(client->name, heap->name, len,
					   heap_mask, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;

		trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
					    heap_mask, flags, PTR_ERR(buffer));
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
			int ret_value = snprintf(&dbg_str[dbg_str_idx],
						len_left, "%s ", heap->name);
			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
			}
		}
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_mask, flags, -ENODEV);
		return ERR_PTR(-ENODEV);
	}

	if (IS_ERR(buffer)) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_mask, flags, PTR_ERR(buffer));
		pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
			 "0x%x) from heap(s) %sfor client %s with heap "
			 "mask 0x%x\n",
			 len, align, dbg_str, client->name, client->heap_mask);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}


	return handle;
}
EXPORT_SYMBOL(ion_alloc);
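
/*
 * Illustrative allocation sketch (not compiled into the driver): a kernel
 * client pairs ion_alloc() with ion_free(), e.g.
 *
 *	handle = ion_alloc(client, SZ_4K, SZ_4K, heap_mask, 0);
 *	if (IS_ERR_OR_NULL(handle))
 *		return -ENOMEM;
 *	...
 *	ion_free(client, handle);
 *
 * The heap_mask value here is an assumption supplied by the caller (a bitmask
 * of heap ids, typically built from the platform headers); optional flags
 * such as ION_FLAG_CACHED or ION_SECURE alter how the heaps above are chosen
 * and mapped.
 */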

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		mutex_unlock(&client->lock);
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
		int domain_num, int partition_num, unsigned long align,
		unsigned long iova_length, unsigned long flags,
		unsigned long *iova)
{
	struct ion_iommu_map *data;
	int ret;

	data = kmalloc(sizeof(*data), GFP_ATOMIC);

	if (!data)
		return ERR_PTR(-ENOMEM);

	data->buffer = buffer;
	iommu_map_domain(data) = domain_num;
	iommu_map_partition(data) = partition_num;

	ret = buffer->heap->ops->map_iommu(buffer, data,
						domain_num,
						partition_num,
						align,
						iova_length,
						flags);

	if (ret)
		goto out;

	kref_init(&data->ref);
	*iova = data->iova_addr;

	ion_iommu_add(buffer, data);

	return data;

out:
	kfree(data);
	return ERR_PTR(ret);
}

int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags, unsigned long iommu_flags)
{
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	int ret = 0;

	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: client pointer is invalid\n", __func__);
		return -EINVAL;
	}
	if (IS_ERR_OR_NULL(handle)) {
		pr_err("%s: handle pointer is invalid\n", __func__);
		return -EINVAL;
	}
	if (IS_ERR_OR_NULL(handle->buffer)) {
		pr_err("%s: buffer pointer is invalid\n", __func__);
		return -EINVAL;
	}

	if (ION_IS_CACHED(flags)) {
		pr_err("%s: Cannot map iommu as cached.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	/*
	 * If clients don't want a custom iova length, just use whatever
	 * the buffer size is
	 */
	if (!iova_length)
		iova_length = buffer->size;

	if (buffer->size > iova_length) {
		pr_debug("%s: iova length %lx is not at least buffer size"
			" %x\n", __func__, iova_length, buffer->size);
		ret = -EINVAL;
		goto out;
	}

	if (buffer->size & ~PAGE_MASK) {
		pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
			buffer->size, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	if (iova_length & ~PAGE_MASK) {
		pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
			iova_length, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
	if (!iommu_map) {
		iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
					    align, iova_length, flags, iova);
		if (!IS_ERR_OR_NULL(iommu_map)) {
			iommu_map->flags = iommu_flags;

			if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
				kref_get(&iommu_map->ref);
		} else {
			ret = PTR_ERR(iommu_map);
		}
	} else {
		if (iommu_map->flags != iommu_flags) {
			pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
				__func__, handle,
				iommu_map->flags, iommu_flags);
			ret = -EINVAL;
		} else if (iommu_map->mapped_size != iova_length) {
			pr_err("%s: handle %p is already mapped with length"
					" %x, trying to map with length %lx\n",
				__func__, handle, iommu_map->mapped_size,
				iova_length);
			ret = -EINVAL;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	if (!ret)
		buffer->iommu_map_cnt++;
	*buffer_size = buffer->size;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
EXPORT_SYMBOL(ion_map_iommu);

static void ion_iommu_release(struct kref *kref)
{
	struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
						ref);
	struct ion_buffer *buffer = map->buffer;

	rb_erase(&map->node, &buffer->iommu_maps);
	buffer->heap->ops->unmap_iommu(map);
	kfree(map);
}

void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num)
{
	struct ion_iommu_map *iommu_map;
	struct ion_buffer *buffer;

	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: client pointer is invalid\n", __func__);
		return;
	}
	if (IS_ERR_OR_NULL(handle)) {
		pr_err("%s: handle pointer is invalid\n", __func__);
		return;
	}
	if (IS_ERR_OR_NULL(handle->buffer)) {
		pr_err("%s: buffer pointer is invalid\n", __func__);
		return;
	}

	mutex_lock(&client->lock);
	buffer = handle->buffer;

	mutex_lock(&buffer->lock);

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);

	if (!iommu_map) {
		WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
				domain_num, partition_num, buffer);
		goto out;
	}

	kref_put(&iommu_map->ref, ion_iommu_release);

	buffer->iommu_map_cnt--;
out:
	mutex_unlock(&buffer->lock);

	mutex_unlock(&client->lock);

}
EXPORT_SYMBOL(ion_unmap_iommu);
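
/*
 * Illustrative IOMMU mapping sketch (not compiled into the driver): a client
 * maps a buffer into an SMMU domain/partition and later drops the mapping:
 *
 *	unsigned long iova, size;
 *
 *	ret = ion_map_iommu(client, handle, domain_num, partition_num,
 *			    SZ_4K, 0, &iova, &size, 0, 0);
 *	if (ret)
 *		return ret;
 *	...
 *	ion_unmap_iommu(client, handle, domain_num, partition_num);
 *
 * domain_num and partition_num are assumptions taken from the platform's
 * iommu_domains configuration; passing 0 for iova_length maps the whole
 * buffer, as handled above.
 */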

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
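
/*
 * Illustrative kernel mapping sketch (not compiled into the driver):
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR_OR_NULL(vaddr))
 *		return -ENOMEM;
 *	memset(vaddr, 0, size);
 *	ion_unmap_kernel(client, handle);
 *
 * The mapping is reference counted per handle and per buffer (see the
 * kmap_get/kmap_put helpers above), so nested map/unmap pairs on the same
 * handle are safe.
 */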

int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
			void *uaddr, unsigned long offset, unsigned long len,
			unsigned int cmd)
{
	struct ion_buffer *buffer;
	int ret = -EINVAL;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to do_cache_op.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!ION_IS_CACHED(buffer->flags)) {
		ret = 0;
		goto out;
	}

	if (!handle->buffer->heap->ops->cache_op) {
		pr_err("%s: cache_op is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}


	ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
					  offset, len, cmd);

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;

}
EXPORT_SYMBOL(ion_do_cache_op);
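
/*
 * Illustrative cache maintenance sketch (not compiled into the driver): after
 * the CPU writes to a cached ION buffer that a device will read, a client
 * might issue:
 *
 *	ret = ion_do_cache_op(client, handle, vaddr, 0, size,
 *			      ION_IOC_CLEAN_CACHES);
 *
 * ION_IOC_CLEAN_CACHES (and the corresponding invalidate/clean+invalidate
 * commands) are msm_ion command values and are assumptions here; uncached
 * buffers return 0 without doing any maintenance, as handled above.
 */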

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	struct rb_node *n2;

	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
			"heap_name", "size_in_bytes", "handle refcount",
			"buffer", "physical", "[domain,partition] - virt");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		seq_printf(s, "%16.16s: %16x : %16d : %12p",
			   handle->buffer->heap->name,
			   handle->buffer->size,
			   atomic_read(&handle->ref.refcount),
			   handle->buffer);

		if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
			type == ION_HEAP_TYPE_CARVEOUT ||
			type == (enum ion_heap_type) ION_HEAP_TYPE_CP)
			seq_printf(s, " : %12pa", &handle->buffer->priv_phys);
		else
			seq_printf(s, " : %12s", "N/A");

		for (n2 = rb_first(&handle->buffer->iommu_maps); n2;
			n2 = rb_next(n2)) {
			struct ion_iommu_map *imap =
				rb_entry(n2, struct ion_iommu_map, node);
			seq_printf(s, " : [%d,%d] - %8lx",
					imap->domain_info[DI_DOMAIN_NUM],
					imap->domain_info[DI_PARTITION_NUM],
					imap->iova_addr);
		}
		seq_printf(s, "\n");
	}
	mutex_unlock(&client->lock);

	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;
	unsigned int name_len;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	name_len = strnlen(name, 64);

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);

	client->name = kzalloc(name_len+1, GFP_KERNEL);
	if (!client->name) {
		put_task_struct(current->group_leader);
		kfree(client);
		return ERR_PTR(-ENOMEM);
	} else {
		strlcpy(client->name, name, name_len+1);
	}

	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	mutex_lock(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);


	client->debug_root = debugfs_create_file(name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}
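
/*
 * Illustrative client lifetime sketch (not compiled into the driver): a
 * kernel driver typically creates one client per ion device and destroys it
 * when it no longer needs ion buffers:
 *
 *	client = ion_client_create(idev, heap_mask, "my-driver");
 *	if (IS_ERR_OR_NULL(client))
 *		return PTR_ERR(client);
 *	...
 *	ion_client_destroy(client);
 *
 * idev, heap_mask and the "my-driver" name are assumptions supplied by the
 * caller; heap_mask limits which heap types the client may allocate from.
 */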
1062
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001063/**
1064 * ion_mark_dangling_buffers_locked() - Mark dangling buffers
1065 * @dev: the ion device whose buffers will be searched
1066 *
1067 * Sets marked=1 for all known buffers associated with `dev' that no
1068 * longer have a handle pointing to them. dev->lock should be held
1069 * across a call to this function (and should only be unlocked after
1070 * checking for marked buffers).
1071 */
1072static void ion_mark_dangling_buffers_locked(struct ion_device *dev)
1073{
1074 struct rb_node *n, *n2;
1075 /* mark all buffers as 1 */
1076 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1077 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
1078 node);
1079
1080 buf->marked = 1;
1081 }
1082
1083 /* now see which buffers we can access */
1084 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1085 struct ion_client *client = rb_entry(n, struct ion_client,
1086 node);
1087
1088 mutex_lock(&client->lock);
1089 for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
1090 struct ion_handle *handle
1091 = rb_entry(n2, struct ion_handle, node);
1092
1093 handle->buffer->marked = 0;
1094
1095 }
1096 mutex_unlock(&client->lock);
1097
1098 }
1099}
1100
1101#ifdef CONFIG_ION_LEAK_CHECK
1102static u32 ion_debug_check_leaks_on_destroy;
1103
1104static int ion_check_for_and_print_leaks(struct ion_device *dev)
1105{
1106 struct rb_node *n;
1107 int num_leaks = 0;
1108
1109 if (!ion_debug_check_leaks_on_destroy)
1110 return 0;
1111
1112 /* check for leaked buffers (those that no longer have a
1113 * handle pointing to them) */
1114 ion_mark_dangling_buffers_locked(dev);
1115
1116 /* Anyone still marked as a 1 means a leaked handle somewhere */
1117 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1118 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
1119 node);
1120
1121 if (buf->marked == 1) {
1122 pr_info("Leaked ion buffer at %p\n", buf);
1123 num_leaks++;
1124 }
1125 }
1126 return num_leaks;
1127}
1128static void setup_ion_leak_check(struct dentry *debug_root)
1129{
1130 debugfs_create_bool("check_leaks_on_destroy", 0664, debug_root,
1131 &ion_debug_check_leaks_on_destroy);
1132}
1133#else
1134static int ion_check_for_and_print_leaks(struct ion_device *dev)
1135{
1136 return 0;
1137}
1138static void setup_ion_leak_check(struct dentry *debug_root)
1139{
1140}
1141#endif
1142
Laura Abbottb14ed962012-01-30 14:18:08 -08001143void ion_client_destroy(struct ion_client *client)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001144{
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001145 struct ion_device *dev = client->dev;
1146 struct rb_node *n;
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001147 int num_leaks;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001148
1149 pr_debug("%s: %d\n", __func__, __LINE__);
1150 while ((n = rb_first(&client->handles))) {
1151 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1152 node);
1153 ion_handle_destroy(&handle->ref);
1154 }
1155 mutex_lock(&dev->lock);
Laura Abbottb14ed962012-01-30 14:18:08 -08001156 if (client->task)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001157 put_task_struct(client->task);
Laura Abbottb14ed962012-01-30 14:18:08 -08001158 rb_erase(&client->node, &dev->clients);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001159 debugfs_remove_recursive(client->debug_root);
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001160
1161 num_leaks = ion_check_for_and_print_leaks(dev);
1162
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001163 mutex_unlock(&dev->lock);
1164
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001165 if (num_leaks) {
1166 struct task_struct *current_task = current;
1167 char current_task_name[TASK_COMM_LEN];
1168 get_task_comm(current_task_name, current_task);
1169 WARN(1, "%s: Detected %d leaked ion buffer%s.\n",
1170 __func__, num_leaks, num_leaks == 1 ? "" : "s");
1171 pr_info("task name at time of leak: %s, pid: %d\n",
1172 current_task_name, current_task->pid);
1173 }
1174
Olav Haugan63e5f3b2012-01-11 16:42:37 -08001175 kfree(client->name);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001176 kfree(client);
1177}
Olav Hauganbd453a92012-07-05 14:21:34 -07001178EXPORT_SYMBOL(ion_client_destroy);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001179
Laura Abbott273dd8e2011-10-12 14:26:33 -07001180int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
1181 unsigned long *flags)
Rebecca Schultz Zavin46d71332012-05-07 16:06:32 -07001182{
1183 struct ion_buffer *buffer;
Rebecca Schultz Zavin46d71332012-05-07 16:06:32 -07001184
1185 mutex_lock(&client->lock);
1186 if (!ion_handle_validate(client, handle)) {
Laura Abbott273dd8e2011-10-12 14:26:33 -07001187 pr_err("%s: invalid handle passed to %s.\n",
1188 __func__, __func__);
Rebecca Schultz Zavin46d71332012-05-07 16:06:32 -07001189 mutex_unlock(&client->lock);
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001190 return -EINVAL;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001191 }
Laura Abbott273dd8e2011-10-12 14:26:33 -07001192 buffer = handle->buffer;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001193 mutex_lock(&buffer->lock);
Laura Abbott273dd8e2011-10-12 14:26:33 -07001194 *flags = buffer->flags;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001195 mutex_unlock(&buffer->lock);
Laura Abbott273dd8e2011-10-12 14:26:33 -07001196 mutex_unlock(&client->lock);
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001197
Laura Abbott273dd8e2011-10-12 14:26:33 -07001198 return 0;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001199}
Laura Abbott273dd8e2011-10-12 14:26:33 -07001200EXPORT_SYMBOL(ion_handle_get_flags);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001201
Laura Abbott8c017362011-09-22 20:59:12 -07001202int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
1203 unsigned long *size)
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001204{
Laura Abbott8c017362011-09-22 20:59:12 -07001205 struct ion_buffer *buffer;
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001206
Laura Abbott8c017362011-09-22 20:59:12 -07001207 mutex_lock(&client->lock);
1208 if (!ion_handle_validate(client, handle)) {
1209 pr_err("%s: invalid handle passed to %s.\n",
1210 __func__, __func__);
1211 mutex_unlock(&client->lock);
1212 return -EINVAL;
Rebecca Schultz Zavinbe4a1ee2012-04-26 20:44:10 -07001213 }
Laura Abbott8c017362011-09-22 20:59:12 -07001214 buffer = handle->buffer;
Rebecca Schultz Zavinbe4a1ee2012-04-26 20:44:10 -07001215 mutex_lock(&buffer->lock);
Laura Abbott8c017362011-09-22 20:59:12 -07001216 *size = buffer->size;
Rebecca Schultz Zavinbe4a1ee2012-04-26 20:44:10 -07001217 mutex_unlock(&buffer->lock);
Laura Abbott8c017362011-09-22 20:59:12 -07001218 mutex_unlock(&client->lock);
1219
1220 return 0;
1221}
1222EXPORT_SYMBOL(ion_handle_get_size);
1223
Laura Abbottb14ed962012-01-30 14:18:08 -08001224struct sg_table *ion_sg_table(struct ion_client *client,
1225 struct ion_handle *handle)
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001226{
Laura Abbottb14ed962012-01-30 14:18:08 -08001227 struct ion_buffer *buffer;
1228 struct sg_table *table;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001229
Laura Abbottb14ed962012-01-30 14:18:08 -08001230 mutex_lock(&client->lock);
1231 if (!ion_handle_validate(client, handle)) {
1232 pr_err("%s: invalid handle passed to map_dma.\n",
1233 __func__);
1234 mutex_unlock(&client->lock);
1235 return ERR_PTR(-EINVAL);
1236 }
1237 buffer = handle->buffer;
1238 table = buffer->sg_table;
1239 mutex_unlock(&client->lock);
1240 return table;
1241}
Olav Hauganbd453a92012-07-05 14:21:34 -07001242EXPORT_SYMBOL(ion_sg_table);
Laura Abbottb14ed962012-01-30 14:18:08 -08001243
Mitchel Humpherys0432d692013-01-08 17:03:10 -08001244struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
1245 size_t chunk_size, size_t total_size)
1246{
1247 struct sg_table *table;
1248 int i, n_chunks, ret;
1249 struct scatterlist *sg;
1250
1251 table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1252 if (!table)
1253 return ERR_PTR(-ENOMEM);
1254
1255 n_chunks = DIV_ROUND_UP(total_size, chunk_size);
1256 pr_debug("creating sg_table with %d chunks\n", n_chunks);
1257
1258 ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
1259 if (ret)
1260 goto err0;
1261
1262 for_each_sg(table->sgl, sg, table->nents, i) {
1263 dma_addr_t addr = buffer_base + i * chunk_size;
1264 sg_dma_address(sg) = addr;
1265 }
1266
1267 return table;
1268err0:
1269 kfree(table);
1270 return ERR_PTR(ret);
1271}
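
/*
 * Illustrative sketch (not compiled into the driver): a heap describing a
 * physically contiguous 1 MB region as 4 KB chunks could do
 *
 *	struct sg_table *table =
 *		ion_create_chunked_sg_table(base_paddr, SZ_4K, SZ_1M);
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 *
 * base_paddr is an assumption; each scatterlist entry's dma_address is simply
 * base_paddr + i * chunk_size, as set up above.
 */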
1272
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -07001273static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1274 struct device *dev,
1275 enum dma_data_direction direction);
1276
Laura Abbottb14ed962012-01-30 14:18:08 -08001277static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1278 enum dma_data_direction direction)
1279{
1280 struct dma_buf *dmabuf = attachment->dmabuf;
1281 struct ion_buffer *buffer = dmabuf->priv;
1282
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001283 ion_buffer_sync_for_device(buffer, attachment->dev, direction);
Laura Abbottb14ed962012-01-30 14:18:08 -08001284 return buffer->sg_table;
1285}
1286
1287static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
1288 struct sg_table *table,
1289 enum dma_data_direction direction)
1290{
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001291}
1292
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -07001293static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
1294{
1295 unsigned long pages = buffer->sg_table->nents;
1296 unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;
1297
1298 buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
1299 if (!buffer->dirty)
1300 return -ENOMEM;
1301 return 0;
1302}
1303
1304struct ion_vma_list {
1305 struct list_head list;
1306 struct vm_area_struct *vma;
1307};
1308
1309static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1310 struct device *dev,
1311 enum dma_data_direction dir)
1312{
1313 struct scatterlist *sg;
1314 int i;
1315 struct ion_vma_list *vma_list;
1316
1317 pr_debug("%s: syncing for device %s\n", __func__,
1318 dev ? dev_name(dev) : "null");
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001319
1320 if (!(buffer->flags & ION_FLAG_CACHED))
1321 return;
1322
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -07001323 mutex_lock(&buffer->lock);
1324 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
1325 if (!test_bit(i, buffer->dirty))
1326 continue;
1327 dma_sync_sg_for_device(dev, sg, 1, dir);
1328 clear_bit(i, buffer->dirty);
1329 }
1330 list_for_each_entry(vma_list, &buffer->vmas, list) {
1331 struct vm_area_struct *vma = vma_list->vma;
1332
1333 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
1334 NULL);
1335 }
1336 mutex_unlock(&buffer->lock);
1337}
1338
1339int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001340{
Laura Abbottb14ed962012-01-30 14:18:08 -08001341 struct ion_buffer *buffer = vma->vm_private_data;
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -07001342 struct scatterlist *sg;
1343 int i;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001344
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -07001345 mutex_lock(&buffer->lock);
1346 set_bit(vmf->pgoff, buffer->dirty);
1347
1348 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
1349 if (i != vmf->pgoff)
1350 continue;
1351 dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
1352 vm_insert_page(vma, (unsigned long)vmf->virtual_address,
1353 sg_page(sg));
1354 break;
1355 }
1356 mutex_unlock(&buffer->lock);
1357 return VM_FAULT_NOPAGE;
1358}
1359
1360static void ion_vm_open(struct vm_area_struct *vma)
1361{
1362 struct ion_buffer *buffer = vma->vm_private_data;
1363 struct ion_vma_list *vma_list;
1364
1365 vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1366 if (!vma_list)
1367 return;
1368 vma_list->vma = vma;
1369 mutex_lock(&buffer->lock);
1370 list_add(&vma_list->list, &buffer->vmas);
1371 mutex_unlock(&buffer->lock);
1372 pr_debug("%s: adding %p\n", __func__, vma);
1373}
1374
1375static void ion_vm_close(struct vm_area_struct *vma)
1376{
1377 struct ion_buffer *buffer = vma->vm_private_data;
1378 struct ion_vma_list *vma_list, *tmp;
1379
1380 pr_debug("%s\n", __func__);
1381 mutex_lock(&buffer->lock);
1382 list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1383 if (vma_list->vma != vma)
1384 continue;
1385 list_del(&vma_list->list);
1386 kfree(vma_list);
1387 pr_debug("%s: deleting %p\n", __func__, vma);
1388 break;
1389 }
1390 mutex_unlock(&buffer->lock);
Laura Abbottb14ed962012-01-30 14:18:08 -08001391
Laura Abbotta6835092011-11-14 15:27:02 -08001392 if (buffer->heap->ops->unmap_user)
1393 buffer->heap->ops->unmap_user(buffer->heap, buffer);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001394}
1395
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -07001396struct vm_operations_struct ion_vma_ops = {
1397 .open = ion_vm_open,
1398 .close = ion_vm_close,
1399 .fault = ion_vm_fault,
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001400};
1401
Laura Abbottb14ed962012-01-30 14:18:08 -08001402static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001403{
Laura Abbottb14ed962012-01-30 14:18:08 -08001404 struct ion_buffer *buffer = dmabuf->priv;
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -07001405 int ret = 0;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001406
Laura Abbottb14ed962012-01-30 14:18:08 -08001407 if (!buffer->heap->ops->map_user) {
1408 pr_err("%s: this heap does not define a method for mapping "
1409 "to userspace\n", __func__);
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001410 return -EINVAL;
1411 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001412
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -07001413 if (buffer->flags & ION_FLAG_CACHED) {
1414 vma->vm_private_data = buffer;
1415 vma->vm_ops = &ion_vma_ops;
Mitchel Humpherys7ce0fe42013-01-10 11:30:26 -08001416 vma->vm_flags |= VM_MIXEDMAP;
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -07001417 ion_vm_open(vma);
1418 } else {
1419 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1420 mutex_lock(&buffer->lock);
1421 /* now map it to userspace */
1422 ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1423 mutex_unlock(&buffer->lock);
1424 }
Laura Abbotte8bc7aa2011-12-09 14:49:33 -08001425
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -07001426 if (ret)
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001427 pr_err("%s: failure mapping buffer to userspace\n",
1428 __func__);
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -07001429
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001430 return ret;
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001431}
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001432
Laura Abbottb14ed962012-01-30 14:18:08 -08001433static void ion_dma_buf_release(struct dma_buf *dmabuf)
1434{
1435 struct ion_buffer *buffer = dmabuf->priv;
1436 ion_buffer_put(buffer);
1437}
1438
1439static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1440{
1441 struct ion_buffer *buffer = dmabuf->priv;
1442 return buffer->vaddr + offset;
1443}
1444
1445static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1446 void *ptr)
1447{
1448 return;
1449}
1450
1451static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1452 size_t len,
1453 enum dma_data_direction direction)
1454{
1455 struct ion_buffer *buffer = dmabuf->priv;
1456 void *vaddr;
1457
1458 if (!buffer->heap->ops->map_kernel) {
1459 pr_err("%s: map kernel is not implemented by this heap.\n",
1460 __func__);
1461 return -ENODEV;
1462 }
1463
1464 mutex_lock(&buffer->lock);
1465 vaddr = ion_buffer_kmap_get(buffer);
1466 mutex_unlock(&buffer->lock);
1467 if (IS_ERR(vaddr))
1468 return PTR_ERR(vaddr);
1469 if (!vaddr)
1470 return -ENOMEM;
1471 return 0;
1472}
1473
1474static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1475 size_t len,
1476 enum dma_data_direction direction)
1477{
1478 struct ion_buffer *buffer = dmabuf->priv;
1479
1480 mutex_lock(&buffer->lock);
1481 ion_buffer_kmap_put(buffer);
1482 mutex_unlock(&buffer->lock);
1483}
1484
1485struct dma_buf_ops dma_buf_ops = {
1486 .map_dma_buf = ion_map_dma_buf,
1487 .unmap_dma_buf = ion_unmap_dma_buf,
1488 .mmap = ion_mmap,
1489 .release = ion_dma_buf_release,
1490 .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1491 .end_cpu_access = ion_dma_buf_end_cpu_access,
1492 .kmap_atomic = ion_dma_buf_kmap,
1493 .kunmap_atomic = ion_dma_buf_kunmap,
1494 .kmap = ion_dma_buf_kmap,
1495 .kunmap = ion_dma_buf_kunmap,
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001496};
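/*
 * Illustrative sketch, not part of this driver: once a buffer has been
 * exported with the ops above, another kernel driver can reach it through
 * the standard dma-buf core API ("dev" below is the importing device and
 * is an assumption of the example):
 *
 *	attach = dma_buf_attach(dmabuf, dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	... program the device with the sg_table ...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *
 * dma_buf_map_attachment() lands in ion_map_dma_buf() above.
 */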
1497
Laura Abbottb14ed962012-01-30 14:18:08 -08001498int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
1499{
1500 struct ion_buffer *buffer;
1501 struct dma_buf *dmabuf;
1502 bool valid_handle;
1503 int fd;
1504
1505 mutex_lock(&client->lock);
1506 valid_handle = ion_handle_validate(client, handle);
1507 mutex_unlock(&client->lock);
1508 if (!valid_handle) {
Olav Haugan0df59942012-07-05 14:27:30 -07001509 WARN(1, "%s: invalid handle passed to share.\n", __func__);
Laura Abbottb14ed962012-01-30 14:18:08 -08001510 return -EINVAL;
1511 }
1512
1513 buffer = handle->buffer;
1514 ion_buffer_get(buffer);
1515 dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1516 if (IS_ERR(dmabuf)) {
1517 ion_buffer_put(buffer);
1518 return PTR_ERR(dmabuf);
1519 }
1520 fd = dma_buf_fd(dmabuf, O_CLOEXEC);
Laura Abbottc2641f72012-08-01 18:06:18 -07001521 if (fd < 0)
Laura Abbottb14ed962012-01-30 14:18:08 -08001522 dma_buf_put(dmabuf);
Laura Abbottc2641f72012-08-01 18:06:18 -07001523
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001524 return fd;
Laura Abbottb14ed962012-01-30 14:18:08 -08001525}
Olav Hauganbd453a92012-07-05 14:21:34 -07001526EXPORT_SYMBOL(ion_share_dma_buf);
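/*
 * Minimal in-kernel usage sketch, assuming "client" and "handle" were
 * obtained earlier from ion_client_create() and ion_alloc() (names are
 * illustrative, error handling trimmed):
 *
 *	int fd = ion_share_dma_buf(client, handle);
 *	if (fd < 0)
 *		return fd;
 *
 * The fd holds its own reference to the buffer, so it remains valid even if
 * the original handle is freed, and it can be passed to userspace or to
 * another client (see ion_import_dma_buf() below).
 */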
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001527
Laura Abbottb14ed962012-01-30 14:18:08 -08001528struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1529{
1530 struct dma_buf *dmabuf;
1531 struct ion_buffer *buffer;
1532 struct ion_handle *handle;
1533
1534 dmabuf = dma_buf_get(fd);
1535 if (IS_ERR_OR_NULL(dmabuf))
1536 return ERR_PTR(PTR_ERR(dmabuf));
 1537	/* if this memory came from ion */
1539 if (dmabuf->ops != &dma_buf_ops) {
1540 pr_err("%s: can not import dmabuf from another exporter\n",
1541 __func__);
1542 dma_buf_put(dmabuf);
1543 return ERR_PTR(-EINVAL);
1544 }
1545 buffer = dmabuf->priv;
1546
1547 mutex_lock(&client->lock);
1548 /* if a handle exists for this buffer just take a reference to it */
1549 handle = ion_handle_lookup(client, buffer);
1550 if (!IS_ERR_OR_NULL(handle)) {
1551 ion_handle_get(handle);
1552 goto end;
1553 }
1554 handle = ion_handle_create(client, buffer);
1555 if (IS_ERR_OR_NULL(handle))
1556 goto end;
1557 ion_handle_add(client, handle);
1558end:
1559 mutex_unlock(&client->lock);
1560 dma_buf_put(dmabuf);
1561 return handle;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001562}
Olav Hauganbd453a92012-07-05 14:21:34 -07001563EXPORT_SYMBOL(ion_import_dma_buf);
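/*
 * Sketch of the import side, assuming the fd produced by
 * ion_share_dma_buf() has been handed to a second client (for example over
 * binder or a socket; "other_client" is illustrative):
 *
 *	struct ion_handle *h = ion_import_dma_buf(other_client, fd);
 *	if (IS_ERR_OR_NULL(h))
 *		return PTR_ERR(h);
 *	... use the buffer through other_client/h ...
 *	ion_free(other_client, h);
 *
 * The dma_buf reference taken by dma_buf_get() is dropped before returning,
 * so the handle's lifetime is tied to the client, not to the fd.
 */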
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001564
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001565static int ion_sync_for_device(struct ion_client *client, int fd)
1566{
1567 struct dma_buf *dmabuf;
1568 struct ion_buffer *buffer;
1569
1570 dmabuf = dma_buf_get(fd);
1571 if (IS_ERR_OR_NULL(dmabuf))
1572 return PTR_ERR(dmabuf);
1573
1574 /* if this memory came from ion */
1575 if (dmabuf->ops != &dma_buf_ops) {
1576 pr_err("%s: can not sync dmabuf from another exporter\n",
1577 __func__);
1578 dma_buf_put(dmabuf);
1579 return -EINVAL;
1580 }
1581 buffer = dmabuf->priv;
1582 ion_buffer_sync_for_device(buffer, NULL, DMA_BIDIRECTIONAL);
1583 dma_buf_put(dmabuf);
1584 return 0;
1585}
1586
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001587static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1588{
1589 struct ion_client *client = filp->private_data;
1590
1591 switch (cmd) {
1592 case ION_IOC_ALLOC:
1593 {
1594 struct ion_allocation_data data;
1595
1596 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1597 return -EFAULT;
1598 data.handle = ion_alloc(client, data.len, data.align,
Hanumant Singh7d72bad2012-08-29 18:39:44 -07001599 data.heap_mask, data.flags);
KyongHo Cho9ae7e012011-09-07 11:27:07 +09001600
Laura Abbottb14ed962012-01-30 14:18:08 -08001601 if (IS_ERR(data.handle))
1602 return PTR_ERR(data.handle);
KyongHo Cho9ae7e012011-09-07 11:27:07 +09001603
Laura Abbottb14ed962012-01-30 14:18:08 -08001604 if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
1605 ion_free(client, data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001606 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001607 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001608 break;
1609 }
1610 case ION_IOC_FREE:
1611 {
1612 struct ion_handle_data data;
1613 bool valid;
1614
1615 if (copy_from_user(&data, (void __user *)arg,
1616 sizeof(struct ion_handle_data)))
1617 return -EFAULT;
1618 mutex_lock(&client->lock);
1619 valid = ion_handle_validate(client, data.handle);
1620 mutex_unlock(&client->lock);
1621 if (!valid)
1622 return -EINVAL;
1623 ion_free(client, data.handle);
1624 break;
1625 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001626 case ION_IOC_MAP:
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001627 case ION_IOC_SHARE:
1628 {
1629 struct ion_fd_data data;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001630 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1631 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001632
Laura Abbottb14ed962012-01-30 14:18:08 -08001633 data.fd = ion_share_dma_buf(client, data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001634 if (copy_to_user((void __user *)arg, &data, sizeof(data)))
1635 return -EFAULT;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001636 if (data.fd < 0)
1637 return data.fd;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001638 break;
1639 }
1640 case ION_IOC_IMPORT:
1641 {
1642 struct ion_fd_data data;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001643 int ret = 0;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001644 if (copy_from_user(&data, (void __user *)arg,
1645 sizeof(struct ion_fd_data)))
1646 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001647 data.handle = ion_import_dma_buf(client, data.fd);
Olav Haugan865e97f2012-05-15 14:40:11 -07001648 if (IS_ERR(data.handle)) {
1649 ret = PTR_ERR(data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001650 data.handle = NULL;
Olav Haugan865e97f2012-05-15 14:40:11 -07001651 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001652 if (copy_to_user((void __user *)arg, &data,
1653 sizeof(struct ion_fd_data)))
1654 return -EFAULT;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001655 if (ret < 0)
1656 return ret;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001657 break;
1658 }
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001659 case ION_IOC_SYNC:
1660 {
1661 struct ion_fd_data data;
1662 if (copy_from_user(&data, (void __user *)arg,
1663 sizeof(struct ion_fd_data)))
1664 return -EFAULT;
1665 ion_sync_for_device(client, data.fd);
1666 break;
1667 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001668 case ION_IOC_CUSTOM:
1669 {
1670 struct ion_device *dev = client->dev;
1671 struct ion_custom_data data;
1672
1673 if (!dev->custom_ioctl)
1674 return -ENOTTY;
1675 if (copy_from_user(&data, (void __user *)arg,
1676 sizeof(struct ion_custom_data)))
1677 return -EFAULT;
1678 return dev->custom_ioctl(client, data.cmd, data.arg);
1679 }
Laura Abbottabcb6f72011-10-04 16:26:49 -07001680 case ION_IOC_CLEAN_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001681 return client->dev->custom_ioctl(client,
1682 ION_IOC_CLEAN_CACHES, arg);
Laura Abbottabcb6f72011-10-04 16:26:49 -07001683 case ION_IOC_INV_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001684 return client->dev->custom_ioctl(client,
1685 ION_IOC_INV_CACHES, arg);
Laura Abbottabcb6f72011-10-04 16:26:49 -07001686 case ION_IOC_CLEAN_INV_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001687 return client->dev->custom_ioctl(client,
1688 ION_IOC_CLEAN_INV_CACHES, arg);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001689 default:
1690 return -ENOTTY;
1691 }
1692 return 0;
1693}
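/*
 * Userspace view of the ioctls above, as a hedged example (the heap mask
 * and sizes are placeholders, error handling trimmed):
 *
 *	int ionfd = open("/dev/ion", O_RDONLY);
 *	struct ion_allocation_data alloc = {
 *		.len = 4096,
 *		.align = 4096,
 *		.heap_mask = ION_HEAP(ION_SYSTEM_HEAP_ID),
 *		.flags = 0,
 *	};
 *	ioctl(ionfd, ION_IOC_ALLOC, &alloc);
 *	struct ion_fd_data share = { .handle = alloc.handle };
 *	ioctl(ionfd, ION_IOC_SHARE, &share);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       share.fd, 0);
 */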
1694
1695static int ion_release(struct inode *inode, struct file *file)
1696{
1697 struct ion_client *client = file->private_data;
1698
1699 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbottb14ed962012-01-30 14:18:08 -08001700 ion_client_destroy(client);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001701 return 0;
1702}
1703
1704static int ion_open(struct inode *inode, struct file *file)
1705{
1706 struct miscdevice *miscdev = file->private_data;
1707 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1708 struct ion_client *client;
Laura Abbotteed86032011-12-05 15:32:36 -08001709 char debug_name[64];
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001710
1711 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbotteed86032011-12-05 15:32:36 -08001712 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1713 client = ion_client_create(dev, -1, debug_name);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001714 if (IS_ERR_OR_NULL(client))
1715 return PTR_ERR(client);
1716 file->private_data = client;
1717
1718 return 0;
1719}
1720
1721static const struct file_operations ion_fops = {
1722 .owner = THIS_MODULE,
1723 .open = ion_open,
1724 .release = ion_release,
1725 .unlocked_ioctl = ion_ioctl,
1726};
1727
1728static size_t ion_debug_heap_total(struct ion_client *client,
Laura Abbott3647ac32011-10-31 14:09:53 -07001729 enum ion_heap_ids id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001730{
1731 size_t size = 0;
1732 struct rb_node *n;
1733
1734 mutex_lock(&client->lock);
1735 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1736 struct ion_handle *handle = rb_entry(n,
1737 struct ion_handle,
1738 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001739 if (handle->buffer->heap->id == id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001740 size += handle->buffer->size;
1741 }
1742 mutex_unlock(&client->lock);
1743 return size;
1744}
1745
Olav Haugan0671b9a2012-05-25 11:58:56 -07001746/**
 1747 * Searches through a client's handles to find whether the buffer is owned
1748 * by this client. Used for debug output.
1749 * @param client pointer to candidate owner of buffer
1750 * @param buf pointer to buffer that we are trying to find the owner of
1751 * @return 1 if found, 0 otherwise
1752 */
1753static int ion_debug_find_buffer_owner(const struct ion_client *client,
1754 const struct ion_buffer *buf)
1755{
1756 struct rb_node *n;
1757
1758 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1759 const struct ion_handle *handle = rb_entry(n,
1760 const struct ion_handle,
1761 node);
1762 if (handle->buffer == buf)
1763 return 1;
1764 }
1765 return 0;
1766}
1767
1768/**
 1769 * Adds a mem_map_data entry to the mem_map tree.
1770 * Used for debug output.
1771 * @param mem_map The mem_map tree
1772 * @param data The new data to add to the tree
1773 */
1774static void ion_debug_mem_map_add(struct rb_root *mem_map,
1775 struct mem_map_data *data)
1776{
1777 struct rb_node **p = &mem_map->rb_node;
1778 struct rb_node *parent = NULL;
1779 struct mem_map_data *entry;
1780
1781 while (*p) {
1782 parent = *p;
1783 entry = rb_entry(parent, struct mem_map_data, node);
1784
1785 if (data->addr < entry->addr) {
1786 p = &(*p)->rb_left;
1787 } else if (data->addr > entry->addr) {
1788 p = &(*p)->rb_right;
1789 } else {
 1790			pr_err("%s: mem_map_data already found.\n", __func__);
1791 BUG();
1792 }
1793 }
1794 rb_link_node(&data->node, parent, p);
1795 rb_insert_color(&data->node, mem_map);
1796}
1797
1798/**
1799 * Search for an owner of a buffer by iterating over all ION clients.
1800 * @param dev ion device containing pointers to all the clients.
1801 * @param buffer pointer to buffer we are trying to find the owner of.
1802 * @return name of owner.
1803 */
1804const char *ion_debug_locate_owner(const struct ion_device *dev,
1805 const struct ion_buffer *buffer)
1806{
1807 struct rb_node *j;
1808 const char *client_name = NULL;
1809
Laura Abbottb14ed962012-01-30 14:18:08 -08001810 for (j = rb_first(&dev->clients); j && !client_name;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001811 j = rb_next(j)) {
1812 struct ion_client *client = rb_entry(j, struct ion_client,
1813 node);
1814 if (ion_debug_find_buffer_owner(client, buffer))
1815 client_name = client->name;
1816 }
1817 return client_name;
1818}
1819
1820/**
1821 * Create a mem_map of the heap.
1822 * @param s seq_file to log error message to.
1823 * @param heap The heap to create mem_map for.
1824 * @param mem_map The mem map to be created.
1825 */
1826void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
1827 struct rb_root *mem_map)
1828{
1829 struct ion_device *dev = heap->dev;
1830 struct rb_node *n;
Chintan Pandyadaf75622013-01-29 19:40:01 +05301831 size_t size;
1832
1833 if (!heap->ops->phys)
1834 return;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001835
1836 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1837 struct ion_buffer *buffer =
1838 rb_entry(n, struct ion_buffer, node);
1839 if (buffer->heap->id == heap->id) {
1840 struct mem_map_data *data =
1841 kzalloc(sizeof(*data), GFP_KERNEL);
1842 if (!data) {
1843 seq_printf(s, "ERROR: out of memory. "
1844 "Part of memory map will not be logged\n");
1845 break;
1846 }
Chintan Pandyadaf75622013-01-29 19:40:01 +05301847
1848 buffer->heap->ops->phys(buffer->heap, buffer,
1849 &(data->addr), &size);
1850 data->size = (unsigned long) size;
1851 data->addr_end = data->addr + data->size - 1;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001852 data->client_name = ion_debug_locate_owner(dev, buffer);
1853 ion_debug_mem_map_add(mem_map, data);
1854 }
1855 }
1856}
1857
1858/**
1859 * Free the memory allocated by ion_debug_mem_map_create
1860 * @param mem_map The mem map to free.
1861 */
1862static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
1863{
1864 if (mem_map) {
1865 struct rb_node *n;
1866 while ((n = rb_first(mem_map)) != 0) {
1867 struct mem_map_data *data =
1868 rb_entry(n, struct mem_map_data, node);
1869 rb_erase(&data->node, mem_map);
1870 kfree(data);
1871 }
1872 }
1873}
1874
1875/**
1876 * Print heap debug information.
1877 * @param s seq_file to log message to.
1878 * @param heap pointer to heap that we will print debug information for.
1879 */
1880static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
1881{
1882 if (heap->ops->print_debug) {
1883 struct rb_root mem_map = RB_ROOT;
1884 ion_debug_mem_map_create(s, heap, &mem_map);
1885 heap->ops->print_debug(heap, s, &mem_map);
1886 ion_debug_mem_map_destroy(&mem_map);
1887 }
1888}
1889
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001890static int ion_debug_heap_show(struct seq_file *s, void *unused)
1891{
1892 struct ion_heap *heap = s->private;
1893 struct ion_device *dev = heap->dev;
1894 struct rb_node *n;
1895
Olav Haugane4900b52012-05-25 11:58:03 -07001896 mutex_lock(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001897	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001898
Laura Abbottb14ed962012-01-30 14:18:08 -08001899 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001900 struct ion_client *client = rb_entry(n, struct ion_client,
1901 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001902 size_t size = ion_debug_heap_total(client, heap->id);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001903 if (!size)
1904 continue;
Laura Abbottb14ed962012-01-30 14:18:08 -08001905 if (client->task) {
1906 char task_comm[TASK_COMM_LEN];
1907
1908 get_task_comm(task_comm, client->task);
 1909			seq_printf(s, "%16s %16u %16u\n", task_comm,
1910 client->pid, size);
1911 } else {
 1912			seq_printf(s, "%16s %16u %16u\n", client->name,
1913 client->pid, size);
1914 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001915 }
Olav Haugan0671b9a2012-05-25 11:58:56 -07001916 ion_heap_print_debug(s, heap);
Olav Haugane4900b52012-05-25 11:58:03 -07001917 mutex_unlock(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001918 return 0;
1919}
1920
1921static int ion_debug_heap_open(struct inode *inode, struct file *file)
1922{
1923 return single_open(file, ion_debug_heap_show, inode->i_private);
1924}
1925
1926static const struct file_operations debug_heap_fops = {
1927 .open = ion_debug_heap_open,
1928 .read = seq_read,
1929 .llseek = seq_lseek,
1930 .release = single_release,
1931};
1932
1933void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1934{
1935 struct rb_node **p = &dev->heaps.rb_node;
1936 struct rb_node *parent = NULL;
1937 struct ion_heap *entry;
1938
Laura Abbottb14ed962012-01-30 14:18:08 -08001939 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1940 !heap->ops->unmap_dma)
1941 pr_err("%s: can not add heap with invalid ops struct.\n",
1942 __func__);
1943
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001944 heap->dev = dev;
1945 mutex_lock(&dev->lock);
1946 while (*p) {
1947 parent = *p;
1948 entry = rb_entry(parent, struct ion_heap, node);
1949
1950 if (heap->id < entry->id) {
1951 p = &(*p)->rb_left;
 1952		} else if (heap->id > entry->id) {
1953 p = &(*p)->rb_right;
1954 } else {
1955 pr_err("%s: can not insert multiple heaps with "
1956 "id %d\n", __func__, heap->id);
1957 goto end;
1958 }
1959 }
1960
1961 rb_link_node(&heap->node, parent, p);
1962 rb_insert_color(&heap->node, &dev->heaps);
1963 debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1964 &debug_heap_fops);
1965end:
1966 mutex_unlock(&dev->lock);
1967}
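/*
 * Rough sketch of how a platform driver is expected to wire heaps up,
 * assuming the ion_heap_create() helper from ion_priv.h (the loop and all
 * names are illustrative):
 *
 *	idev = ion_device_create(my_custom_ioctl);
 *	for (i = 0; i < pdata->nr; i++) {
 *		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);
 *		ion_device_add_heap(idev, heap);
 *	}
 */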
1968
Laura Abbott93619302012-10-11 11:51:40 -07001969int ion_secure_handle(struct ion_client *client, struct ion_handle *handle,
1970 int version, void *data, int flags)
1971{
1972 int ret = -EINVAL;
1973 struct ion_heap *heap;
1974 struct ion_buffer *buffer;
1975
1976 mutex_lock(&client->lock);
1977 if (!ion_handle_validate(client, handle)) {
1978 WARN(1, "%s: invalid handle passed to secure.\n", __func__);
1979 goto out_unlock;
1980 }
1981
1982 buffer = handle->buffer;
1983 heap = buffer->heap;
1984
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001985 if (!ion_heap_allow_handle_secure(heap->type)) {
Laura Abbott93619302012-10-11 11:51:40 -07001986 pr_err("%s: cannot secure buffer from non secure heap\n",
1987 __func__);
1988 goto out_unlock;
1989 }
1990
1991 BUG_ON(!buffer->heap->ops->secure_buffer);
1992 /*
1993 * Protect the handle via the client lock to ensure we aren't
1994 * racing with free
1995 */
1996 ret = buffer->heap->ops->secure_buffer(buffer, version, data, flags);
1997
1998out_unlock:
1999 mutex_unlock(&client->lock);
2000 return ret;
2001}
2002
2003int ion_unsecure_handle(struct ion_client *client, struct ion_handle *handle)
2004{
2005 int ret = -EINVAL;
2006 struct ion_heap *heap;
2007 struct ion_buffer *buffer;
2008
2009 mutex_lock(&client->lock);
2010 if (!ion_handle_validate(client, handle)) {
 2011		WARN(1, "%s: invalid handle passed to unsecure.\n", __func__);
2012 goto out_unlock;
2013 }
2014
2015 buffer = handle->buffer;
2016 heap = buffer->heap;
2017
Laura Abbott4afbd8b2013-02-15 09:21:33 -08002018 if (!ion_heap_allow_handle_secure(heap->type)) {
Laura Abbott93619302012-10-11 11:51:40 -07002019		pr_err("%s: cannot unsecure buffer from non secure heap\n",
2020 __func__);
2021 goto out_unlock;
2022 }
2023
2024 BUG_ON(!buffer->heap->ops->unsecure_buffer);
2025 /*
2026 * Protect the handle via the client lock to ensure we aren't
2027 * racing with free
2028 */
2029 ret = buffer->heap->ops->unsecure_buffer(buffer, 0);
2030
2031out_unlock:
2032 mutex_unlock(&client->lock);
2033 return ret;
2034}
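/*
 * Typical pairing of the two calls above, as a sketch; version, data and
 * flags come from the caller's content-protection usecase and are
 * placeholders here:
 *
 *	ret = ion_secure_handle(client, handle, version, data, flags);
 *	if (!ret) {
 *		... buffer is used while protected ...
 *		ion_unsecure_handle(client, handle);
 *	}
 */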
2035
Laura Abbott7e446482012-06-13 15:59:39 -07002036int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
2037 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08002038{
2039 struct rb_node *n;
2040 int ret_val = 0;
2041
2042 /*
2043 * traverse the list of heaps available in this system
2044 * and find the heap that is specified.
2045 */
2046 mutex_lock(&dev->lock);
2047 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
2048 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
Laura Abbott4afbd8b2013-02-15 09:21:33 -08002049 if (!ion_heap_allow_heap_secure(heap->type))
Olav Haugan0a852512012-01-09 10:20:55 -08002050 continue;
2051 if (ION_HEAP(heap->id) != heap_id)
2052 continue;
2053 if (heap->ops->secure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07002054 ret_val = heap->ops->secure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08002055 else
2056 ret_val = -EINVAL;
2057 break;
2058 }
2059 mutex_unlock(&dev->lock);
2060 return ret_val;
2061}
Olav Hauganbd453a92012-07-05 14:21:34 -07002062EXPORT_SYMBOL(ion_secure_heap);
Olav Haugan0a852512012-01-09 10:20:55 -08002063
Laura Abbott7e446482012-06-13 15:59:39 -07002064int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
2065 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08002066{
2067 struct rb_node *n;
2068 int ret_val = 0;
2069
2070 /*
2071 * traverse the list of heaps available in this system
2072 * and find the heap that is specified.
2073 */
2074 mutex_lock(&dev->lock);
2075 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
2076 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
Laura Abbott4afbd8b2013-02-15 09:21:33 -08002077 if (!ion_heap_allow_heap_secure(heap->type))
Olav Haugan0a852512012-01-09 10:20:55 -08002078 continue;
2079 if (ION_HEAP(heap->id) != heap_id)
2080 continue;
 2081		if (heap->ops->unsecure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07002082 ret_val = heap->ops->unsecure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08002083 else
2084 ret_val = -EINVAL;
2085 break;
2086 }
2087 mutex_unlock(&dev->lock);
2088 return ret_val;
2089}
Olav Hauganbd453a92012-07-05 14:21:34 -07002090EXPORT_SYMBOL(ion_unsecure_heap);
Olav Haugan0a852512012-01-09 10:20:55 -08002091
Laura Abbott404f8242011-10-31 14:22:53 -07002092static int ion_debug_leak_show(struct seq_file *s, void *unused)
2093{
2094 struct ion_device *dev = s->private;
2095 struct rb_node *n;
Laura Abbott404f8242011-10-31 14:22:53 -07002096
Laura Abbott404f8242011-10-31 14:22:53 -07002097	seq_printf(s, "%16s %16s %16s %16s\n", "buffer", "heap", "size",
2098 "ref cnt");
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08002099
Laura Abbott404f8242011-10-31 14:22:53 -07002100 mutex_lock(&dev->lock);
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08002101 ion_mark_dangling_buffers_locked(dev);
Laura Abbott404f8242011-10-31 14:22:53 -07002102
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08002103 /* Anyone still marked as a 1 means a leaked handle somewhere */
Laura Abbott404f8242011-10-31 14:22:53 -07002104 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
2105 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
2106 node);
2107
2108 if (buf->marked == 1)
 2109			seq_printf(s, "%16x %16s %16x %16d\n",
2110 (int)buf, buf->heap->name, buf->size,
2111 atomic_read(&buf->ref.refcount));
2112 }
2113 mutex_unlock(&dev->lock);
2114 return 0;
2115}
2116
2117static int ion_debug_leak_open(struct inode *inode, struct file *file)
2118{
2119 return single_open(file, ion_debug_leak_show, inode->i_private);
2120}
2121
2122static const struct file_operations debug_leak_fops = {
2123 .open = ion_debug_leak_open,
2124 .read = seq_read,
2125 .llseek = seq_lseek,
2126 .release = single_release,
2127};
2128
2129
2130
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07002131struct ion_device *ion_device_create(long (*custom_ioctl)
2132 (struct ion_client *client,
2133 unsigned int cmd,
2134 unsigned long arg))
2135{
2136 struct ion_device *idev;
2137 int ret;
2138
2139 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
2140 if (!idev)
2141 return ERR_PTR(-ENOMEM);
2142
2143 idev->dev.minor = MISC_DYNAMIC_MINOR;
2144 idev->dev.name = "ion";
2145 idev->dev.fops = &ion_fops;
2146 idev->dev.parent = NULL;
2147 ret = misc_register(&idev->dev);
2148 if (ret) {
2149 pr_err("ion: failed to register misc device.\n");
2150 return ERR_PTR(ret);
2151 }
2152
2153 idev->debug_root = debugfs_create_dir("ion", NULL);
2154 if (IS_ERR_OR_NULL(idev->debug_root))
2155 pr_err("ion: failed to create debug files.\n");
2156
2157 idev->custom_ioctl = custom_ioctl;
2158 idev->buffers = RB_ROOT;
2159 mutex_init(&idev->lock);
2160 idev->heaps = RB_ROOT;
Laura Abbottb14ed962012-01-30 14:18:08 -08002161 idev->clients = RB_ROOT;
Laura Abbott404f8242011-10-31 14:22:53 -07002162 debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
2163 &debug_leak_fops);
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08002164
2165 setup_ion_leak_check(idev->debug_root);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07002166 return idev;
2167}
2168
2169void ion_device_destroy(struct ion_device *dev)
2170{
2171 misc_deregister(&dev->dev);
2172 /* XXX need to free the heaps and clients ? */
2173 kfree(dev);
2174}
Laura Abbottb14ed962012-01-30 14:18:08 -08002175
2176void __init ion_reserve(struct ion_platform_data *data)
2177{
2178 int i, ret;
2179
2180 for (i = 0; i < data->nr; i++) {
2181 if (data->heaps[i].size == 0)
2182 continue;
2183 ret = memblock_reserve(data->heaps[i].base,
2184 data->heaps[i].size);
2185 if (ret)
Laura Abbott1135c9e2013-03-13 15:33:40 -07002186 pr_err("memblock reserve of %x@%pa failed\n",
Laura Abbottb14ed962012-01-30 14:18:08 -08002187 data->heaps[i].size,
Laura Abbott1135c9e2013-03-13 15:33:40 -07002188 &data->heaps[i].base);
Laura Abbottb14ed962012-01-30 14:18:08 -08002189 }
2190}
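/*
 * Hypothetical board-file example of the platform data consumed above;
 * only heaps with a fixed base/size (e.g. carveouts) need reserving, and
 * the id/base/size values are placeholders:
 *
 *	static struct ion_platform_heap my_heaps[] = {
 *		{
 *			.id   = ION_CP_MM_HEAP_ID,
 *			.type = ION_HEAP_TYPE_CARVEOUT,
 *			.name = "mm",
 *			.base = 0x80000000,
 *			.size = SZ_64M,
 *		},
 *	};
 *	static struct ion_platform_data my_ion_pdata = {
 *		.nr = ARRAY_SIZE(my_heaps),
 *		.heaps = my_heaps,
 *	};
 *
 * ion_reserve(&my_ion_pdata) would then be called from the machine's
 * .reserve hook, before the page allocator claims the memory.
 */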