/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/msm_ion.h>
#include <trace/events/kmem.h>


#include <mach/iommu_domains.h>
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		an rb tree of all the heaps in the system
 * @custom_ioctl:	device-specific ioctl hook
 * @clients:		an rb tree of all the clients backed by this device
 * @debug_root:		debugfs root for the device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @iommu_map_cnt:	count of times this client has mapped for iommu
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int iommu_map_cnt;
};

static void ion_iommu_release(struct kref *kref);

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static void ion_iommu_add(struct ion_buffer *buffer,
			  struct ion_iommu_map *iommu)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (iommu->key < entry->key) {
			p = &(*p)->rb_left;
		} else if (iommu->key > entry->key) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer %p already has mapping for domain %d"
				" and partition %d\n", __func__,
				buffer,
				iommu_map_domain(iommu),
				iommu_map_partition(iommu));
			BUG();
		}
	}

	rb_link_node(&iommu->node, parent, p);
	rb_insert_color(&iommu->node, &buffer->iommu_maps);

}

static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
						unsigned int domain_no,
						unsigned int partition_no)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;
	uint64_t key = domain_no;
	key = key << 32 | partition_no;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (key < entry->key)
			p = &(*p)->rb_left;
		else if (key > entry->key)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	return NULL;
}
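
/*
 * Illustrative note (not from the original sources): the iommu_maps tree
 * is keyed by a 64-bit value that packs the domain number into the upper
 * 32 bits and the partition number into the lower 32 bits.  For example,
 * a lookup for domain 2, partition 3 uses:
 *
 *	uint64_t key = ((uint64_t)2 << 32) | 3;
 *
 * which matches the key computed in ion_iommu_lookup() above and stored
 * for each mapping added by ion_iommu_add().
 */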

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;
	buffer->flags = flags;

	table = heap->ops->map_dma(heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (buffer->flags & ION_FLAG_CACHED)
		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
			    i) {
			if (sg_dma_len(sg) == PAGE_SIZE)
				continue;
			pr_err("%s: cached mappings must have pagewise "
			       "sg_lists\n", __func__);
			heap->ops->unmap_dma(heap, buffer);
			heap->ops->free(buffer);
			kfree(buffer);
			return ERR_PTR(-EINVAL);
		}

	ret = ion_buffer_alloc_dirty(buffer);
	if (ret) {
		heap->ops->unmap_dma(heap, buffer);
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here.  However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg.  The implicit contract here is that
	   memory coming from the heaps is ready for dma, ie if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	ion_buffer_add(dev, buffer);
	return buffer;
}

/**
 * Check for delayed IOMMU unmapping. Also unmap any outstanding
 * mappings which would otherwise have been leaked.
 */
static void ion_iommu_delayed_unmap(struct ion_buffer *buffer)
{
	struct ion_iommu_map *iommu_map;
	struct rb_node *node;
	const struct rb_root *rb = &(buffer->iommu_maps);
	unsigned long ref_count;
	unsigned int delayed_unmap;

	mutex_lock(&buffer->lock);

	while ((node = rb_first(rb)) != 0) {
		iommu_map = rb_entry(node, struct ion_iommu_map, node);
		ref_count = atomic_read(&iommu_map->ref.refcount);
		delayed_unmap = iommu_map->flags & ION_IOMMU_UNMAP_DELAYED;

		if ((delayed_unmap && ref_count > 1) || !delayed_unmap) {
			pr_err("%s: Virtual memory address leak in domain %u, partition %u\n",
				__func__, iommu_map->domain_info[DI_DOMAIN_NUM],
				iommu_map->domain_info[DI_PARTITION_NUM]);
		}
		/* set ref count to 1 to force release */
		kref_init(&iommu_map->ref);
		kref_put(&iommu_map->ref, ion_iommu_release);
	}

	mutex_unlock(&buffer->lock);
}

static void ion_delayed_unsecure(struct ion_buffer *buffer)
{
	if (buffer->heap->ops->unsecure_buffer)
		buffer->heap->ops->unsecure_buffer(buffer, 1);
}

Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700306static void ion_buffer_destroy(struct kref *kref)
307{
308 struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
309 struct ion_device *dev = buffer->dev;
310
Laura Abbottb14ed962012-01-30 14:18:08 -0800311 if (WARN_ON(buffer->kmap_cnt > 0))
312 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
313
314 buffer->heap->ops->unmap_dma(buffer->heap, buffer);
315
Laura Abbott93619302012-10-11 11:51:40 -0700316 ion_delayed_unsecure(buffer);
Olav Hauganb3676592012-03-02 15:02:25 -0800317 ion_iommu_delayed_unmap(buffer);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700318 buffer->heap->ops->free(buffer);
319 mutex_lock(&dev->lock);
320 rb_erase(&buffer->node, &dev->buffers);
321 mutex_unlock(&dev->lock);
322 kfree(buffer);
323}
324
static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_put(buffer);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	unsigned long secure_allocation = flags & ION_SECURE;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	pr_debug("%s: len %d align %d heap_mask %u flags %x\n", __func__, len,
		 align, heap_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap type */
		if (!((1 << heap->id) & heap_mask))
			continue;
		/* Do not allow un-secure heap if secure is specified */
		if (secure_allocation &&
		    !ion_heap_allow_secure_allocation(heap->type))
			continue;
		trace_ion_alloc_buffer_start(client->name, heap->name, len,
					     heap_mask, flags);
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		trace_ion_alloc_buffer_end(client->name, heap->name, len,
					   heap_mask, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;

		trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
					heap_mask, flags, PTR_ERR(buffer));
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
			int ret_value = snprintf(&dbg_str[dbg_str_idx],
						len_left, "%s ", heap->name);
			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
			}
		}
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_mask, flags, -ENODEV);
		return ERR_PTR(-ENODEV);
	}

	if (IS_ERR(buffer)) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_mask, flags, PTR_ERR(buffer));
		pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
			 "0x%x) from heap(s) %sfor client %s with heap "
			 "mask 0x%x\n",
			 len, align, dbg_str, client->name, client->heap_mask);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}


	return handle;
}
EXPORT_SYMBOL(ion_alloc);
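
/*
 * Illustrative usage sketch (an assumption, not part of the driver): a
 * kernel client that wants anonymous system memory might do something
 * like the following; the heap mask and flags depend on the heaps a
 * given board actually registers.
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, SZ_64K, SZ_4K,
 *			   1 << ION_HEAP_TYPE_SYSTEM, ION_FLAG_CACHED);
 *	if (IS_ERR_OR_NULL(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 *
 * Note that the caller's heap_mask is matched against heap->id bits
 * while the client's heap_mask is matched against heap->type bits, as
 * seen in the loop above.
 */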

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		mutex_unlock(&client->lock);
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);
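
/*
 * Illustrative note (an assumption about typical use, not from the
 * original sources): ion_phys() only succeeds for heaps that back a
 * buffer with physically contiguous memory and implement ops->phys,
 * e.g. carveout-style heaps.  A caller needing the physical address
 * might do:
 *
 *	ion_phys_addr_t pa;
 *	size_t size;
 *
 *	if (!ion_phys(client, handle, &pa, &size))
 *		pr_debug("buffer at 0x%lx, %zu bytes\n", pa, size);
 */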

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
		int domain_num, int partition_num, unsigned long align,
		unsigned long iova_length, unsigned long flags,
		unsigned long *iova)
{
	struct ion_iommu_map *data;
	int ret;

	data = kmalloc(sizeof(*data), GFP_ATOMIC);

	if (!data)
		return ERR_PTR(-ENOMEM);

	data->buffer = buffer;
	iommu_map_domain(data) = domain_num;
	iommu_map_partition(data) = partition_num;

	ret = buffer->heap->ops->map_iommu(buffer, data,
						domain_num,
						partition_num,
						align,
						iova_length,
						flags);

	if (ret)
		goto out;

	kref_init(&data->ref);
	*iova = data->iova_addr;

	ion_iommu_add(buffer, data);

	return data;

out:
	kfree(data);
	return ERR_PTR(ret);
}

int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags, unsigned long iommu_flags)
{
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	int ret = 0;

	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: client pointer is invalid\n", __func__);
		return -EINVAL;
	}
	if (IS_ERR_OR_NULL(handle)) {
		pr_err("%s: handle pointer is invalid\n", __func__);
		return -EINVAL;
	}
	if (IS_ERR_OR_NULL(handle->buffer)) {
		pr_err("%s: buffer pointer is invalid\n", __func__);
		return -EINVAL;
	}

	if (ION_IS_CACHED(flags)) {
		pr_err("%s: Cannot map iommu as cached.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_iommu.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	/*
	 * If clients don't want a custom iova length, just use whatever
	 * the buffer size is
	 */
	if (!iova_length)
		iova_length = buffer->size;

	if (buffer->size > iova_length) {
		pr_debug("%s: iova length %lx is not at least buffer size"
			 " %x\n", __func__, iova_length, buffer->size);
		ret = -EINVAL;
		goto out;
	}

	if (buffer->size & ~PAGE_MASK) {
		pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
			 buffer->size, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	if (iova_length & ~PAGE_MASK) {
		pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
			 iova_length, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
	if (!iommu_map) {
		iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
					    align, iova_length, flags, iova);
		if (!IS_ERR_OR_NULL(iommu_map)) {
			iommu_map->flags = iommu_flags;

			if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
				kref_get(&iommu_map->ref);
		} else {
			ret = PTR_ERR(iommu_map);
		}
	} else {
		if (iommu_map->flags != iommu_flags) {
			pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
				__func__, handle,
				iommu_map->flags, iommu_flags);
			ret = -EINVAL;
		} else if (iommu_map->mapped_size != iova_length) {
			pr_err("%s: handle %p is already mapped with length"
				" %x, trying to map with length %lx\n",
				__func__, handle, iommu_map->mapped_size,
				iova_length);
			ret = -EINVAL;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	if (!ret)
		buffer->iommu_map_cnt++;
	*buffer_size = buffer->size;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
EXPORT_SYMBOL(ion_map_iommu);

static void ion_iommu_release(struct kref *kref)
{
	struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
						ref);
	struct ion_buffer *buffer = map->buffer;

	rb_erase(&map->node, &buffer->iommu_maps);
	buffer->heap->ops->unmap_iommu(map);
	kfree(map);
}

void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num)
{
	struct ion_iommu_map *iommu_map;
	struct ion_buffer *buffer;

	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: client pointer is invalid\n", __func__);
		return;
	}
	if (IS_ERR_OR_NULL(handle)) {
		pr_err("%s: handle pointer is invalid\n", __func__);
		return;
	}
	if (IS_ERR_OR_NULL(handle->buffer)) {
		pr_err("%s: buffer pointer is invalid\n", __func__);
		return;
	}

	mutex_lock(&client->lock);
	buffer = handle->buffer;

	mutex_lock(&buffer->lock);

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);

	if (!iommu_map) {
		WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
				domain_num, partition_num, buffer);
		goto out;
	}

	kref_put(&iommu_map->ref, ion_iommu_release);

	buffer->iommu_map_cnt--;
out:
	mutex_unlock(&buffer->lock);

	mutex_unlock(&client->lock);

}
EXPORT_SYMBOL(ion_unmap_iommu);
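
/*
 * Illustrative usage sketch (assumptions, not from the original
 * sources): mapping a buffer into an SMMU domain and releasing it
 * again.  domain_num and partition_num are platform-specific
 * placeholders.
 *
 *	unsigned long iova, buf_size;
 *	int ret;
 *
 *	ret = ion_map_iommu(client, handle, domain_num, partition_num,
 *			    SZ_4K, 0, &iova, &buf_size, 0,
 *			    ION_IOMMU_UNMAP_DELAYED);
 *	if (!ret) {
 *		... program the device with [iova, iova + buf_size) ...
 *		ion_unmap_iommu(client, handle, domain_num, partition_num);
 *	}
 *
 * With ION_IOMMU_UNMAP_DELAYED the final teardown is deferred to
 * ion_iommu_delayed_unmap() when the buffer itself is destroyed.
 */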

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
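
/*
 * Illustrative usage sketch (not from the original sources): the kernel
 * mapping is reference counted per handle and per buffer, so map/unmap
 * calls must be balanced.
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR_OR_NULL(vaddr))
 *		return -ENOMEM;
 *	memset(vaddr, 0, buffer_len);
 *	ion_unmap_kernel(client, handle);
 *
 * buffer_len stands for whatever length the caller allocated; it is
 * only a placeholder in this sketch.
 */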

int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
			void *uaddr, unsigned long offset, unsigned long len,
			unsigned int cmd)
{
	struct ion_buffer *buffer;
	int ret = -EINVAL;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to do_cache_op.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!ION_IS_CACHED(buffer->flags)) {
		ret = 0;
		goto out;
	}

	if (!handle->buffer->heap->ops->cache_op) {
		pr_err("%s: cache_op is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}


	ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
					  offset, len, cmd);

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;

}
EXPORT_SYMBOL(ion_do_cache_op);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	struct rb_node *n2;

	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
			"heap_name", "size_in_bytes", "handle refcount",
			"buffer", "physical", "[domain,partition] - virt");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		seq_printf(s, "%16.16s: %16x : %16d : %12p",
				handle->buffer->heap->name,
				handle->buffer->size,
				atomic_read(&handle->ref.refcount),
				handle->buffer);

		if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
			type == ION_HEAP_TYPE_CARVEOUT ||
			type == (enum ion_heap_type) ION_HEAP_TYPE_CP)
			seq_printf(s, " : %12pa", &handle->buffer->priv_phys);
		else
			seq_printf(s, " : %12s", "N/A");

		for (n2 = rb_first(&handle->buffer->iommu_maps); n2;
			n2 = rb_next(n2)) {
			struct ion_iommu_map *imap =
				rb_entry(n2, struct ion_iommu_map, node);
			seq_printf(s, " : [%d,%d] - %8lx",
					imap->domain_info[DI_DOMAIN_NUM],
					imap->domain_info[DI_PARTITION_NUM],
					imap->iova_addr);
		}
		seq_printf(s, "\n");
	}
	mutex_unlock(&client->lock);

	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;
	unsigned int name_len;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	name_len = strnlen(name, 64);

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);

	client->name = kzalloc(name_len+1, GFP_KERNEL);
	if (!client->name) {
		if (task)
			put_task_struct(current->group_leader);
		kfree(client);
		return ERR_PTR(-ENOMEM);
	} else {
		strlcpy(client->name, name, name_len+1);
	}

	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	mutex_lock(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);


	client->debug_root = debugfs_create_file(name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

/**
 * ion_mark_dangling_buffers_locked() - Mark dangling buffers
 * @dev:	the ion device whose buffers will be searched
 *
 * Sets marked=1 for all known buffers associated with `dev' that no
 * longer have a handle pointing to them. dev->lock should be held
 * across a call to this function (and should only be unlocked after
 * checking for marked buffers).
 */
static void ion_mark_dangling_buffers_locked(struct ion_device *dev)
{
	struct rb_node *n, *n2;
	/* mark all buffers as 1 */
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		buf->marked = 1;
	}

	/* now see which buffers we can access */
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);

		mutex_lock(&client->lock);
		for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
			struct ion_handle *handle
				= rb_entry(n2, struct ion_handle, node);

			handle->buffer->marked = 0;

		}
		mutex_unlock(&client->lock);

	}
}

#ifdef CONFIG_ION_LEAK_CHECK
static u32 ion_debug_check_leaks_on_destroy;

static int ion_check_for_and_print_leaks(struct ion_device *dev)
{
	struct rb_node *n;
	int num_leaks = 0;

	if (!ion_debug_check_leaks_on_destroy)
		return 0;

	/* check for leaked buffers (those that no longer have a
	 * handle pointing to them) */
	ion_mark_dangling_buffers_locked(dev);

	/* Anyone still marked as a 1 means a leaked handle somewhere */
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		if (buf->marked == 1) {
			pr_info("Leaked ion buffer at %p\n", buf);
			num_leaks++;
		}
	}
	return num_leaks;
}
static void setup_ion_leak_check(struct dentry *debug_root)
{
	debugfs_create_bool("check_leaks_on_destroy", 0664, debug_root,
			    &ion_debug_check_leaks_on_destroy);
}
#else
static int ion_check_for_and_print_leaks(struct ion_device *dev)
{
	return 0;
}
static void setup_ion_leak_check(struct dentry *debug_root)
{
}
#endif

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;
	int num_leaks;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);

	num_leaks = ion_check_for_and_print_leaks(dev);

	mutex_unlock(&dev->lock);

	if (num_leaks) {
		struct task_struct *current_task = current;
		char current_task_name[TASK_COMM_LEN];
		get_task_comm(current_task_name, current_task);
		WARN(1, "%s: Detected %d leaked ion buffer%s.\n",
			__func__, num_leaks, num_leaks == 1 ? "" : "s");
		pr_info("task name at time of leak: %s, pid: %d\n",
			current_task_name, current_task->pid);
	}

	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);
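
/*
 * Illustrative lifecycle sketch (assumptions, not from the original
 * sources): a kernel driver typically creates one client per ion
 * device, allocates handles from it, and destroys the client when it
 * is done; destroying the client releases any handles still held.
 *
 *	struct ion_client *client;
 *
 *	client = ion_client_create(idev, -1, "my_driver");
 *	if (IS_ERR_OR_NULL(client))
 *		return PTR_ERR(client);
 *	... ion_alloc()/ion_map_kernel()/ion_free() against client ...
 *	ion_client_destroy(client);
 *
 * "idev" is the struct ion_device created at probe time and "-1" opens
 * all heap types to the client; both are placeholders here.
 */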

int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
			unsigned long *flags)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*flags = buffer->flags;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_flags);

int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
			unsigned long *size)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*size = buffer->size;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_size);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
					size_t chunk_size, size_t total_size)
{
	struct sg_table *table;
	int i, n_chunks, ret;
	struct scatterlist *sg;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	n_chunks = DIV_ROUND_UP(total_size, chunk_size);
	pr_debug("creating sg_table with %d chunks\n", n_chunks);

	ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
	if (ret)
		goto err0;

	for_each_sg(table->sgl, sg, table->nents, i) {
		dma_addr_t addr = buffer_base + i * chunk_size;
		sg_dma_address(sg) = addr;
	}

	return table;
err0:
	kfree(table);
	return ERR_PTR(ret);
}
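
/*
 * Illustrative example (an assumption about intended use, not from the
 * original sources): describing a 1 MB physically contiguous region in
 * 64 KB chunks yields DIV_ROUND_UP(SZ_1M, SZ_64K) = 16 scatterlist
 * entries whose dma addresses step by 64 KB:
 *
 *	struct sg_table *table;
 *
 *	table = ion_create_chunked_sg_table(base_paddr, SZ_64K, SZ_1M);
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 *
 * "base_paddr" is a placeholder for the region's physical base address.
 */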

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	if (buffer->flags & ION_FLAG_CACHED)
		ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
	unsigned long pages = buffer->sg_table->nents;
	unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;

	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
	if (!buffer->dirty)
		return -ENOMEM;
	return 0;
}
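
/*
 * Illustrative note (not from the original sources): the dirty bitmap
 * holds one bit per scatterlist entry (one page for cached buffers), so
 * e.g. a 1024-page cached buffer on a 64-bit kernel needs
 * (1024 + 63) / 64 = 16 unsigned longs.  Bits are set in ion_vm_fault()
 * and cleared again in ion_buffer_sync_for_device() below.
 */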

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;
	struct ion_vma_list *vma_list;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");
	mutex_lock(&buffer->lock);
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (!test_bit(i, buffer->dirty))
			continue;
		dma_sync_sg_for_device(dev, sg, 1, dir);
		clear_bit(i, buffer->dirty);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct scatterlist *sg;
	int i;

	mutex_lock(&buffer->lock);
	set_bit(vmf->pgoff, buffer->dirty);

	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (i != vmf->pgoff)
			continue;
		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			       sg_page(sg));
		break;
	}
	mutex_unlock(&buffer->lock);
	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);

	if (buffer->heap->ops->unmap_user)
		buffer->heap->ops->unmap_user(buffer->heap, buffer);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
			"to userspace\n", __func__);
		return -EINVAL;
	}

	if (buffer->flags & ION_FLAG_CACHED) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		vma->vm_flags |= VM_MIXEDMAP;
		ion_vm_open(vma);
	} else {
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		mutex_lock(&buffer->lock);
		/* now map it to userspace */
		ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
		mutex_unlock(&buffer->lock);
	}

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

Laura Abbottb14ed962012-01-30 14:18:08 -08001430static void ion_dma_buf_release(struct dma_buf *dmabuf)
1431{
1432 struct ion_buffer *buffer = dmabuf->priv;
1433 ion_buffer_put(buffer);
1434}
1435
1436static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1437{
1438 struct ion_buffer *buffer = dmabuf->priv;
1439 return buffer->vaddr + offset;
1440}
1441
1442static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1443 void *ptr)
1444{
1445 return;
1446}
1447
1448static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1449 size_t len,
1450 enum dma_data_direction direction)
1451{
1452 struct ion_buffer *buffer = dmabuf->priv;
1453 void *vaddr;
1454
1455 if (!buffer->heap->ops->map_kernel) {
1456 pr_err("%s: map kernel is not implemented by this heap.\n",
1457 __func__);
1458 return -ENODEV;
1459 }
1460
1461 mutex_lock(&buffer->lock);
1462 vaddr = ion_buffer_kmap_get(buffer);
1463 mutex_unlock(&buffer->lock);
1464 if (IS_ERR(vaddr))
1465 return PTR_ERR(vaddr);
1466 if (!vaddr)
1467 return -ENOMEM;
1468 return 0;
1469}
1470
1471static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1472 size_t len,
1473 enum dma_data_direction direction)
1474{
1475 struct ion_buffer *buffer = dmabuf->priv;
1476
1477 mutex_lock(&buffer->lock);
1478 ion_buffer_kmap_put(buffer);
1479 mutex_unlock(&buffer->lock);
1480}
1481
struct dma_buf_ops dma_buf_ops = {
        .map_dma_buf = ion_map_dma_buf,
        .unmap_dma_buf = ion_unmap_dma_buf,
        .mmap = ion_mmap,
        .release = ion_dma_buf_release,
        .begin_cpu_access = ion_dma_buf_begin_cpu_access,
        .end_cpu_access = ion_dma_buf_end_cpu_access,
        .kmap_atomic = ion_dma_buf_kmap,
        .kunmap_atomic = ion_dma_buf_kunmap,
        .kmap = ion_dma_buf_kmap,
        .kunmap = ion_dma_buf_kunmap,
};

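/**
 * ion_share_dma_buf() - export a handle's buffer as a dma-buf fd
 * @client:     the client that owns @handle
 * @handle:     handle to the buffer being exported
 *
 * Takes an extra reference on the underlying buffer, wraps it in a dma-buf
 * backed by dma_buf_ops above and returns a file descriptor (O_CLOEXEC), or
 * a negative errno on failure.  The buffer is only released once every
 * dma-buf reference and every ion handle to it is gone.
 */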
int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
        struct ion_buffer *buffer;
        struct dma_buf *dmabuf;
        bool valid_handle;
        int fd;

        mutex_lock(&client->lock);
        valid_handle = ion_handle_validate(client, handle);
        mutex_unlock(&client->lock);
        if (!valid_handle) {
                WARN(1, "%s: invalid handle passed to share.\n", __func__);
                return -EINVAL;
        }

        buffer = handle->buffer;
        ion_buffer_get(buffer);
        dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
        if (IS_ERR(dmabuf)) {
                ion_buffer_put(buffer);
                return PTR_ERR(dmabuf);
        }
        fd = dma_buf_fd(dmabuf, O_CLOEXEC);
        if (fd < 0)
                dma_buf_put(dmabuf);

        return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);

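/**
 * ion_import_dma_buf() - turn a dma-buf fd back into an ion handle
 * @client:     client that will own the resulting handle
 * @fd:         dma-buf file descriptor that was exported by ion
 *
 * Buffers exported by any other dma-buf exporter are rejected.  If the
 * client already holds a handle to the buffer, that handle is reference
 * counted instead of duplicated.  Returns an ERR_PTR() on failure.
 */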
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
        struct dma_buf *dmabuf;
        struct ion_buffer *buffer;
        struct ion_handle *handle;

        dmabuf = dma_buf_get(fd);
        if (IS_ERR_OR_NULL(dmabuf))
                return ERR_PTR(PTR_ERR(dmabuf));

        /* if this memory came from ion */
        if (dmabuf->ops != &dma_buf_ops) {
                pr_err("%s: can not import dmabuf from another exporter\n",
                        __func__);
                dma_buf_put(dmabuf);
                return ERR_PTR(-EINVAL);
        }
        buffer = dmabuf->priv;

        mutex_lock(&client->lock);
        /* if a handle exists for this buffer just take a reference to it */
        handle = ion_handle_lookup(client, buffer);
        if (!IS_ERR_OR_NULL(handle)) {
                ion_handle_get(handle);
                goto end;
        }
        handle = ion_handle_create(client, buffer);
        if (IS_ERR_OR_NULL(handle))
                goto end;
        ion_handle_add(client, handle);
end:
        mutex_unlock(&client->lock);
        dma_buf_put(dmabuf);
        return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

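/*
 * ion_ioctl() dispatches the /dev/ion interface.  Illustrative userspace
 * sequence only (not part of this file; error handling omitted and the heap
 * id is a placeholder):
 *
 *      struct ion_allocation_data alloc = {
 *              .len = 4096, .align = 4096,
 *              .heap_mask = ION_HEAP(ION_SYSTEM_HEAP_ID), .flags = 0,
 *      };
 *      ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *
 *      struct ion_fd_data share = { .handle = alloc.handle };
 *      ioctl(ion_fd, ION_IOC_SHARE, &share);    // share.fd is a dma-buf fd
 *
 *      struct ion_handle_data hd = { .handle = alloc.handle };
 *      ioctl(ion_fd, ION_IOC_FREE, &hd);
 */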
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct ion_client *client = filp->private_data;

        switch (cmd) {
        case ION_IOC_ALLOC:
        {
                struct ion_allocation_data data;

                if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
                        return -EFAULT;
                data.handle = ion_alloc(client, data.len, data.align,
                                        data.heap_mask, data.flags);

                if (IS_ERR(data.handle))
                        return PTR_ERR(data.handle);

                if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
                        ion_free(client, data.handle);
                        return -EFAULT;
                }
                break;
        }
        case ION_IOC_FREE:
        {
                struct ion_handle_data data;
                bool valid;

                if (copy_from_user(&data, (void __user *)arg,
                                   sizeof(struct ion_handle_data)))
                        return -EFAULT;
                mutex_lock(&client->lock);
                valid = ion_handle_validate(client, data.handle);
                mutex_unlock(&client->lock);
                if (!valid)
                        return -EINVAL;
                ion_free(client, data.handle);
                break;
        }
        case ION_IOC_MAP:
        case ION_IOC_SHARE:
        {
                struct ion_fd_data data;

                if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
                        return -EFAULT;

                data.fd = ion_share_dma_buf(client, data.handle);
                if (copy_to_user((void __user *)arg, &data, sizeof(data)))
                        return -EFAULT;
                if (data.fd < 0)
                        return data.fd;
                break;
        }
        case ION_IOC_IMPORT:
        {
                struct ion_fd_data data;
                int ret = 0;

                if (copy_from_user(&data, (void __user *)arg,
                                   sizeof(struct ion_fd_data)))
                        return -EFAULT;
                data.handle = ion_import_dma_buf(client, data.fd);
                if (IS_ERR(data.handle)) {
                        ret = PTR_ERR(data.handle);
                        data.handle = NULL;
                }
                if (copy_to_user((void __user *)arg, &data,
                                 sizeof(struct ion_fd_data)))
                        return -EFAULT;
                if (ret < 0)
                        return ret;
                break;
        }
        case ION_IOC_CUSTOM:
        {
                struct ion_device *dev = client->dev;
                struct ion_custom_data data;

                if (!dev->custom_ioctl)
                        return -ENOTTY;
                if (copy_from_user(&data, (void __user *)arg,
                                   sizeof(struct ion_custom_data)))
                        return -EFAULT;
                return dev->custom_ioctl(client, data.cmd, data.arg);
        }
        case ION_IOC_CLEAN_CACHES:
                return client->dev->custom_ioctl(client,
                                                ION_IOC_CLEAN_CACHES, arg);
        case ION_IOC_INV_CACHES:
                return client->dev->custom_ioctl(client,
                                                ION_IOC_INV_CACHES, arg);
        case ION_IOC_CLEAN_INV_CACHES:
                return client->dev->custom_ioctl(client,
                                                ION_IOC_CLEAN_INV_CACHES, arg);
        default:
                return -ENOTTY;
        }
        return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
        struct ion_client *client = file->private_data;

        pr_debug("%s: %d\n", __func__, __LINE__);
        ion_client_destroy(client);
        return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
        struct miscdevice *miscdev = file->private_data;
        struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
        struct ion_client *client;
        char debug_name[64];

        pr_debug("%s: %d\n", __func__, __LINE__);
        snprintf(debug_name, sizeof(debug_name), "%u",
                 task_pid_nr(current->group_leader));
        client = ion_client_create(dev, -1, debug_name);
        if (IS_ERR_OR_NULL(client))
                return PTR_ERR(client);
        file->private_data = client;

        return 0;
}

static const struct file_operations ion_fops = {
        .owner = THIS_MODULE,
        .open = ion_open,
        .release = ion_release,
        .unlocked_ioctl = ion_ioctl,
};

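/**
 * ion_debug_heap_total() - sum one client's usage of a given heap
 * @client:     client whose handle tree is walked
 * @id:         heap id to account against
 *
 * Walks the client's handles under client->lock and adds up the sizes of
 * all buffers that live on the requested heap.  Debugfs helper only.
 */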
static size_t ion_debug_heap_total(struct ion_client *client,
                                   enum ion_heap_ids id)
{
        size_t size = 0;
        struct rb_node *n;

        mutex_lock(&client->lock);
        for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                struct ion_handle *handle = rb_entry(n,
                                                     struct ion_handle,
                                                     node);
                if (handle->buffer->heap->id == id)
                        size += handle->buffer->size;
        }
        mutex_unlock(&client->lock);
        return size;
}

/**
 * Searches through a client's handles to find whether the buffer is owned
 * by this client. Used for debug output.
 * @param client pointer to candidate owner of buffer
 * @param buf pointer to buffer that we are trying to find the owner of
 * @return 1 if found, 0 otherwise
 */
static int ion_debug_find_buffer_owner(const struct ion_client *client,
                                       const struct ion_buffer *buf)
{
        struct rb_node *n;

        for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                const struct ion_handle *handle = rb_entry(n,
                                                const struct ion_handle,
                                                node);
                if (handle->buffer == buf)
                        return 1;
        }
        return 0;
}

/**
 * Adds a mem_map_data pointer to the mem_map tree.
 * Used for debug output.
 * @param mem_map The mem_map tree
 * @param data The new data to add to the tree
 */
static void ion_debug_mem_map_add(struct rb_root *mem_map,
                                  struct mem_map_data *data)
{
        struct rb_node **p = &mem_map->rb_node;
        struct rb_node *parent = NULL;
        struct mem_map_data *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct mem_map_data, node);

                if (data->addr < entry->addr) {
                        p = &(*p)->rb_left;
                } else if (data->addr > entry->addr) {
                        p = &(*p)->rb_right;
                } else {
                        pr_err("%s: mem_map_data already found.", __func__);
                        BUG();
                }
        }
        rb_link_node(&data->node, parent, p);
        rb_insert_color(&data->node, mem_map);
}

/**
 * Search for an owner of a buffer by iterating over all ION clients.
 * @param dev ion device containing pointers to all the clients.
 * @param buffer pointer to buffer we are trying to find the owner of.
 * @return name of owner.
 */
const char *ion_debug_locate_owner(const struct ion_device *dev,
                                   const struct ion_buffer *buffer)
{
        struct rb_node *j;
        const char *client_name = NULL;

        for (j = rb_first(&dev->clients); j && !client_name;
             j = rb_next(j)) {
                struct ion_client *client = rb_entry(j, struct ion_client,
                                                     node);
                if (ion_debug_find_buffer_owner(client, buffer))
                        client_name = client->name;
        }
        return client_name;
}

/**
 * Create a mem_map of the heap.
 * @param s seq_file to log error message to.
 * @param heap The heap to create mem_map for.
 * @param mem_map The mem map to be created.
 */
void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
                              struct rb_root *mem_map)
{
        struct ion_device *dev = heap->dev;
        struct rb_node *n;
        size_t size;

        if (!heap->ops->phys)
                return;

        for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
                struct ion_buffer *buffer =
                                rb_entry(n, struct ion_buffer, node);
                if (buffer->heap->id == heap->id) {
                        struct mem_map_data *data =
                                        kzalloc(sizeof(*data), GFP_KERNEL);
                        if (!data) {
                                seq_printf(s, "ERROR: out of memory. Part of memory map will not be logged\n");
                                break;
                        }

                        buffer->heap->ops->phys(buffer->heap, buffer,
                                                &(data->addr), &size);
                        data->size = (unsigned long) size;
                        data->addr_end = data->addr + data->size - 1;
                        data->client_name = ion_debug_locate_owner(dev, buffer);
                        ion_debug_mem_map_add(mem_map, data);
                }
        }
}

/**
 * Free the memory allocated by ion_debug_mem_map_create
 * @param mem_map The mem map to free.
 */
static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
{
        if (mem_map) {
                struct rb_node *n;
                while ((n = rb_first(mem_map)) != 0) {
                        struct mem_map_data *data =
                                        rb_entry(n, struct mem_map_data, node);
                        rb_erase(&data->node, mem_map);
                        kfree(data);
                }
        }
}

/**
 * Print heap debug information.
 * @param s seq_file to log message to.
 * @param heap pointer to heap that we will print debug information for.
 */
static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
{
        if (heap->ops->print_debug) {
                struct rb_root mem_map = RB_ROOT;
                ion_debug_mem_map_create(s, heap, &mem_map);
                heap->ops->print_debug(heap, s, &mem_map);
                ion_debug_mem_map_destroy(&mem_map);
        }
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
        struct ion_heap *heap = s->private;
        struct ion_device *dev = heap->dev;
        struct rb_node *n;

        mutex_lock(&dev->lock);
        seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");

        for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
                struct ion_client *client = rb_entry(n, struct ion_client,
                                                     node);
                size_t size = ion_debug_heap_total(client, heap->id);
                if (!size)
                        continue;
                if (client->task) {
                        char task_comm[TASK_COMM_LEN];

                        get_task_comm(task_comm, client->task);
                        seq_printf(s, "%16s %16u %16zu\n", task_comm,
                                   client->pid, size);
                } else {
                        seq_printf(s, "%16s %16u %16zu\n", client->name,
                                   client->pid, size);
                }
        }
        ion_heap_print_debug(s, heap);
        mutex_unlock(&dev->lock);
        return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
        return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
        .open = ion_debug_heap_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

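/**
 * ion_device_add_heap() - register a heap with an ion device
 * @dev:        device returned by ion_device_create()
 * @heap:       heap to add; must implement allocate, free, map_dma and
 *              unmap_dma
 *
 * Heaps are kept in an rbtree keyed by heap->id, and each heap gets a
 * debugfs file named after heap->name under the "ion" debugfs directory.
 * Illustrative board-level usage only (pdata and the loop are hypothetical):
 *
 *      heap = ion_heap_create(&pdata->heaps[i]);
 *      if (!IS_ERR_OR_NULL(heap))
 *              ion_device_add_heap(idev, heap);
 */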
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
        struct rb_node **p = &dev->heaps.rb_node;
        struct rb_node *parent = NULL;
        struct ion_heap *entry;

        if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
            !heap->ops->unmap_dma)
                pr_err("%s: can not add heap with invalid ops struct.\n",
                        __func__);

        heap->dev = dev;
        mutex_lock(&dev->lock);
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_heap, node);

                if (heap->id < entry->id) {
                        p = &(*p)->rb_left;
                } else if (heap->id > entry->id) {
                        p = &(*p)->rb_right;
                } else {
                        pr_err("%s: can not insert multiple heaps with id %d\n",
                                __func__, heap->id);
                        goto end;
                }
        }

        rb_link_node(&heap->node, parent, p);
        rb_insert_color(&heap->node, &dev->heaps);
        debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
                            &debug_heap_fops);
end:
        mutex_unlock(&dev->lock);
}

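/**
 * ion_secure_handle() - content-protect a single buffer
 * @client:     client owning @handle
 * @handle:     handle to the buffer to secure
 * @version:    secure environment version, passed through to the heap
 * @data:       heap-specific data, passed through to the heap
 * @flags:      heap-specific flags
 *
 * Only heaps whose type passes ion_heap_allow_handle_secure() support this.
 * The work is delegated to the heap's secure_buffer op while client->lock is
 * held so the buffer cannot be freed underneath us.  ion_unsecure_handle()
 * below is the inverse operation.
 */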
int ion_secure_handle(struct ion_client *client, struct ion_handle *handle,
                      int version, void *data, int flags)
{
        int ret = -EINVAL;
        struct ion_heap *heap;
        struct ion_buffer *buffer;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                WARN(1, "%s: invalid handle passed to secure.\n", __func__);
                goto out_unlock;
        }

        buffer = handle->buffer;
        heap = buffer->heap;

        if (!ion_heap_allow_handle_secure(heap->type)) {
                pr_err("%s: cannot secure buffer from non secure heap\n",
                        __func__);
                goto out_unlock;
        }

        BUG_ON(!buffer->heap->ops->secure_buffer);
        /*
         * Protect the handle via the client lock to ensure we aren't
         * racing with free
         */
        ret = buffer->heap->ops->secure_buffer(buffer, version, data, flags);

out_unlock:
        mutex_unlock(&client->lock);
        return ret;
}

int ion_unsecure_handle(struct ion_client *client, struct ion_handle *handle)
{
        int ret = -EINVAL;
        struct ion_heap *heap;
        struct ion_buffer *buffer;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                WARN(1, "%s: invalid handle passed to unsecure.\n", __func__);
                goto out_unlock;
        }

        buffer = handle->buffer;
        heap = buffer->heap;

        if (!ion_heap_allow_handle_secure(heap->type)) {
                pr_err("%s: cannot unsecure buffer from non secure heap\n",
                        __func__);
                goto out_unlock;
        }

        BUG_ON(!buffer->heap->ops->unsecure_buffer);
        /*
         * Protect the handle via the client lock to ensure we aren't
         * racing with free
         */
        ret = buffer->heap->ops->unsecure_buffer(buffer, 0);

out_unlock:
        mutex_unlock(&client->lock);
        return ret;
}

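/**
 * ion_secure_heap() - content-protect an entire heap
 * @dev:        ion device whose registered heaps are searched
 * @heap_id:    heap id to secure, compared against ION_HEAP(heap->id)
 * @version:    secure environment version, passed through to the heap
 * @data:       heap-specific data
 *
 * Walks the heap rbtree under dev->lock and invokes the matching heap's
 * secure_heap op; returns -EINVAL if the heap cannot be secured.
 * ion_unsecure_heap() below reverses the operation.
 */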
int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
                    void *data)
{
        struct rb_node *n;
        int ret_val = 0;

        /*
         * traverse the list of heaps available in this system
         * and find the heap that is specified.
         */
        mutex_lock(&dev->lock);
        for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
                struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
                if (!ion_heap_allow_heap_secure(heap->type))
                        continue;
                if (ION_HEAP(heap->id) != heap_id)
                        continue;
                if (heap->ops->secure_heap)
                        ret_val = heap->ops->secure_heap(heap, version, data);
                else
                        ret_val = -EINVAL;
                break;
        }
        mutex_unlock(&dev->lock);
        return ret_val;
}
EXPORT_SYMBOL(ion_secure_heap);

int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
                      void *data)
{
        struct rb_node *n;
        int ret_val = 0;

        /*
         * traverse the list of heaps available in this system
         * and find the heap that is specified.
         */
        mutex_lock(&dev->lock);
        for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
                struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
                if (!ion_heap_allow_heap_secure(heap->type))
                        continue;
                if (ION_HEAP(heap->id) != heap_id)
                        continue;
                if (heap->ops->unsecure_heap)
                        ret_val = heap->ops->unsecure_heap(heap, version, data);
                else
                        ret_val = -EINVAL;
                break;
        }
        mutex_unlock(&dev->lock);
        return ret_val;
}
EXPORT_SYMBOL(ion_unsecure_heap);

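/*
 * Support for the "check_leaked_fds" debugfs file:
 * ion_mark_dangling_buffers_locked() (defined elsewhere in this driver)
 * marks buffers that no client handle references any more, and anything
 * still marked below is reported as a likely leaked fd/handle along with
 * its heap, size and refcount.
 */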
static int ion_debug_leak_show(struct seq_file *s, void *unused)
{
        struct ion_device *dev = s->private;
        struct rb_node *n;

        seq_printf(s, "%16s %16s %16s %16s\n", "buffer", "heap", "size",
                   "ref cnt");

        mutex_lock(&dev->lock);
        ion_mark_dangling_buffers_locked(dev);

        /* Anyone still marked as a 1 means a leaked handle somewhere */
        for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
                struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
                                                  node);

                if (buf->marked == 1)
                        seq_printf(s, "%16p %16s %16zu %16d\n",
                                   buf, buf->heap->name, buf->size,
                                   atomic_read(&buf->ref.refcount));
        }
        mutex_unlock(&dev->lock);
        return 0;
}

static int ion_debug_leak_open(struct inode *inode, struct file *file)
{
        return single_open(file, ion_debug_leak_show, inode->i_private);
}

static const struct file_operations debug_leak_fops = {
        .open = ion_debug_leak_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

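/**
 * ion_device_create() - create and register the ion misc device
 * @custom_ioctl: platform hook invoked for ION_IOC_CUSTOM and the cache
 *                maintenance ioctls
 *
 * Registers /dev/ion and creates the "ion" debugfs directory.  Illustrative
 * platform setup only (the msm_ion_custom_ioctl name is hypothetical here):
 *
 *      idev = ion_device_create(msm_ion_custom_ioctl);
 *      if (IS_ERR_OR_NULL(idev))
 *              return PTR_ERR(idev);
 */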
struct ion_device *ion_device_create(long (*custom_ioctl)
                                     (struct ion_client *client,
                                      unsigned int cmd,
                                      unsigned long arg))
{
        struct ion_device *idev;
        int ret;

        idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
        if (!idev)
                return ERR_PTR(-ENOMEM);

        idev->dev.minor = MISC_DYNAMIC_MINOR;
        idev->dev.name = "ion";
        idev->dev.fops = &ion_fops;
        idev->dev.parent = NULL;
        ret = misc_register(&idev->dev);
        if (ret) {
                pr_err("ion: failed to register misc device.\n");
                kfree(idev);
                return ERR_PTR(ret);
        }

        idev->debug_root = debugfs_create_dir("ion", NULL);
        if (IS_ERR_OR_NULL(idev->debug_root))
                pr_err("ion: failed to create debug files.\n");

        idev->custom_ioctl = custom_ioctl;
        idev->buffers = RB_ROOT;
        mutex_init(&idev->lock);
        idev->heaps = RB_ROOT;
        idev->clients = RB_ROOT;
        debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
                            &debug_leak_fops);

        setup_ion_leak_check(idev->debug_root);
        return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
        misc_deregister(&dev->dev);
        /* XXX need to free the heaps and clients ? */
        kfree(dev);
}

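/**
 * ion_reserve() - memblock-reserve memory for fixed-base heaps at boot
 * @data:       platform data describing the heaps
 *
 * Must be called early (hence __init) while memblock is still active; heaps
 * with a zero size are skipped.  Illustrative platform data only (the id,
 * base and size values below are hypothetical):
 *
 *      static struct ion_platform_heap heaps[] = {
 *              { .id = ION_CP_MM_HEAP_ID, .type = ION_HEAP_TYPE_CARVEOUT,
 *                .name = "mm", .base = 0x80000000, .size = SZ_64M },
 *      };
 *      static struct ion_platform_data pdata = {
 *              .nr = ARRAY_SIZE(heaps), .heaps = heaps,
 *      };
 *      ion_reserve(&pdata);
 */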
void __init ion_reserve(struct ion_platform_data *data)
{
        int i, ret;

        for (i = 0; i < data->nr; i++) {
                if (data->heaps[i].size == 0)
                        continue;
                ret = memblock_reserve(data->heaps[i].base,
                                       data->heaps[i].size);
                if (ret)
                        pr_err("memblock reserve of %zx@%pa failed\n",
                               data->heaps[i].size,
                               &data->heaps[i].base);
        }
}