/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/msm_ion.h>
#include <trace/events/kmem.h>


#include <mach/iommu_domains.h>
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		an rb tree of all the heaps in the system
 * @custom_ioctl:	hook for device-specific ioctls
 * @clients:		an rb tree of all the clients created against this device
 * @debug_root:		root dentry of this device's debugfs entries
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @iommu_map_cnt:	count of times this client has mapped for iommu
 *
 * Modifications to node and the map counts should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int iommu_map_cnt;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return ((buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

static void ion_iommu_release(struct kref *kref);

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static void ion_iommu_add(struct ion_buffer *buffer,
			  struct ion_iommu_map *iommu)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (iommu->key < entry->key) {
			p = &(*p)->rb_left;
		} else if (iommu->key > entry->key) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer %p already has mapping for domain %d"
				" and partition %d\n", __func__,
				buffer,
				iommu_map_domain(iommu),
				iommu_map_partition(iommu));
			BUG();
		}
	}

	rb_link_node(&iommu->node, parent, p);
	rb_insert_color(&iommu->node, &buffer->iommu_maps);
}

static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
					      unsigned int domain_no,
					      unsigned int partition_no)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;
	uint64_t key = domain_no;
	key = key << 32 | partition_no;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (key < entry->key)
			p = &(*p)->rb_left;
		else if (key > entry->key)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	return NULL;
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
			    i) {
			if (sg_dma_len(sg) == PAGE_SIZE)
				continue;
			pr_err("%s: cached mappings that will be faulted in "
			       "must have pagewise sg_lists\n", __func__);
			ret = -EINVAL;
			goto err;
		}

		ret = ion_buffer_alloc_dirty(buffer);
		if (ret)
			goto err;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here.  However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg.  The implicit contract here is that
	   memory coming from the heaps is ready for dma, i.e. if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (sg_dma_address(sg) == 0)
			sg_dma_address(sg) = sg_phys(sg);
	}
	ion_buffer_add(dev, buffer);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
	kfree(buffer);
	return ERR_PTR(ret);
}

/**
 * Check for delayed IOMMU unmapping. Also unmap any outstanding
 * mappings which would otherwise have been leaked.
 */
static void ion_iommu_delayed_unmap(struct ion_buffer *buffer)
{
	struct ion_iommu_map *iommu_map;
	struct rb_node *node;
	const struct rb_root *rb = &(buffer->iommu_maps);
	unsigned long ref_count;
	unsigned int delayed_unmap;

	mutex_lock(&buffer->lock);

	while ((node = rb_first(rb)) != 0) {
		iommu_map = rb_entry(node, struct ion_iommu_map, node);
		ref_count = atomic_read(&iommu_map->ref.refcount);
		delayed_unmap = iommu_map->flags & ION_IOMMU_UNMAP_DELAYED;

		if ((delayed_unmap && ref_count > 1) || !delayed_unmap) {
			pr_err("%s: Virtual memory address leak in domain %u, partition %u\n",
				__func__, iommu_map->domain_info[DI_DOMAIN_NUM],
				iommu_map->domain_info[DI_PARTITION_NUM]);
		}
		/* set ref count to 1 to force release */
		kref_init(&iommu_map->ref);
		kref_put(&iommu_map->ref, ion_iommu_release);
	}

	mutex_unlock(&buffer->lock);
}

static void ion_delayed_unsecure(struct ion_buffer *buffer)
{
	if (buffer->heap->ops->unsecure_buffer)
		buffer->heap->ops->unsecure_buffer(buffer, 1);
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);

	ion_delayed_unsecure(buffer);
	ion_iommu_delayed_unmap(buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	if (buffer->flags & ION_FLAG_CACHED)
		kfree(buffer->dirty);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_put(buffer);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	unsigned long secure_allocation = flags & ION_SECURE;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	/*
	 * For now, we don't want to fault in pages individually since
	 * clients are already doing manual cache maintenance. In
	 * other words, the implicit caching infrastructure is in
	 * place (in code) but should not be used.
	 */
	flags |= ION_FLAG_CACHED_NEEDS_SYNC;

	pr_debug("%s: len %d align %d heap_mask %u flags %x\n", __func__, len,
		 align, heap_mask, flags);
	/*
	 * Traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client and matches
	 * the request of the caller, allocate from it.  Repeat until the
	 * allocation has succeeded or all heaps have been tried.
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap type */
		if (!((1 << heap->id) & heap_mask))
			continue;
		/* Do not allow un-secure heap if secure is specified */
		if (secure_allocation &&
		    !ion_heap_allow_secure_allocation(heap->type))
			continue;
		trace_ion_alloc_buffer_start(client->name, heap->name, len,
					     heap_mask, flags);
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		trace_ion_alloc_buffer_end(client->name, heap->name, len,
					   heap_mask, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;

		trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
					heap_mask, flags, PTR_ERR(buffer));
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
			int ret_value = snprintf(&dbg_str[dbg_str_idx],
						len_left, "%s ", heap->name);
			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
			}
		}
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_mask, flags, -ENODEV);
		return ERR_PTR(-ENODEV);
	}

	if (IS_ERR(buffer)) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_mask, flags, PTR_ERR(buffer));
		pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
			 "0x%x) from heap(s) %sfor client %s with heap "
			 "mask 0x%x\n",
			 len, align, dbg_str, client->name, client->heap_mask);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
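
/*
 * Illustrative usage sketch (not part of the driver): allocate from a
 * specific heap and release the handle when done.  The client, the heap ID
 * and the flags here are assumptions for the example; note that heap_mask
 * is a bitmask of heap IDs, not heap types.
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE,
 *			   1 << some_heap_id, ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 */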

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		mutex_unlock(&client->lock);
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
		int domain_num, int partition_num, unsigned long align,
		unsigned long iova_length, unsigned long flags,
		unsigned long *iova)
{
	struct ion_iommu_map *data;
	int ret;

	data = kmalloc(sizeof(*data), GFP_ATOMIC);

	if (!data)
		return ERR_PTR(-ENOMEM);

	data->buffer = buffer;
	iommu_map_domain(data) = domain_num;
	iommu_map_partition(data) = partition_num;

	ret = buffer->heap->ops->map_iommu(buffer, data,
					   domain_num,
					   partition_num,
					   align,
					   iova_length,
					   flags);

	if (ret)
		goto out;

	kref_init(&data->ref);
	*iova = data->iova_addr;

	ion_iommu_add(buffer, data);

	return data;

out:
	kfree(data);
	return ERR_PTR(ret);
}

int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags, unsigned long iommu_flags)
{
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	int ret = 0;

	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: client pointer is invalid\n", __func__);
		return -EINVAL;
	}
	if (IS_ERR_OR_NULL(handle)) {
		pr_err("%s: handle pointer is invalid\n", __func__);
		return -EINVAL;
	}
	if (IS_ERR_OR_NULL(handle->buffer)) {
		pr_err("%s: buffer pointer is invalid\n", __func__);
		return -EINVAL;
	}

	if (ION_IS_CACHED(flags)) {
		pr_err("%s: Cannot map iommu as cached.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
726 pr_err("%s: invalid handle passed to map_kernel.\n",
727 __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	/*
	 * If clients don't want a custom iova length, just use whatever
	 * the buffer size is
	 */
	if (!iova_length)
		iova_length = buffer->size;

	if (buffer->size > iova_length) {
		pr_debug("%s: iova length %lx is not at least buffer size"
			 " %x\n", __func__, iova_length, buffer->size);
		ret = -EINVAL;
		goto out;
	}

	if (buffer->size & ~PAGE_MASK) {
		pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
			 buffer->size, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	if (iova_length & ~PAGE_MASK) {
		pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
			 iova_length, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
	if (!iommu_map) {
		iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
					    align, iova_length, flags, iova);
		if (!IS_ERR_OR_NULL(iommu_map)) {
			iommu_map->flags = iommu_flags;

			if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
				kref_get(&iommu_map->ref);
		} else {
			ret = PTR_ERR(iommu_map);
		}
	} else {
		if (iommu_map->flags != iommu_flags) {
			pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
				__func__, handle,
				iommu_map->flags, iommu_flags);
			ret = -EINVAL;
		} else if (iommu_map->mapped_size != iova_length) {
			pr_err("%s: handle %p is already mapped with length"
				" %x, trying to map with length %lx\n",
				__func__, handle, iommu_map->mapped_size,
				iova_length);
			ret = -EINVAL;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	if (!ret)
		buffer->iommu_map_cnt++;
	*buffer_size = buffer->size;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
EXPORT_SYMBOL(ion_map_iommu);

static void ion_iommu_release(struct kref *kref)
{
	struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
						 ref);
	struct ion_buffer *buffer = map->buffer;

	rb_erase(&map->node, &buffer->iommu_maps);
	buffer->heap->ops->unmap_iommu(map);
	kfree(map);
}

void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num)
{
	struct ion_iommu_map *iommu_map;
	struct ion_buffer *buffer;

	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: client pointer is invalid\n", __func__);
		return;
	}
	if (IS_ERR_OR_NULL(handle)) {
		pr_err("%s: handle pointer is invalid\n", __func__);
		return;
	}
	if (IS_ERR_OR_NULL(handle->buffer)) {
		pr_err("%s: buffer pointer is invalid\n", __func__);
		return;
	}

	mutex_lock(&client->lock);
	buffer = handle->buffer;

	mutex_lock(&buffer->lock);

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);

	if (!iommu_map) {
		WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
			domain_num, partition_num, buffer);
		goto out;
	}

	kref_put(&iommu_map->ref, ion_iommu_release);

	buffer->iommu_map_cnt--;
out:
	mutex_unlock(&buffer->lock);

	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_iommu);
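
/*
 * Illustrative usage sketch (not part of the driver): map a buffer into an
 * IOMMU domain/partition and later drop the mapping.  The domain and
 * partition numbers are platform-specific assumptions; passing 0 for
 * iova_length maps the whole buffer, as described above.
 *
 *	unsigned long iova, buffer_size;
 *	int ret;
 *
 *	ret = ion_map_iommu(client, handle, some_domain, some_partition,
 *			    PAGE_SIZE, 0, &iova, &buffer_size, 0, 0);
 *	if (ret)
 *		return ret;
 *	...
 *	ion_unmap_iommu(client, handle, some_domain, some_partition);
 */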

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
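
/*
 * Illustrative usage sketch (not part of the driver): get a kernel virtual
 * address for a handle, touch the memory, then drop the kernel mapping.
 * "buffer_len" stands in for the allocation size and is an assumption.
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR_OR_NULL(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, buffer_len);
 *	ion_unmap_kernel(client, handle);
 */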

int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
			void *uaddr, unsigned long offset, unsigned long len,
			unsigned int cmd)
{
	struct ion_buffer *buffer;
	int ret = -EINVAL;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to do_cache_op.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!ION_IS_CACHED(buffer->flags)) {
		ret = 0;
		goto out;
	}

	if (!handle->buffer->heap->ops->cache_op) {
		pr_err("%s: cache_op is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
					  offset, len, cmd);

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
EXPORT_SYMBOL(ion_do_cache_op);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	struct rb_node *n2;

	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
			"heap_name", "size_in_bytes", "handle refcount",
			"buffer", "physical", "[domain,partition] - virt");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		seq_printf(s, "%16.16s: %16x : %16d : %12p",
			   handle->buffer->heap->name,
			   handle->buffer->size,
			   atomic_read(&handle->ref.refcount),
			   handle->buffer);

		if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
			type == ION_HEAP_TYPE_CARVEOUT ||
			type == (enum ion_heap_type) ION_HEAP_TYPE_CP)
			seq_printf(s, " : %12pa", &handle->buffer->priv_phys);
		else
			seq_printf(s, " : %12s", "N/A");

		for (n2 = rb_first(&handle->buffer->iommu_maps); n2;
			n2 = rb_next(n2)) {
			struct ion_iommu_map *imap =
				rb_entry(n2, struct ion_iommu_map, node);
			seq_printf(s, " : [%d,%d] - %8lx",
					imap->domain_info[DI_DOMAIN_NUM],
					imap->domain_info[DI_PARTITION_NUM],
					imap->iova_addr);
		}
		seq_printf(s, "\n");
	}
	mutex_unlock(&client->lock);

	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;
	unsigned int name_len;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	name_len = strnlen(name, 64);

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);

	client->name = kzalloc(name_len+1, GFP_KERNEL);
	if (!client->name) {
		put_task_struct(current->group_leader);
		kfree(client);
		return ERR_PTR(-ENOMEM);
	} else {
		strlcpy(client->name, name, name_len+1);
	}

	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	mutex_lock(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}
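
/*
 * Illustrative usage sketch (not part of the driver): a kernel driver
 * creates a client against the ion device, uses it for allocations, and
 * destroys it when done.  "my_ion_device" is an assumption; a heap_mask of
 * -1 allows every heap type, since it is checked against (1 << heap->type).
 *
 *	struct ion_client *client;
 *
 *	client = ion_client_create(my_ion_device, -1, "my-driver");
 *	if (IS_ERR_OR_NULL(client))
 *		return PTR_ERR(client);
 *	...
 *	ion_client_destroy(client);
 */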

/**
 * ion_mark_dangling_buffers_locked() - Mark dangling buffers
 * @dev:	the ion device whose buffers will be searched
 *
 * Sets marked=1 for all known buffers associated with `dev' that no
 * longer have a handle pointing to them. dev->lock should be held
 * across a call to this function (and should only be unlocked after
 * checking for marked buffers).
 */
static void ion_mark_dangling_buffers_locked(struct ion_device *dev)
{
	struct rb_node *n, *n2;
	/* mark all buffers as 1 */
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		buf->marked = 1;
	}

	/* now see which buffers we can access */
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);

		mutex_lock(&client->lock);
		for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
			struct ion_handle *handle
				= rb_entry(n2, struct ion_handle, node);

			handle->buffer->marked = 0;
		}
		mutex_unlock(&client->lock);
	}
}

#ifdef CONFIG_ION_LEAK_CHECK
static u32 ion_debug_check_leaks_on_destroy;

static int ion_check_for_and_print_leaks(struct ion_device *dev)
{
	struct rb_node *n;
	int num_leaks = 0;

	if (!ion_debug_check_leaks_on_destroy)
		return 0;

	/* check for leaked buffers (those that no longer have a
	 * handle pointing to them) */
	ion_mark_dangling_buffers_locked(dev);

	/* Anyone still marked as a 1 means a leaked handle somewhere */
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		if (buf->marked == 1) {
			pr_info("Leaked ion buffer at %p\n", buf);
			num_leaks++;
		}
	}
	return num_leaks;
}
static void setup_ion_leak_check(struct dentry *debug_root)
{
	debugfs_create_bool("check_leaks_on_destroy", 0664, debug_root,
			    &ion_debug_check_leaks_on_destroy);
}
#else
static int ion_check_for_and_print_leaks(struct ion_device *dev)
{
	return 0;
}
static void setup_ion_leak_check(struct dentry *debug_root)
{
}
#endif

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;
	int num_leaks;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);

	num_leaks = ion_check_for_and_print_leaks(dev);

	mutex_unlock(&dev->lock);

	if (num_leaks) {
		struct task_struct *current_task = current;
		char current_task_name[TASK_COMM_LEN];
		get_task_comm(current_task_name, current_task);
		WARN(1, "%s: Detected %d leaked ion buffer%s.\n",
			__func__, num_leaks, num_leaks == 1 ? "" : "s");
		pr_info("task name at time of leak: %s, pid: %d\n",
			current_task_name, current_task->pid);
	}

	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
			unsigned long *flags)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*flags = buffer->flags;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_flags);

int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
			unsigned long *size)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*size = buffer->size;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_size);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
					size_t chunk_size, size_t total_size)
{
	struct sg_table *table;
	int i, n_chunks, ret;
	struct scatterlist *sg;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	n_chunks = DIV_ROUND_UP(total_size, chunk_size);
	pr_debug("creating sg_table with %d chunks\n", n_chunks);

	ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
	if (ret)
		goto err0;

	for_each_sg(table->sgl, sg, table->nents, i) {
		dma_addr_t addr = buffer_base + i * chunk_size;
		sg_dma_address(sg) = addr;
		sg_dma_len(sg) = chunk_size;
	}

	return table;
err0:
	kfree(table);
	return ERR_PTR(ret);
}
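
/*
 * Illustrative usage sketch (not part of the driver): describe a physically
 * contiguous region as a table of fixed-size chunks.  The base address and
 * the sizes are assumptions for the example.
 *
 *	struct sg_table *table;
 *
 *	table = ion_create_chunked_sg_table(base_paddr, SZ_64K, SZ_1M);
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 *
 * This yields DIV_ROUND_UP(SZ_1M, SZ_64K) = 16 entries, each with a
 * sg_dma_len() of 64K and sg_dma_address() advancing from base_paddr.
 */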
1292
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -07001293static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1294 struct device *dev,
1295 enum dma_data_direction direction);
1296
Laura Abbottb14ed962012-01-30 14:18:08 -08001297static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1298 enum dma_data_direction direction)
1299{
1300 struct dma_buf *dmabuf = attachment->dmabuf;
1301 struct ion_buffer *buffer = dmabuf->priv;
1302
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001303 ion_buffer_sync_for_device(buffer, attachment->dev, direction);
Laura Abbottb14ed962012-01-30 14:18:08 -08001304 return buffer->sg_table;
1305}
1306
1307static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
1308 struct sg_table *table,
1309 enum dma_data_direction direction)
1310{
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001311}
1312
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -07001313static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
1314{
1315 unsigned long pages = buffer->sg_table->nents;
1316 unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;
1317
1318 buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
1319 if (!buffer->dirty)
1320 return -ENOMEM;
1321 return 0;
1322}
1323
1324struct ion_vma_list {
1325 struct list_head list;
1326 struct vm_area_struct *vma;
1327};
1328
1329static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1330 struct device *dev,
1331 enum dma_data_direction dir)
1332{
1333 struct scatterlist *sg;
1334 int i;
1335 struct ion_vma_list *vma_list;
1336
1337 pr_debug("%s: syncing for device %s\n", __func__,
1338 dev ? dev_name(dev) : "null");
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001339
Rebecca Schultz Zavinf858ba42012-09-21 11:46:06 -07001340 if (!ion_buffer_fault_user_mappings(buffer))
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001341 return;
1342
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -07001343 mutex_lock(&buffer->lock);
1344 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
1345 if (!test_bit(i, buffer->dirty))
1346 continue;
1347 dma_sync_sg_for_device(dev, sg, 1, dir);
1348 clear_bit(i, buffer->dirty);
1349 }
1350 list_for_each_entry(vma_list, &buffer->vmas, list) {
1351 struct vm_area_struct *vma = vma_list->vma;
1352
1353 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
1354 NULL);
1355 }
1356 mutex_unlock(&buffer->lock);
1357}
1358
1359int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001360{
Laura Abbottb14ed962012-01-30 14:18:08 -08001361 struct ion_buffer *buffer = vma->vm_private_data;
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -07001362 struct scatterlist *sg;
1363 int i;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001364
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -07001365 mutex_lock(&buffer->lock);
1366 set_bit(vmf->pgoff, buffer->dirty);
1367
1368 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
1369 if (i != vmf->pgoff)
1370 continue;
1371 dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
1372 vm_insert_page(vma, (unsigned long)vmf->virtual_address,
1373 sg_page(sg));
1374 break;
1375 }
1376 mutex_unlock(&buffer->lock);
1377 return VM_FAULT_NOPAGE;
1378}
1379
1380static void ion_vm_open(struct vm_area_struct *vma)
1381{
1382 struct ion_buffer *buffer = vma->vm_private_data;
1383 struct ion_vma_list *vma_list;
1384
1385 vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1386 if (!vma_list)
1387 return;
1388 vma_list->vma = vma;
1389 mutex_lock(&buffer->lock);
1390 list_add(&vma_list->list, &buffer->vmas);
1391 mutex_unlock(&buffer->lock);
1392 pr_debug("%s: adding %p\n", __func__, vma);
1393}
1394
1395static void ion_vm_close(struct vm_area_struct *vma)
1396{
1397 struct ion_buffer *buffer = vma->vm_private_data;
1398 struct ion_vma_list *vma_list, *tmp;
1399
1400 pr_debug("%s\n", __func__);
1401 mutex_lock(&buffer->lock);
1402 list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1403 if (vma_list->vma != vma)
1404 continue;
1405 list_del(&vma_list->list);
1406 kfree(vma_list);
1407 pr_debug("%s: deleting %p\n", __func__, vma);
1408 break;
1409 }
1410 mutex_unlock(&buffer->lock);
Laura Abbottb14ed962012-01-30 14:18:08 -08001411
Laura Abbotta6835092011-11-14 15:27:02 -08001412 if (buffer->heap->ops->unmap_user)
1413 buffer->heap->ops->unmap_user(buffer->heap, buffer);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001414}
1415
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -07001416struct vm_operations_struct ion_vma_ops = {
1417 .open = ion_vm_open,
1418 .close = ion_vm_close,
1419 .fault = ion_vm_fault,
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001420};
1421
Laura Abbottb14ed962012-01-30 14:18:08 -08001422static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001423{
Laura Abbottb14ed962012-01-30 14:18:08 -08001424 struct ion_buffer *buffer = dmabuf->priv;
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -07001425 int ret = 0;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001426
Laura Abbottb14ed962012-01-30 14:18:08 -08001427 if (!buffer->heap->ops->map_user) {
1428 pr_err("%s: this heap does not define a method for mapping "
1429 "to userspace\n", __func__);
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001430 return -EINVAL;
1431 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001432
Rebecca Schultz Zavinf858ba42012-09-21 11:46:06 -07001433 if (ion_buffer_fault_user_mappings(buffer)) {
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -07001434 vma->vm_private_data = buffer;
1435 vma->vm_ops = &ion_vma_ops;
Mitchel Humpherys7ce0fe42013-01-10 11:30:26 -08001436 vma->vm_flags |= VM_MIXEDMAP;
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -07001437 ion_vm_open(vma);
Rebecca Schultz Zavin3edb9002012-09-19 23:31:05 -07001438 return 0;
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -07001439 }
Laura Abbotte8bc7aa2011-12-09 14:49:33 -08001440
Rebecca Schultz Zavin3edb9002012-09-19 23:31:05 -07001441 if (!(buffer->flags & ION_FLAG_CACHED))
1442 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1443
1444 mutex_lock(&buffer->lock);
1445 /* now map it to userspace */
1446 ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1447 mutex_unlock(&buffer->lock);
1448
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -07001449 if (ret)
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001450 pr_err("%s: failure mapping buffer to userspace\n",
1451 __func__);
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -07001452
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001453 return ret;
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001454}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	/* the dma-buf kmap offset is a page number, not a byte offset */
	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
			__func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}
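
/*
 * Illustrative sketch of how a dma-buf importer drives the CPU-access
 * callbacks above through the generic dma-buf API (assumed calling
 * sequence, not code from this driver; "dbuf" is a hypothetical handle
 * and dma_buf_kmap() takes a page number):
 *
 *	if (!dma_buf_begin_cpu_access(dbuf, 0, dbuf->size, DMA_FROM_DEVICE)) {
 *		void *va = dma_buf_kmap(dbuf, 0);
 *		...CPU reads or writes through va...
 *		dma_buf_kunmap(dbuf, 0, va);
 *		dma_buf_end_cpu_access(dbuf, 0, dbuf->size, DMA_FROM_DEVICE);
 *	}
 */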

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
			__func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
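
/*
 * Round-trip sketch (illustrative only): one client exports a buffer as an
 * fd with ion_share_dma_buf() and a second client attaches to the same
 * underlying ion_buffer with ion_import_dma_buf().  Error handling is
 * elided and the client/handle names are hypothetical:
 *
 *	int fd = ion_share_dma_buf(client_a, handle_a);
 *	struct ion_handle *handle_b = ion_import_dma_buf(client_b, fd);
 *	...
 *	ion_free(client_b, handle_b);
 */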

static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
			__func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.heap_mask, data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_MAP:
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;
		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;

		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		int ret = 0;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle)) {
			ret = PTR_ERR(data.handle);
			data.handle = NULL;
		}
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_SYNC:
	{
		struct ion_fd_data data;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		ion_sync_for_device(client, data.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	case ION_IOC_CLEAN_CACHES:
		return client->dev->custom_ioctl(client,
						ION_IOC_CLEAN_CACHES, arg);
	case ION_IOC_INV_CACHES:
		return client->dev->custom_ioctl(client,
						ION_IOC_INV_CACHES, arg);
	case ION_IOC_CLEAN_INV_CACHES:
		return client->dev->custom_ioctl(client,
						ION_IOC_CLEAN_INV_CACHES, arg);
	default:
		return -ENOTTY;
	}
	return 0;
}
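
/*
 * Typical userspace sequence against /dev/ion (an illustrative sketch;
 * the heap id, alignment, flags and error handling are placeholders and
 * depend on the platform's msm_ion.h):
 *
 *	int ion_fd = open("/dev/ion", O_RDONLY);
 *
 *	struct ion_allocation_data alloc = {
 *		.len = len,
 *		.align = 4096,
 *		.heap_mask = ION_HEAP(ION_SYSTEM_HEAP_ID),
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *
 *	struct ion_fd_data share = { .handle = alloc.handle };
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);
 *	(share.fd is now a dma-buf fd that can be mmap()ed or passed on)
 *	ioctl(ion_fd, ION_IOC_SYNC, &share);
 *	(flushes the buffer for device access)
 *
 *	struct ion_handle_data to_free = { .handle = alloc.handle };
 *	ioctl(ion_fd, ION_IOC_FREE, &to_free);
 */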

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, -1, debug_name);
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_ids id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

/**
 * Searches through a client's handles to find if the buffer is owned
 * by this client. Used for debug output.
 * @param client pointer to candidate owner of buffer
 * @param buf pointer to buffer that we are trying to find the owner of
 * @return 1 if found, 0 otherwise
 */
static int ion_debug_find_buffer_owner(const struct ion_client *client,
				       const struct ion_buffer *buf)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		const struct ion_handle *handle = rb_entry(n,
						     const struct ion_handle,
						     node);
		if (handle->buffer == buf)
			return 1;
	}
	return 0;
}

/**
 * Adds mem_map_data pointer to the tree of mem_map
 * Used for debug output.
 * @param mem_map The mem_map tree
 * @param data The new data to add to the tree
 */
static void ion_debug_mem_map_add(struct rb_root *mem_map,
				  struct mem_map_data *data)
{
	struct rb_node **p = &mem_map->rb_node;
	struct rb_node *parent = NULL;
	struct mem_map_data *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct mem_map_data, node);

		if (data->addr < entry->addr) {
			p = &(*p)->rb_left;
		} else if (data->addr > entry->addr) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: mem_map_data already found.", __func__);
			BUG();
		}
	}
	rb_link_node(&data->node, parent, p);
	rb_insert_color(&data->node, mem_map);
}

/**
 * Search for an owner of a buffer by iterating over all ION clients.
 * @param dev ion device containing pointers to all the clients.
 * @param buffer pointer to buffer we are trying to find the owner of.
 * @return name of owner.
 */
const char *ion_debug_locate_owner(const struct ion_device *dev,
				   const struct ion_buffer *buffer)
{
	struct rb_node *j;
	const char *client_name = NULL;

	for (j = rb_first(&dev->clients); j && !client_name;
	     j = rb_next(j)) {
		struct ion_client *client = rb_entry(j, struct ion_client,
						     node);
		if (ion_debug_find_buffer_owner(client, buffer))
			client_name = client->name;
	}
	return client_name;
}

/**
 * Create a mem_map of the heap.
 * @param s seq_file to log error message to.
 * @param heap The heap to create mem_map for.
 * @param mem_map The mem map to be created.
 */
void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
			      struct rb_root *mem_map)
{
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t size;

	if (!heap->ops->phys)
		return;

	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer =
				rb_entry(n, struct ion_buffer, node);
		if (buffer->heap->id == heap->id) {
			struct mem_map_data *data =
					kzalloc(sizeof(*data), GFP_KERNEL);
			if (!data) {
				seq_printf(s, "ERROR: out of memory. "
					   "Part of memory map will not be logged\n");
				break;
			}

			buffer->heap->ops->phys(buffer->heap, buffer,
						&(data->addr), &size);
			data->size = (unsigned long) size;
			data->addr_end = data->addr + data->size - 1;
			data->client_name = ion_debug_locate_owner(dev, buffer);
			ion_debug_mem_map_add(mem_map, data);
		}
	}
}

/**
 * Free the memory allocated by ion_debug_mem_map_create
 * @param mem_map The mem map to free.
 */
static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
{
	if (mem_map) {
		struct rb_node *n;
		while ((n = rb_first(mem_map)) != 0) {
			struct mem_map_data *data =
					rb_entry(n, struct mem_map_data, node);
			rb_erase(&data->node, mem_map);
			kfree(data);
		}
	}
}

/**
 * Print heap debug information.
 * @param s seq_file to log message to.
 * @param heap pointer to heap that we will print debug information for.
 */
static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
{
	if (heap->ops->print_debug) {
		struct rb_root mem_map = RB_ROOT;
		ion_debug_mem_map_create(s, heap, &mem_map);
		heap->ops->print_debug(heap, s, &mem_map);
		ion_debug_mem_map_destroy(&mem_map);
	}
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;

	mutex_lock(&dev->lock);
	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);
		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	ion_heap_print_debug(s, heap);
	mutex_unlock(&dev->lock);
	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
			__func__);

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: can not insert multiple heaps with "
				"id %d\n", __func__, heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}
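
/*
 * Registration sketch (illustrative, not taken from this file): platform
 * code creates the device once and then adds each heap it has built.  The
 * custom-ioctl handler name and the ion_heap_create() helper are assumed
 * to be provided by the platform and heap code:
 *
 *	idev = ion_device_create(my_custom_ioctl);
 *	for (i = 0; i < pdata->nr; i++) {
 *		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);
 *		if (!IS_ERR_OR_NULL(heap))
 *			ion_device_add_heap(idev, heap);
 *	}
 */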

int ion_secure_handle(struct ion_client *client, struct ion_handle *handle,
		      int version, void *data, int flags)
{
	int ret = -EINVAL;
	struct ion_heap *heap;
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		WARN(1, "%s: invalid handle passed to secure.\n", __func__);
		goto out_unlock;
	}

	buffer = handle->buffer;
	heap = buffer->heap;

	if (!ion_heap_allow_handle_secure(heap->type)) {
		pr_err("%s: cannot secure buffer from non secure heap\n",
			__func__);
		goto out_unlock;
	}

	BUG_ON(!buffer->heap->ops->secure_buffer);
	/*
	 * Protect the handle via the client lock to ensure we aren't
	 * racing with free
	 */
	ret = buffer->heap->ops->secure_buffer(buffer, version, data, flags);

out_unlock:
	mutex_unlock(&client->lock);
	return ret;
}

int ion_unsecure_handle(struct ion_client *client, struct ion_handle *handle)
{
	int ret = -EINVAL;
	struct ion_heap *heap;
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		WARN(1, "%s: invalid handle passed to unsecure.\n", __func__);
		goto out_unlock;
	}

	buffer = handle->buffer;
	heap = buffer->heap;

	if (!ion_heap_allow_handle_secure(heap->type)) {
		pr_err("%s: cannot unsecure buffer from non secure heap\n",
			__func__);
		goto out_unlock;
	}

	BUG_ON(!buffer->heap->ops->unsecure_buffer);
	/*
	 * Protect the handle via the client lock to ensure we aren't
	 * racing with free
	 */
	ret = buffer->heap->ops->unsecure_buffer(buffer, 0);

out_unlock:
	mutex_unlock(&client->lock);
	return ret;
}

int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
		    void *data)
{
	struct rb_node *n;
	int ret_val = 0;

	/*
	 * traverse the list of heaps available in this system
	 * and find the heap that is specified.
	 */
	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		if (!ion_heap_allow_heap_secure(heap->type))
			continue;
		if (ION_HEAP(heap->id) != heap_id)
			continue;
		if (heap->ops->secure_heap)
			ret_val = heap->ops->secure_heap(heap, version, data);
		else
			ret_val = -EINVAL;
		break;
	}
	mutex_unlock(&dev->lock);
	return ret_val;
}
EXPORT_SYMBOL(ion_secure_heap);

int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
		      void *data)
{
	struct rb_node *n;
	int ret_val = 0;

	/*
	 * traverse the list of heaps available in this system
	 * and find the heap that is specified.
	 */
	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		if (!ion_heap_allow_heap_secure(heap->type))
			continue;
		if (ION_HEAP(heap->id) != heap_id)
			continue;
		if (heap->ops->unsecure_heap)
			ret_val = heap->ops->unsecure_heap(heap, version, data);
		else
			ret_val = -EINVAL;
		break;
	}
	mutex_unlock(&dev->lock);
	return ret_val;
}
EXPORT_SYMBOL(ion_unsecure_heap);
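
/*
 * Pairing sketch (illustrative): a content-protection driver that holds
 * the struct ion_device pointer would secure a whole heap for the lifetime
 * of a protected session and unsecure it on teardown.  MY_CP_HEAP_ID,
 * cp_version and cp_data are hypothetical names:
 *
 *	ret = ion_secure_heap(idev, ION_HEAP(MY_CP_HEAP_ID),
 *			      cp_version, cp_data);
 *	if (!ret) {
 *		...protected use case runs...
 *		ion_unsecure_heap(idev, ION_HEAP(MY_CP_HEAP_ID),
 *				  cp_version, cp_data);
 *	}
 */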

static int ion_debug_leak_show(struct seq_file *s, void *unused)
{
	struct ion_device *dev = s->private;
	struct rb_node *n;

	seq_printf(s, "%16s %16s %16s %16s\n", "buffer", "heap", "size",
		   "ref cnt");

	mutex_lock(&dev->lock);
	ion_mark_dangling_buffers_locked(dev);

	/* Anyone still marked as a 1 means a leaked handle somewhere */
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		if (buf->marked == 1)
			seq_printf(s, "%16p %16s %16zu %16d\n",
				   buf, buf->heap->name, buf->size,
				   atomic_read(&buf->ref.refcount));
	}
	mutex_unlock(&dev->lock);
	return 0;
}

static int ion_debug_leak_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_leak_show, inode->i_private);
}

static const struct file_operations debug_leak_fops = {
	.open = ion_debug_leak_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->clients = RB_ROOT;
	debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
			    &debug_leak_fops);

	setup_ion_leak_check(idev->debug_root);
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i, ret;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;
		ret = memblock_reserve(data->heaps[i].base,
				       data->heaps[i].size);
		if (ret)
			pr_err("memblock reserve of %zx@%pa failed\n",
				data->heaps[i].size,
				&data->heaps[i].base);
	}
}
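
/*
 * Boot-time sketch (illustrative): board files call ion_reserve() from the
 * machine ->reserve callback so that carveout heaps get their memory before
 * the page allocator claims it.  The heap id, base address and size below
 * are placeholders:
 *
 *	static struct ion_platform_heap heaps[] = {
 *		{
 *			.id = ION_CP_MM_HEAP_ID,
 *			.type = ION_HEAP_TYPE_CARVEOUT,
 *			.name = "mm",
 *			.base = 0x80000000,
 *			.size = SZ_64M,
 *		},
 *	};
 *	static struct ion_platform_data ion_pdata = {
 *		.nr = ARRAY_SIZE(heaps),
 *		.heaps = heaps,
 *	};
 *
 *	ion_reserve(&ion_pdata);
 */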