/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/msm_ion.h>
#include <trace/events/kmem.h>


#include <mach/iommu_domains.h>
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		list of all the heaps in the system
 * @clients:		an rb tree of all the clients attached to the device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @iommu_map_cnt:	count of times this client has mapped for iommu
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client. Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int iommu_map_cnt;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return ((buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

static void ion_iommu_release(struct kref *kref);

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static void ion_iommu_add(struct ion_buffer *buffer,
			  struct ion_iommu_map *iommu)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (iommu->key < entry->key) {
			p = &(*p)->rb_left;
		} else if (iommu->key > entry->key) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer %p already has mapping for domain %d"
				" and partition %d\n", __func__,
				buffer,
				iommu_map_domain(iommu),
				iommu_map_partition(iommu));
			BUG();
		}
	}

	rb_link_node(&iommu->node, parent, p);
	rb_insert_color(&iommu->node, &buffer->iommu_maps);

}

static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
					      unsigned int domain_no,
					      unsigned int partition_no)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;
	uint64_t key = domain_no;
	key = key << 32 | partition_no;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (key < entry->key)
			p = &(*p)->rb_left;
		else if (key > entry->key)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	return NULL;
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
			    i) {
			if (sg_dma_len(sg) == PAGE_SIZE)
				continue;
			pr_err("%s: cached mappings that will be faulted in "
			       "must have pagewise sg_lists\n", __func__);
			ret = -EINVAL;
			goto err;
		}

		ret = ion_buffer_alloc_dirty(buffer);
		if (ret)
			goto err;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here. However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg. The implicit contract here is that
	   memory coming from the heaps is ready for dma, i.e. if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (sg_dma_address(sg) == 0)
			sg_dma_address(sg) = sg_phys(sg);
	}
	ion_buffer_add(dev, buffer);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
	kfree(buffer);
	return ERR_PTR(ret);
}

/**
 * Check for delayed IOMMU unmapping. Also unmap any outstanding
 * mappings which would otherwise have been leaked.
 */
static void ion_iommu_delayed_unmap(struct ion_buffer *buffer)
{
	struct ion_iommu_map *iommu_map;
	struct rb_node *node;
	const struct rb_root *rb = &(buffer->iommu_maps);
	unsigned long ref_count;
	unsigned int delayed_unmap;

	mutex_lock(&buffer->lock);

	while ((node = rb_first(rb)) != 0) {
		iommu_map = rb_entry(node, struct ion_iommu_map, node);
		ref_count = atomic_read(&iommu_map->ref.refcount);
		delayed_unmap = iommu_map->flags & ION_IOMMU_UNMAP_DELAYED;

		if ((delayed_unmap && ref_count > 1) || !delayed_unmap) {
			pr_err("%s: Virtual memory address leak in domain %u, partition %u\n",
				__func__, iommu_map->domain_info[DI_DOMAIN_NUM],
				iommu_map->domain_info[DI_PARTITION_NUM]);
		}
		/* set ref count to 1 to force release */
		kref_init(&iommu_map->ref);
		kref_put(&iommu_map->ref, ion_iommu_release);
	}

	mutex_unlock(&buffer->lock);
}

static void ion_delayed_unsecure(struct ion_buffer *buffer)
{
	if (buffer->heap->ops->unsecure_buffer)
		buffer->heap->ops->unsecure_buffer(buffer, 1);
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);

	ion_delayed_unsecure(buffer);
	ion_iommu_delayed_unmap(buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	if (buffer->flags & ION_FLAG_CACHED)
		kfree(buffer->dirty);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_put(buffer);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	unsigned long secure_allocation = flags & ION_SECURE;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	/*
	 * For now, we don't want to fault in pages individually since
	 * clients are already doing manual cache maintenance. In
	 * other words, the implicit caching infrastructure is in
	 * place (in code) but should not be used.
	 */
	flags |= ION_FLAG_CACHED_NEEDS_SYNC;

	pr_debug("%s: len %d align %d heap_mask %u flags %x\n", __func__, len,
		 align, heap_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order. If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it. Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap type */
		if (!((1 << heap->id) & heap_mask))
			continue;
		/* Do not allow un-secure heap if secure is specified */
		if (secure_allocation &&
		    !ion_heap_allow_secure_allocation(heap->type))
			continue;
		trace_ion_alloc_buffer_start(client->name, heap->name, len,
					     heap_mask, flags);
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		trace_ion_alloc_buffer_end(client->name, heap->name, len,
					   heap_mask, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;

		trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
					    heap_mask, flags, PTR_ERR(buffer));
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
			int ret_value = snprintf(&dbg_str[dbg_str_idx],
						len_left, "%s ", heap->name);
			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
			}
		}
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_mask, flags, -ENODEV);
		return ERR_PTR(-ENODEV);
	}

	if (IS_ERR(buffer)) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_mask, flags, PTR_ERR(buffer));
		pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
			 "0x%x) from heap(s) %sfor client %s with heap "
			 "mask 0x%x\n",
			len, align, dbg_str, client->name, client->heap_mask);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}


	return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		mutex_unlock(&client->lock);
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);
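
/*
 * A minimal in-kernel usage sketch of the allocation API above (compiled
 * out, not part of the driver).  It assumes a client obtained elsewhere via
 * ion_client_create() and a caller-supplied heap mask valid for the target;
 * the function name is illustrative only.
 */
#if 0
static int example_ion_alloc_and_free(struct ion_client *client,
				      unsigned int heap_mask)
{
	struct ion_handle *handle;

	/* request one page, page aligned, uncached (flags == 0) */
	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE, heap_mask, 0);
	if (IS_ERR_OR_NULL(handle))
		return -ENOMEM;

	/* ... use the buffer via ion_map_kernel()/ion_map_iommu() ... */

	ion_free(client, handle);
	return 0;
}
#endif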

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);
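
/*
 * Sketch of querying the physical address of a handle (compiled out, not
 * part of the driver).  It only succeeds for heaps that implement ->phys(),
 * e.g. physically contiguous carveout-style heaps; the function name is
 * illustrative only.
 */
#if 0
static int example_ion_phys(struct ion_client *client,
			    struct ion_handle *handle)
{
	ion_phys_addr_t pa;
	size_t size;
	int ret;

	ret = ion_phys(client, handle, &pa, &size);
	if (ret)
		return ret;	/* -ENODEV if the heap has no ->phys() */

	pr_debug("buffer at 0x%lx, %zu bytes\n", (unsigned long)pa, size);
	return 0;
}
#endif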

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
		int domain_num, int partition_num, unsigned long align,
		unsigned long iova_length, unsigned long flags,
		unsigned long *iova)
{
	struct ion_iommu_map *data;
	int ret;

	data = kmalloc(sizeof(*data), GFP_ATOMIC);

	if (!data)
		return ERR_PTR(-ENOMEM);

	data->buffer = buffer;
	iommu_map_domain(data) = domain_num;
	iommu_map_partition(data) = partition_num;

	ret = buffer->heap->ops->map_iommu(buffer, data,
						domain_num,
						partition_num,
						align,
						iova_length,
						flags);

	if (ret)
		goto out;

	kref_init(&data->ref);
	*iova = data->iova_addr;

	ion_iommu_add(buffer, data);

	return data;

out:
	kfree(data);
	return ERR_PTR(ret);
}

int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags, unsigned long iommu_flags)
{
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	int ret = 0;

	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: client pointer is invalid\n", __func__);
		return -EINVAL;
	}
	if (IS_ERR_OR_NULL(handle)) {
		pr_err("%s: handle pointer is invalid\n", __func__);
		return -EINVAL;
	}
	if (IS_ERR_OR_NULL(handle->buffer)) {
		pr_err("%s: buffer pointer is invalid\n", __func__);
		return -EINVAL;
	}

	if (ION_IS_CACHED(flags)) {
		pr_err("%s: Cannot map iommu as cached.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	/*
	 * If clients don't want a custom iova length, just use whatever
	 * the buffer size is
	 */
	if (!iova_length)
		iova_length = buffer->size;

	if (buffer->size > iova_length) {
		pr_debug("%s: iova length %lx is not at least buffer size"
			" %x\n", __func__, iova_length, buffer->size);
		ret = -EINVAL;
		goto out;
	}

	if (buffer->size & ~PAGE_MASK) {
		pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
			buffer->size, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	if (iova_length & ~PAGE_MASK) {
		pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
			iova_length, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
	if (!iommu_map) {
		iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
					    align, iova_length, flags, iova);
		if (!IS_ERR_OR_NULL(iommu_map)) {
			iommu_map->flags = iommu_flags;

			if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
				kref_get(&iommu_map->ref);
		} else {
			ret = PTR_ERR(iommu_map);
		}
	} else {
		if (iommu_map->flags != iommu_flags) {
			pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
				__func__, handle,
				iommu_map->flags, iommu_flags);
			ret = -EINVAL;
		} else if (iommu_map->mapped_size != iova_length) {
			pr_err("%s: handle %p is already mapped with length"
					" %x, trying to map with length %lx\n",
				__func__, handle, iommu_map->mapped_size,
				iova_length);
			ret = -EINVAL;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	if (!ret)
		buffer->iommu_map_cnt++;
	*buffer_size = buffer->size;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
EXPORT_SYMBOL(ion_map_iommu);

static void ion_iommu_release(struct kref *kref)
{
	struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
						ref);
	struct ion_buffer *buffer = map->buffer;

	rb_erase(&map->node, &buffer->iommu_maps);
	buffer->heap->ops->unmap_iommu(map);
	kfree(map);
}

void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num)
{
	struct ion_iommu_map *iommu_map;
	struct ion_buffer *buffer;

	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: client pointer is invalid\n", __func__);
		return;
	}
	if (IS_ERR_OR_NULL(handle)) {
		pr_err("%s: handle pointer is invalid\n", __func__);
		return;
	}
	if (IS_ERR_OR_NULL(handle->buffer)) {
		pr_err("%s: buffer pointer is invalid\n", __func__);
		return;
	}

	mutex_lock(&client->lock);
	buffer = handle->buffer;

	mutex_lock(&buffer->lock);

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);

	if (!iommu_map) {
		WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
				domain_num, partition_num, buffer);
		goto out;
	}

	kref_put(&iommu_map->ref, ion_iommu_release);

	buffer->iommu_map_cnt--;
out:
	mutex_unlock(&buffer->lock);

	mutex_unlock(&client->lock);

}
EXPORT_SYMBOL(ion_unmap_iommu);
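
/*
 * Sketch of mapping a buffer into an SMMU domain with ion_map_iommu() and
 * releasing it again (compiled out, not part of the driver).  The domain and
 * partition numbers come from mach/iommu_domains.h on a given target; the
 * values and the function name used here are placeholders.
 */
#if 0
static int example_ion_map_iommu(struct ion_client *client,
				 struct ion_handle *handle,
				 int domain_num, int partition_num)
{
	unsigned long iova, buffer_size;
	int ret;

	/* uncached mapping, torn down on the matching ion_unmap_iommu() */
	ret = ion_map_iommu(client, handle, domain_num, partition_num,
			    SZ_4K, 0, &iova, &buffer_size, 0, 0);
	if (ret)
		return ret;

	/* ... hand iova/buffer_size to the hardware block ... */

	ion_unmap_iommu(client, handle, domain_num, partition_num);
	return 0;
}
#endif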
862
Mitchel Humpherys911b4b72012-09-12 14:42:50 -0700863void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700864{
865 struct ion_buffer *buffer;
Laura Abbottb14ed962012-01-30 14:18:08 -0800866 void *vaddr;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700867
868 mutex_lock(&client->lock);
869 if (!ion_handle_validate(client, handle)) {
Laura Abbottb14ed962012-01-30 14:18:08 -0800870 pr_err("%s: invalid handle passed to map_kernel.\n",
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700871 __func__);
Rebecca Schultz Zavine6ee1242011-06-30 12:19:55 -0700872 mutex_unlock(&client->lock);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700873 return ERR_PTR(-EINVAL);
874 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700875
Laura Abbottb14ed962012-01-30 14:18:08 -0800876 buffer = handle->buffer;
877
878 if (!handle->buffer->heap->ops->map_kernel) {
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700879 pr_err("%s: map_kernel is not implemented by this heap.\n",
880 __func__);
Rebecca Schultz Zavine6ee1242011-06-30 12:19:55 -0700881 mutex_unlock(&client->lock);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700882 return ERR_PTR(-ENODEV);
883 }
Laura Abbott894fd582011-08-19 13:33:56 -0700884
Laura Abbottb14ed962012-01-30 14:18:08 -0800885 mutex_lock(&buffer->lock);
886 vaddr = ion_handle_kmap_get(handle);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700887 mutex_unlock(&buffer->lock);
888 mutex_unlock(&client->lock);
Laura Abbottb14ed962012-01-30 14:18:08 -0800889 return vaddr;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700890}
Olav Hauganbd453a92012-07-05 14:21:34 -0700891EXPORT_SYMBOL(ion_map_kernel);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700892
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700893void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
894{
895 struct ion_buffer *buffer;
896
897 mutex_lock(&client->lock);
898 buffer = handle->buffer;
899 mutex_lock(&buffer->lock);
Laura Abbottb14ed962012-01-30 14:18:08 -0800900 ion_handle_kmap_put(handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700901 mutex_unlock(&buffer->lock);
902 mutex_unlock(&client->lock);
903}
Olav Hauganbd453a92012-07-05 14:21:34 -0700904EXPORT_SYMBOL(ion_unmap_kernel);
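
/*
 * Sketch of a temporary kernel mapping (compiled out, not part of the
 * driver): ion_map_kernel() returns a kernel virtual address that stays
 * valid until the matching ion_unmap_kernel().  The function name is
 * illustrative only.
 */
#if 0
static int example_ion_map_kernel(struct ion_client *client,
				  struct ion_handle *handle, size_t size)
{
	void *vaddr;

	vaddr = ion_map_kernel(client, handle);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr ? PTR_ERR(vaddr) : -ENOMEM;

	memset(vaddr, 0, size);		/* CPU access through the mapping */

	ion_unmap_kernel(client, handle);
	return 0;
}
#endif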

int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
			void *uaddr, unsigned long offset, unsigned long len,
			unsigned int cmd)
{
	struct ion_buffer *buffer;
	int ret = -EINVAL;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to do_cache_op.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!ION_IS_CACHED(buffer->flags)) {
		ret = 0;
		goto out;
	}

	if (!handle->buffer->heap->ops->cache_op) {
		pr_err("%s: cache_op is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}


	ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
					  offset, len, cmd);

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;

}
EXPORT_SYMBOL(ion_do_cache_op);
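
/*
 * Sketch of a cache maintenance call (compiled out, not part of the driver).
 * The cmd value is assumed to be one of the cache commands from
 * linux/msm_ion.h (e.g. ION_IOC_CLEAN_CACHES); for uncached buffers
 * ion_do_cache_op() is a no-op and returns 0.  The function name is
 * illustrative only.
 */
#if 0
static int example_ion_clean_caches(struct ion_client *client,
				    struct ion_handle *handle,
				    void *vaddr, unsigned long len)
{
	return ion_do_cache_op(client, handle, vaddr, 0, len,
			       ION_IOC_CLEAN_CACHES);
}
#endif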

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	struct rb_node *n2;

	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
			"heap_name", "size_in_bytes", "handle refcount",
			"buffer", "physical", "[domain,partition] - virt");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		seq_printf(s, "%16.16s: %16x : %16d : %12p",
			   handle->buffer->heap->name,
			   handle->buffer->size,
			   atomic_read(&handle->ref.refcount),
			   handle->buffer);

		if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
			type == ION_HEAP_TYPE_CARVEOUT ||
			type == (enum ion_heap_type) ION_HEAP_TYPE_CP)
			seq_printf(s, " : %12pa", &handle->buffer->priv_phys);
		else
			seq_printf(s, " : %12s", "N/A");

		for (n2 = rb_first(&handle->buffer->iommu_maps); n2;
			n2 = rb_next(n2)) {
			struct ion_iommu_map *imap =
				rb_entry(n2, struct ion_iommu_map, node);
			seq_printf(s, " : [%d,%d] - %8lx",
				imap->domain_info[DI_DOMAIN_NUM],
				imap->domain_info[DI_PARTITION_NUM],
				imap->iova_addr);
		}
		seq_printf(s, "\n");
	}
	mutex_unlock(&client->lock);

	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;
	unsigned int name_len;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	name_len = strnlen(name, 64);

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);

	client->name = kzalloc(name_len+1, GFP_KERNEL);
	if (!client->name) {
		put_task_struct(current->group_leader);
		kfree(client);
		return ERR_PTR(-ENOMEM);
	} else {
		strlcpy(client->name, name, name_len+1);
	}

	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	mutex_lock(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);


	client->debug_root = debugfs_create_file(name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

/**
 * ion_mark_dangling_buffers_locked() - Mark dangling buffers
 * @dev:	the ion device whose buffers will be searched
 *
 * Sets marked=1 for all known buffers associated with `dev' that no
 * longer have a handle pointing to them. dev->lock should be held
 * across a call to this function (and should only be unlocked after
 * checking for marked buffers).
 */
static void ion_mark_dangling_buffers_locked(struct ion_device *dev)
{
	struct rb_node *n, *n2;
	/* mark all buffers as 1 */
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		buf->marked = 1;
	}

	/* now see which buffers we can access */
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);

		mutex_lock(&client->lock);
		for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
			struct ion_handle *handle
				= rb_entry(n2, struct ion_handle, node);

			handle->buffer->marked = 0;

		}
		mutex_unlock(&client->lock);

	}
}

#ifdef CONFIG_ION_LEAK_CHECK
static u32 ion_debug_check_leaks_on_destroy;

static int ion_check_for_and_print_leaks(struct ion_device *dev)
{
	struct rb_node *n;
	int num_leaks = 0;

	if (!ion_debug_check_leaks_on_destroy)
		return 0;

	/* check for leaked buffers (those that no longer have a
	 * handle pointing to them) */
	ion_mark_dangling_buffers_locked(dev);

	/* Anyone still marked as a 1 means a leaked handle somewhere */
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		if (buf->marked == 1) {
			pr_info("Leaked ion buffer at %p\n", buf);
			num_leaks++;
		}
	}
	return num_leaks;
}
static void setup_ion_leak_check(struct dentry *debug_root)
{
	debugfs_create_bool("check_leaks_on_destroy", 0664, debug_root,
			    &ion_debug_check_leaks_on_destroy);
}
#else
static int ion_check_for_and_print_leaks(struct ion_device *dev)
{
	return 0;
}
static void setup_ion_leak_check(struct dentry *debug_root)
{
}
#endif

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;
	int num_leaks;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);

	num_leaks = ion_check_for_and_print_leaks(dev);

	mutex_unlock(&dev->lock);

	if (num_leaks) {
		struct task_struct *current_task = current;
		char current_task_name[TASK_COMM_LEN];
		get_task_comm(current_task_name, current_task);
		WARN(1, "%s: Detected %d leaked ion buffer%s.\n",
			__func__, num_leaks, num_leaks == 1 ? "" : "s");
		pr_info("task name at time of leak: %s, pid: %d\n",
			current_task_name, current_task->pid);
	}

	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);
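
/*
 * Sketch of an in-kernel client lifetime (compiled out, not part of the
 * driver).  The ion_device pointer is assumed to come from the platform's
 * msm_ion setup; the names example_ion_client_lifetime and "my_driver" are
 * placeholders, and a heap_mask of -1 allows every heap type.
 */
#if 0
static int example_ion_client_lifetime(struct ion_device *idev)
{
	struct ion_client *client;

	/* the name shows up under the ion debugfs directory */
	client = ion_client_create(idev, -1, "my_driver");
	if (IS_ERR_OR_NULL(client))
		return client ? PTR_ERR(client) : -ENOMEM;

	/* ... ion_alloc()/ion_free() against this client ... */

	ion_client_destroy(client);
	return 0;
}
#endif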

int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
			unsigned long *flags)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*flags = buffer->flags;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_flags);

int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
			unsigned long *size)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*size = buffer->size;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_size);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);
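
/*
 * Sketch of walking a handle's scatterlist (compiled out, not part of the
 * driver).  Because the heaps pre-populate dma addresses in
 * ion_buffer_create(), sg_dma_address() is usable here without a
 * dma_map_sg() call; the function name is illustrative only.
 */
#if 0
static void example_ion_walk_sg(struct ion_client *client,
				struct ion_handle *handle)
{
	struct sg_table *table;
	struct scatterlist *sg;
	int i;

	table = ion_sg_table(client, handle);
	if (IS_ERR_OR_NULL(table))
		return;

	for_each_sg(table->sgl, sg, table->nents, i)
		pr_debug("chunk %d: addr 0x%lx len %u\n", i,
			 (unsigned long)sg_dma_address(sg), sg_dma_len(sg));
}
#endif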

struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
					size_t chunk_size, size_t total_size)
{
	struct sg_table *table;
	int i, n_chunks, ret;
	struct scatterlist *sg;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	n_chunks = DIV_ROUND_UP(total_size, chunk_size);
	pr_debug("creating sg_table with %d chunks\n", n_chunks);

	ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
	if (ret)
		goto err0;

	for_each_sg(table->sgl, sg, table->nents, i) {
		dma_addr_t addr = buffer_base + i * chunk_size;
		sg_dma_address(sg) = addr;
	}

	return table;
err0:
	kfree(table);
	return ERR_PTR(ret);
}
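
/*
 * Sketch of how a caller might use the helper above to describe a physically
 * contiguous region as fixed-size chunks (compiled out, not part of the
 * driver).  The base address, chunk size and function name are placeholders.
 */
#if 0
static struct sg_table *example_describe_carveout(phys_addr_t base,
						  size_t size)
{
	/* one scatterlist entry per 1 MB chunk of the region */
	return ion_create_chunked_sg_table(base, SZ_1M, size);
}
#endif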

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
	unsigned long pages = buffer->sg_table->nents;
	unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;

	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
	if (!buffer->dirty)
		return -ENOMEM;
	return 0;
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;
	struct ion_vma_list *vma_list;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (!test_bit(i, buffer->dirty))
			continue;
		dma_sync_sg_for_device(dev, sg, 1, dir);
		clear_bit(i, buffer->dirty);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct scatterlist *sg;
	int i;

	mutex_lock(&buffer->lock);
	set_bit(vmf->pgoff, buffer->dirty);

	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (i != vmf->pgoff)
			continue;
		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			       sg_page(sg));
		break;
	}
	mutex_unlock(&buffer->lock);
	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);

	if (buffer->heap->ops->unmap_user)
		buffer->heap->ops->unmap_user(buffer->heap, buffer);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		vma->vm_flags |= VM_MIXEDMAP;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001454
Laura Abbottb14ed962012-01-30 14:18:08 -08001455static void ion_dma_buf_release(struct dma_buf *dmabuf)
1456{
1457 struct ion_buffer *buffer = dmabuf->priv;
1458 ion_buffer_put(buffer);
1459}
1460
1461static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1462{
1463 struct ion_buffer *buffer = dmabuf->priv;
1464 return buffer->vaddr + offset * PAGE_SIZE;
1465}
1466
1467static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1468 void *ptr)
1469{
1470 return;
1471}
1472
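/*
 * CPU access bracketing: begin_cpu_access takes a kernel mapping of the
 * buffer (via the kmap refcount) so the kmap/kmap_atomic hooks above
 * return a valid vaddr; end_cpu_access drops that reference again.
 */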
1473static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1474 size_t len,
1475 enum dma_data_direction direction)
1476{
1477 struct ion_buffer *buffer = dmabuf->priv;
1478 void *vaddr;
1479
1480 if (!buffer->heap->ops->map_kernel) {
1481 pr_err("%s: map kernel is not implemented by this heap.\n",
1482 __func__);
1483 return -ENODEV;
1484 }
1485
1486 mutex_lock(&buffer->lock);
1487 vaddr = ion_buffer_kmap_get(buffer);
1488 mutex_unlock(&buffer->lock);
1489 if (IS_ERR(vaddr))
1490 return PTR_ERR(vaddr);
1491 if (!vaddr)
1492 return -ENOMEM;
1493 return 0;
1494}
1495
1496static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1497 size_t len,
1498 enum dma_data_direction direction)
1499{
1500 struct ion_buffer *buffer = dmabuf->priv;
1501
1502 mutex_lock(&buffer->lock);
1503 ion_buffer_kmap_put(buffer);
1504 mutex_unlock(&buffer->lock);
1505}
1506
1507struct dma_buf_ops dma_buf_ops = {
1508 .map_dma_buf = ion_map_dma_buf,
1509 .unmap_dma_buf = ion_unmap_dma_buf,
1510 .mmap = ion_mmap,
1511 .release = ion_dma_buf_release,
1512 .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1513 .end_cpu_access = ion_dma_buf_end_cpu_access,
1514 .kmap_atomic = ion_dma_buf_kmap,
1515 .kunmap_atomic = ion_dma_buf_kunmap,
1516 .kmap = ion_dma_buf_kmap,
1517 .kunmap = ion_dma_buf_kunmap,
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001518};
1519
Laura Abbottb14ed962012-01-30 14:18:08 -08001520int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
1521{
1522 struct ion_buffer *buffer;
1523 struct dma_buf *dmabuf;
1524 bool valid_handle;
1525 int fd;
1526
1527 mutex_lock(&client->lock);
1528 valid_handle = ion_handle_validate(client, handle);
1529 mutex_unlock(&client->lock);
1530 if (!valid_handle) {
Olav Haugan0df59942012-07-05 14:27:30 -07001531 WARN(1, "%s: invalid handle passed to share.\n", __func__);
Laura Abbottb14ed962012-01-30 14:18:08 -08001532 return -EINVAL;
1533 }
1534
1535 buffer = handle->buffer;
1536 ion_buffer_get(buffer);
1537 dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1538 if (IS_ERR(dmabuf)) {
1539 ion_buffer_put(buffer);
1540 return PTR_ERR(dmabuf);
1541 }
1542 fd = dma_buf_fd(dmabuf, O_CLOEXEC);
Laura Abbottc2641f72012-08-01 18:06:18 -07001543 if (fd < 0)
Laura Abbottb14ed962012-01-30 14:18:08 -08001544 dma_buf_put(dmabuf);
Laura Abbottc2641f72012-08-01 18:06:18 -07001545
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001546 return fd;
Laura Abbottb14ed962012-01-30 14:18:08 -08001547}
Olav Hauganbd453a92012-07-05 14:21:34 -07001548EXPORT_SYMBOL(ion_share_dma_buf);
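/*
 * Illustrative sketch only (error handling trimmed): a kernel client
 * holding a valid handle can export the underlying buffer as a dma_buf
 * fd and hand it to userspace or to another driver, which may then
 * re-import it with ion_import_dma_buf():
 *
 *	int fd = ion_share_dma_buf(client, handle);
 *	if (fd < 0)
 *		return fd;
 */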
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001549
Laura Abbottb14ed962012-01-30 14:18:08 -08001550struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1551{
1552 struct dma_buf *dmabuf;
1553 struct ion_buffer *buffer;
1554 struct ion_handle *handle;
1555
1556 dmabuf = dma_buf_get(fd);
1557 if (IS_ERR_OR_NULL(dmabuf))
1558 return ERR_PTR(PTR_ERR(dmabuf));
1559 /* if this memory came from ion */
1560
1561 if (dmabuf->ops != &dma_buf_ops) {
1562 pr_err("%s: can not import dmabuf from another exporter\n",
1563 __func__);
1564 dma_buf_put(dmabuf);
1565 return ERR_PTR(-EINVAL);
1566 }
1567 buffer = dmabuf->priv;
1568
1569 mutex_lock(&client->lock);
1570 /* if a handle exists for this buffer just take a reference to it */
1571 handle = ion_handle_lookup(client, buffer);
1572 if (!IS_ERR_OR_NULL(handle)) {
1573 ion_handle_get(handle);
1574 goto end;
1575 }
1576 handle = ion_handle_create(client, buffer);
1577 if (IS_ERR_OR_NULL(handle))
1578 goto end;
1579 ion_handle_add(client, handle);
1580end:
1581 mutex_unlock(&client->lock);
1582 dma_buf_put(dmabuf);
1583 return handle;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001584}
Olav Hauganbd453a92012-07-05 14:21:34 -07001585EXPORT_SYMBOL(ion_import_dma_buf);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001586
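/**
 * Backs the ION_IOC_SYNC ioctl: looks up the dma_buf for the fd, checks
 * that it was exported by ion, and runs dma_sync_sg_for_device() over the
 * buffer's sg list so a device sees CPU writes made through a cached
 * mapping.
 * @param client ion client issuing the sync (currently unused)
 * @param fd dma_buf file descriptor previously exported by ion
 */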
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001587static int ion_sync_for_device(struct ion_client *client, int fd)
1588{
1589 struct dma_buf *dmabuf;
1590 struct ion_buffer *buffer;
1591
1592 dmabuf = dma_buf_get(fd);
1593 if (IS_ERR_OR_NULL(dmabuf))
1594 return PTR_ERR(dmabuf);
1595
1596 /* if this memory came from ion */
1597 if (dmabuf->ops != &dma_buf_ops) {
1598 pr_err("%s: can not sync dmabuf from another exporter\n",
1599 __func__);
1600 dma_buf_put(dmabuf);
1601 return -EINVAL;
1602 }
1603 buffer = dmabuf->priv;
Rebecca Schultz Zavin3edb9002012-09-19 23:31:05 -07001604
1605 dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1606 buffer->sg_table->nents, DMA_BIDIRECTIONAL);
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001607 dma_buf_put(dmabuf);
1608 return 0;
1609}
1610
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001611static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1612{
1613 struct ion_client *client = filp->private_data;
1614
1615 switch (cmd) {
1616 case ION_IOC_ALLOC:
1617 {
1618 struct ion_allocation_data data;
1619
1620 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1621 return -EFAULT;
1622 data.handle = ion_alloc(client, data.len, data.align,
Hanumant Singh7d72bad2012-08-29 18:39:44 -07001623 data.heap_mask, data.flags);
KyongHo Cho9ae7e012011-09-07 11:27:07 +09001624
Laura Abbottb14ed962012-01-30 14:18:08 -08001625 if (IS_ERR(data.handle))
1626 return PTR_ERR(data.handle);
KyongHo Cho9ae7e012011-09-07 11:27:07 +09001627
Laura Abbottb14ed962012-01-30 14:18:08 -08001628 if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
1629 ion_free(client, data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001630 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001631 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001632 break;
1633 }
1634 case ION_IOC_FREE:
1635 {
1636 struct ion_handle_data data;
1637 bool valid;
1638
1639 if (copy_from_user(&data, (void __user *)arg,
1640 sizeof(struct ion_handle_data)))
1641 return -EFAULT;
1642 mutex_lock(&client->lock);
1643 valid = ion_handle_validate(client, data.handle);
1644 mutex_unlock(&client->lock);
1645 if (!valid)
1646 return -EINVAL;
1647 ion_free(client, data.handle);
1648 break;
1649 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001650 case ION_IOC_MAP:
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001651 case ION_IOC_SHARE:
1652 {
1653 struct ion_fd_data data;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001654 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1655 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001656
Laura Abbottb14ed962012-01-30 14:18:08 -08001657 data.fd = ion_share_dma_buf(client, data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001658 if (copy_to_user((void __user *)arg, &data, sizeof(data)))
1659 return -EFAULT;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001660 if (data.fd < 0)
1661 return data.fd;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001662 break;
1663 }
1664 case ION_IOC_IMPORT:
1665 {
1666 struct ion_fd_data data;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001667 int ret = 0;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001668 if (copy_from_user(&data, (void __user *)arg,
1669 sizeof(struct ion_fd_data)))
1670 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001671 data.handle = ion_import_dma_buf(client, data.fd);
Olav Haugan865e97f2012-05-15 14:40:11 -07001672 if (IS_ERR(data.handle)) {
1673 ret = PTR_ERR(data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001674 data.handle = NULL;
Olav Haugan865e97f2012-05-15 14:40:11 -07001675 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001676 if (copy_to_user((void __user *)arg, &data,
1677 sizeof(struct ion_fd_data)))
1678 return -EFAULT;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001679 if (ret < 0)
1680 return ret;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001681 break;
1682 }
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001683 case ION_IOC_SYNC:
1684 {
1685 struct ion_fd_data data;
1686 if (copy_from_user(&data, (void __user *)arg,
1687 sizeof(struct ion_fd_data)))
1688 return -EFAULT;
1689 ion_sync_for_device(client, data.fd);
1690 break;
1691 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001692 case ION_IOC_CUSTOM:
1693 {
1694 struct ion_device *dev = client->dev;
1695 struct ion_custom_data data;
1696
1697 if (!dev->custom_ioctl)
1698 return -ENOTTY;
1699 if (copy_from_user(&data, (void __user *)arg,
1700 sizeof(struct ion_custom_data)))
1701 return -EFAULT;
1702 return dev->custom_ioctl(client, data.cmd, data.arg);
1703 }
Laura Abbottabcb6f72011-10-04 16:26:49 -07001704 case ION_IOC_CLEAN_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001705 return client->dev->custom_ioctl(client,
1706 ION_IOC_CLEAN_CACHES, arg);
Laura Abbottabcb6f72011-10-04 16:26:49 -07001707 case ION_IOC_INV_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001708 return client->dev->custom_ioctl(client,
1709 ION_IOC_INV_CACHES, arg);
Laura Abbottabcb6f72011-10-04 16:26:49 -07001710 case ION_IOC_CLEAN_INV_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001711 return client->dev->custom_ioctl(client,
1712 ION_IOC_CLEAN_INV_CACHES, arg);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001713 default:
1714 return -ENOTTY;
1715 }
1716 return 0;
1717}
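/*
 * Typical userspace sequence against the ion misc device (usually
 * /dev/ion), sketched for illustration only; error handling is omitted
 * and the heap mask and flags depend on the target's heap configuration:
 *
 *	struct ion_allocation_data alloc_data = {
 *		.len = length,
 *		.align = 0,
 *		.heap_mask = heap_mask,
 *		.flags = flags,
 *	};
 *	struct ion_fd_data fd_data;
 *	int ionfd = open("/dev/ion", O_RDONLY);
 *
 *	ioctl(ionfd, ION_IOC_ALLOC, &alloc_data);
 *	fd_data.handle = alloc_data.handle;
 *	ioctl(ionfd, ION_IOC_SHARE, &fd_data);
 *	ptr = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd_data.fd, 0);
 */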
1718
1719static int ion_release(struct inode *inode, struct file *file)
1720{
1721 struct ion_client *client = file->private_data;
1722
1723 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbottb14ed962012-01-30 14:18:08 -08001724 ion_client_destroy(client);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001725 return 0;
1726}
1727
1728static int ion_open(struct inode *inode, struct file *file)
1729{
1730 struct miscdevice *miscdev = file->private_data;
1731 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1732 struct ion_client *client;
Laura Abbotteed86032011-12-05 15:32:36 -08001733 char debug_name[64];
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001734
1735 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbotteed86032011-12-05 15:32:36 -08001736 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1737 client = ion_client_create(dev, -1, debug_name);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001738 if (IS_ERR_OR_NULL(client))
1739 return PTR_ERR(client);
1740 file->private_data = client;
1741
1742 return 0;
1743}
1744
1745static const struct file_operations ion_fops = {
1746 .owner = THIS_MODULE,
1747 .open = ion_open,
1748 .release = ion_release,
1749 .unlocked_ioctl = ion_ioctl,
1750};
1751
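/**
 * Sum the sizes of all handles a client holds against one heap.
 * Used by the per-heap debugfs files.
 * @param client client whose handle tree is walked
 * @param id heap id to total up
 * @return total size in bytes
 */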
1752static size_t ion_debug_heap_total(struct ion_client *client,
Laura Abbott3647ac32011-10-31 14:09:53 -07001753 enum ion_heap_ids id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001754{
1755 size_t size = 0;
1756 struct rb_node *n;
1757
1758 mutex_lock(&client->lock);
1759 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1760 struct ion_handle *handle = rb_entry(n,
1761 struct ion_handle,
1762 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001763 if (handle->buffer->heap->id == id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001764 size += handle->buffer->size;
1765 }
1766 mutex_unlock(&client->lock);
1767 return size;
1768}
1769
Olav Haugan0671b9a2012-05-25 11:58:56 -07001770/**
1771 * Searches through a client's handles to find if the buffer is owned
1772 * by this client. Used for debug output.
1773 * @param client pointer to candidate owner of buffer
1774 * @param buf pointer to buffer that we are trying to find the owner of
1775 * @return 1 if found, 0 otherwise
1776 */
1777static int ion_debug_find_buffer_owner(const struct ion_client *client,
1778 const struct ion_buffer *buf)
1779{
1780 struct rb_node *n;
1781
1782 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1783 const struct ion_handle *handle = rb_entry(n,
1784 const struct ion_handle,
1785 node);
1786 if (handle->buffer == buf)
1787 return 1;
1788 }
1789 return 0;
1790}
1791
1792/**
1793 * Adds mem_map_data pointer to the tree of mem_map
1794 * Used for debug output.
1795 * @param mem_map The mem_map tree
1796 * @param data The new data to add to the tree
1797 */
1798static void ion_debug_mem_map_add(struct rb_root *mem_map,
1799 struct mem_map_data *data)
1800{
1801 struct rb_node **p = &mem_map->rb_node;
1802 struct rb_node *parent = NULL;
1803 struct mem_map_data *entry;
1804
1805 while (*p) {
1806 parent = *p;
1807 entry = rb_entry(parent, struct mem_map_data, node);
1808
1809 if (data->addr < entry->addr) {
1810 p = &(*p)->rb_left;
1811 } else if (data->addr > entry->addr) {
1812 p = &(*p)->rb_right;
1813 } else {
1814 pr_err("%s: mem_map_data already found.", __func__);
1815 BUG();
1816 }
1817 }
1818 rb_link_node(&data->node, parent, p);
1819 rb_insert_color(&data->node, mem_map);
1820}
1821
1822/**
1823 * Search for an owner of a buffer by iterating over all ION clients.
1824 * @param dev ion device containing pointers to all the clients.
1825 * @param buffer pointer to buffer we are trying to find the owner of.
1826 * @return name of owner.
1827 */
1828const char *ion_debug_locate_owner(const struct ion_device *dev,
1829 const struct ion_buffer *buffer)
1830{
1831 struct rb_node *j;
1832 const char *client_name = NULL;
1833
Laura Abbottb14ed962012-01-30 14:18:08 -08001834 for (j = rb_first(&dev->clients); j && !client_name;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001835 j = rb_next(j)) {
1836 struct ion_client *client = rb_entry(j, struct ion_client,
1837 node);
1838 if (ion_debug_find_buffer_owner(client, buffer))
1839 client_name = client->name;
1840 }
1841 return client_name;
1842}
1843
1844/**
1845 * Create a mem_map of the heap.
1846 * @param s seq_file to log error message to.
1847 * @param heap The heap to create mem_map for.
1848 * @param mem_map The mem map to be created.
1849 */
1850void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
1851 struct rb_root *mem_map)
1852{
1853 struct ion_device *dev = heap->dev;
1854 struct rb_node *n;
Chintan Pandyadaf75622013-01-29 19:40:01 +05301855 size_t size;
1856
1857 if (!heap->ops->phys)
1858 return;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001859
1860 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1861 struct ion_buffer *buffer =
1862 rb_entry(n, struct ion_buffer, node);
1863 if (buffer->heap->id == heap->id) {
1864 struct mem_map_data *data =
1865 kzalloc(sizeof(*data), GFP_KERNEL);
1866 if (!data) {
1867 seq_printf(s, "ERROR: out of memory. "
1868 "Part of memory map will not be logged\n");
1869 break;
1870 }
Chintan Pandyadaf75622013-01-29 19:40:01 +05301871
1872 buffer->heap->ops->phys(buffer->heap, buffer,
1873 &(data->addr), &size);
1874 data->size = (unsigned long) size;
1875 data->addr_end = data->addr + data->size - 1;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001876 data->client_name = ion_debug_locate_owner(dev, buffer);
1877 ion_debug_mem_map_add(mem_map, data);
1878 }
1879 }
1880}
1881
1882/**
1883 * Free the memory allocated by ion_debug_mem_map_create
1884 * @param mem_map The mem map to free.
1885 */
1886static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
1887{
1888 if (mem_map) {
1889 struct rb_node *n;
1890 while ((n = rb_first(mem_map)) != 0) {
1891 struct mem_map_data *data =
1892 rb_entry(n, struct mem_map_data, node);
1893 rb_erase(&data->node, mem_map);
1894 kfree(data);
1895 }
1896 }
1897}
1898
1899/**
1900 * Print heap debug information.
1901 * @param s seq_file to log message to.
1902 * @param heap pointer to heap that we will print debug information for.
1903 */
1904static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
1905{
1906 if (heap->ops->print_debug) {
1907 struct rb_root mem_map = RB_ROOT;
1908 ion_debug_mem_map_create(s, heap, &mem_map);
1909 heap->ops->print_debug(heap, s, &mem_map);
1910 ion_debug_mem_map_destroy(&mem_map);
1911 }
1912}
1913
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001914static int ion_debug_heap_show(struct seq_file *s, void *unused)
1915{
1916 struct ion_heap *heap = s->private;
1917 struct ion_device *dev = heap->dev;
1918 struct rb_node *n;
1919
Olav Haugane4900b52012-05-25 11:58:03 -07001920 mutex_lock(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001921 seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001922
Laura Abbottb14ed962012-01-30 14:18:08 -08001923 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001924 struct ion_client *client = rb_entry(n, struct ion_client,
1925 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001926 size_t size = ion_debug_heap_total(client, heap->id);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001927 if (!size)
1928 continue;
Laura Abbottb14ed962012-01-30 14:18:08 -08001929 if (client->task) {
1930 char task_comm[TASK_COMM_LEN];
1931
1932 get_task_comm(task_comm, client->task);
1933 seq_printf(s, "%16.s %16u %16u\n", task_comm,
1934 client->pid, size);
1935 } else {
1936 seq_printf(s, "%16.s %16u %16u\n", client->name,
1937 client->pid, size);
1938 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001939 }
Olav Haugan0671b9a2012-05-25 11:58:56 -07001940 ion_heap_print_debug(s, heap);
Olav Haugane4900b52012-05-25 11:58:03 -07001941 mutex_unlock(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001942 return 0;
1943}
1944
1945static int ion_debug_heap_open(struct inode *inode, struct file *file)
1946{
1947 return single_open(file, ion_debug_heap_show, inode->i_private);
1948}
1949
1950static const struct file_operations debug_heap_fops = {
1951 .open = ion_debug_heap_open,
1952 .read = seq_read,
1953 .llseek = seq_lseek,
1954 .release = single_release,
1955};
1956
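/**
 * Register a heap with the ion device.  Heaps are kept in an rbtree keyed
 * by heap id, and each one gets a debugfs file (named after the heap)
 * showing per-client usage.  Typically called by the platform ion driver
 * as it creates its heaps.
 */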
1957void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1958{
1959 struct rb_node **p = &dev->heaps.rb_node;
1960 struct rb_node *parent = NULL;
1961 struct ion_heap *entry;
1962
Laura Abbottb14ed962012-01-30 14:18:08 -08001963 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1964 !heap->ops->unmap_dma)
1965 pr_err("%s: can not add heap with invalid ops struct.\n",
1966 __func__);
1967
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001968 heap->dev = dev;
1969 mutex_lock(&dev->lock);
1970 while (*p) {
1971 parent = *p;
1972 entry = rb_entry(parent, struct ion_heap, node);
1973
1974 if (heap->id < entry->id) {
1975 p = &(*p)->rb_left;
1976 } else if (heap->id > entry->id) {
1977 p = &(*p)->rb_right;
1978 } else {
1979 pr_err("%s: cannot insert multiple heaps with id %d\n",
1980 __func__, heap->id);
1981 goto end;
1982 }
1983 }
1984
1985 rb_link_node(&heap->node, parent, p);
1986 rb_insert_color(&heap->node, &dev->heaps);
1987 debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1988 &debug_heap_fops);
1989end:
1990 mutex_unlock(&dev->lock);
1991}
1992
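/*
 * Secure/unsecure operations hand a buffer or a whole heap over to (or
 * back from) content protection.  They are only meaningful for heap types
 * that allow it (see ion_heap_allow_handle_secure()/_heap_secure()) and
 * are forwarded to the heap's secure_buffer/unsecure_buffer ops.
 */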
Laura Abbott93619302012-10-11 11:51:40 -07001993int ion_secure_handle(struct ion_client *client, struct ion_handle *handle,
1994 int version, void *data, int flags)
1995{
1996 int ret = -EINVAL;
1997 struct ion_heap *heap;
1998 struct ion_buffer *buffer;
1999
2000 mutex_lock(&client->lock);
2001 if (!ion_handle_validate(client, handle)) {
2002 WARN(1, "%s: invalid handle passed to secure.\n", __func__);
2003 goto out_unlock;
2004 }
2005
2006 buffer = handle->buffer;
2007 heap = buffer->heap;
2008
Laura Abbott4afbd8b2013-02-15 09:21:33 -08002009 if (!ion_heap_allow_handle_secure(heap->type)) {
Laura Abbott93619302012-10-11 11:51:40 -07002010 pr_err("%s: cannot secure buffer from non-secure heap\n",
2011 __func__);
2012 goto out_unlock;
2013 }
2014
2015 BUG_ON(!buffer->heap->ops->secure_buffer);
2016 /*
2017 * Protect the handle via the client lock to ensure we aren't
2018 * racing with free
2019 */
2020 ret = buffer->heap->ops->secure_buffer(buffer, version, data, flags);
2021
2022out_unlock:
2023 mutex_unlock(&client->lock);
2024 return ret;
2025}
2026
2027int ion_unsecure_handle(struct ion_client *client, struct ion_handle *handle)
2028{
2029 int ret = -EINVAL;
2030 struct ion_heap *heap;
2031 struct ion_buffer *buffer;
2032
2033 mutex_lock(&client->lock);
2034 if (!ion_handle_validate(client, handle)) {
2035 WARN(1, "%s: invalid handle passed to secure.\n", __func__);
2036 goto out_unlock;
2037 }
2038
2039 buffer = handle->buffer;
2040 heap = buffer->heap;
2041
Laura Abbott4afbd8b2013-02-15 09:21:33 -08002042 if (!ion_heap_allow_handle_secure(heap->type)) {
Laura Abbott93619302012-10-11 11:51:40 -07002043 pr_err("%s: cannot unsecure buffer from non-secure heap\n",
2044 __func__);
2045 goto out_unlock;
2046 }
2047
2048 BUG_ON(!buffer->heap->ops->unsecure_buffer);
2049 /*
2050 * Protect the handle via the client lock to ensure we aren't
2051 * racing with free
2052 */
2053 ret = buffer->heap->ops->unsecure_buffer(buffer, 0);
2054
2055out_unlock:
2056 mutex_unlock(&client->lock);
2057 return ret;
2058}
2059
Laura Abbott7e446482012-06-13 15:59:39 -07002060int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
2061 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08002062{
2063 struct rb_node *n;
2064 int ret_val = 0;
2065
2066 /*
2067 * traverse the list of heaps available in this system
2068 * and find the heap that is specified.
2069 */
2070 mutex_lock(&dev->lock);
2071 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
2072 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
Laura Abbott4afbd8b2013-02-15 09:21:33 -08002073 if (!ion_heap_allow_heap_secure(heap->type))
Olav Haugan0a852512012-01-09 10:20:55 -08002074 continue;
2075 if (ION_HEAP(heap->id) != heap_id)
2076 continue;
2077 if (heap->ops->secure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07002078 ret_val = heap->ops->secure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08002079 else
2080 ret_val = -EINVAL;
2081 break;
2082 }
2083 mutex_unlock(&dev->lock);
2084 return ret_val;
2085}
Olav Hauganbd453a92012-07-05 14:21:34 -07002086EXPORT_SYMBOL(ion_secure_heap);
Olav Haugan0a852512012-01-09 10:20:55 -08002087
Laura Abbott7e446482012-06-13 15:59:39 -07002088int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
2089 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08002090{
2091 struct rb_node *n;
2092 int ret_val = 0;
2093
2094 /*
2095 * traverse the list of heaps available in this system
2096 * and find the heap that is specified.
2097 */
2098 mutex_lock(&dev->lock);
2099 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
2100 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
Laura Abbott4afbd8b2013-02-15 09:21:33 -08002101 if (!ion_heap_allow_heap_secure(heap->type))
Olav Haugan0a852512012-01-09 10:20:55 -08002102 continue;
2103 if (ION_HEAP(heap->id) != heap_id)
2104 continue;
2105 if (heap->ops->unsecure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07002106 ret_val = heap->ops->unsecure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08002107 else
2108 ret_val = -EINVAL;
2109 break;
2110 }
2111 mutex_unlock(&dev->lock);
2112 return ret_val;
2113}
Olav Hauganbd453a92012-07-05 14:21:34 -07002114EXPORT_SYMBOL(ion_unsecure_heap);
Olav Haugan0a852512012-01-09 10:20:55 -08002115
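/*
 * Backs the "check_leaked_fds" debugfs file: buffers still marked as
 * dangling after ion_mark_dangling_buffers_locked() has run are reported
 * as leaked handles, along with their heap, size, and reference count.
 */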
Laura Abbott404f8242011-10-31 14:22:53 -07002116static int ion_debug_leak_show(struct seq_file *s, void *unused)
2117{
2118 struct ion_device *dev = s->private;
2119 struct rb_node *n;
Laura Abbott404f8242011-10-31 14:22:53 -07002120
Laura Abbott404f8242011-10-31 14:22:53 -07002121 seq_printf(s, "%16s %16s %16s %16s\n", "buffer", "heap", "size",
2122 "ref cnt");
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08002123
Laura Abbott404f8242011-10-31 14:22:53 -07002124 mutex_lock(&dev->lock);
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08002125 ion_mark_dangling_buffers_locked(dev);
Laura Abbott404f8242011-10-31 14:22:53 -07002126
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08002127 /* Anyone still marked as a 1 means a leaked handle somewhere */
Laura Abbott404f8242011-10-31 14:22:53 -07002128 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
2129 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
2130 node);
2131
2132 if (buf->marked == 1)
2133 seq_printf(s, "%16.x %16.s %16.x %16.d\n",
2134 (int)buf, buf->heap->name, buf->size,
2135 atomic_read(&buf->ref.refcount));
2136 }
2137 mutex_unlock(&dev->lock);
2138 return 0;
2139}
2140
2141static int ion_debug_leak_open(struct inode *inode, struct file *file)
2142{
2143 return single_open(file, ion_debug_leak_show, inode->i_private);
2144}
2145
2146static const struct file_operations debug_leak_fops = {
2147 .open = ion_debug_leak_open,
2148 .read = seq_read,
2149 .llseek = seq_lseek,
2150 .release = single_release,
2151};
2152
2153
2154
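/**
 * Create and register the ion misc device (typically /dev/ion) along with
 * its debugfs root.  The platform driver is expected to follow up by
 * registering heaps via ion_device_add_heap().
 * @param custom_ioctl platform hook handling ION_IOC_CUSTOM and the cache
 * maintenance ioctls
 * @return new ion device on success, ERR_PTR on failure
 */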
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07002155struct ion_device *ion_device_create(long (*custom_ioctl)
2156 (struct ion_client *client,
2157 unsigned int cmd,
2158 unsigned long arg))
2159{
2160 struct ion_device *idev;
2161 int ret;
2162
2163 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
2164 if (!idev)
2165 return ERR_PTR(-ENOMEM);
2166
2167 idev->dev.minor = MISC_DYNAMIC_MINOR;
2168 idev->dev.name = "ion";
2169 idev->dev.fops = &ion_fops;
2170 idev->dev.parent = NULL;
2171 ret = misc_register(&idev->dev);
2172 if (ret) {
2173 pr_err("ion: failed to register misc device.\n");
2174 kfree(idev);
2175 return ERR_PTR(ret);
2175 }
2176
2177 idev->debug_root = debugfs_create_dir("ion", NULL);
2178 if (IS_ERR_OR_NULL(idev->debug_root))
2179 pr_err("ion: failed to create debug files.\n");
2180
2181 idev->custom_ioctl = custom_ioctl;
2182 idev->buffers = RB_ROOT;
2183 mutex_init(&idev->lock);
2184 idev->heaps = RB_ROOT;
Laura Abbottb14ed962012-01-30 14:18:08 -08002185 idev->clients = RB_ROOT;
Laura Abbott404f8242011-10-31 14:22:53 -07002186 debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
2187 &debug_leak_fops);
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08002188
2189 setup_ion_leak_check(idev->debug_root);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07002190 return idev;
2191}
2192
2193void ion_device_destroy(struct ion_device *dev)
2194{
2195 misc_deregister(&dev->dev);
2196 /* XXX need to free the heaps and clients ? */
2197 kfree(dev);
2198}
Laura Abbottb14ed962012-01-30 14:18:08 -08002199
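/*
 * Early-boot helper: walk the platform heap table and memblock_reserve()
 * each statically placed heap (entries with a non-zero size) so the
 * memory is kept out of the page allocator before the heaps are created.
 */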
2200void __init ion_reserve(struct ion_platform_data *data)
2201{
2202 int i, ret;
2203
2204 for (i = 0; i < data->nr; i++) {
2205 if (data->heaps[i].size == 0)
2206 continue;
2207 ret = memblock_reserve(data->heaps[i].base,
2208 data->heaps[i].size);
2209 if (ret)
Laura Abbott1135c9e2013-03-13 15:33:40 -07002210 pr_err("memblock reserve of %x@%pa failed\n",
Laura Abbottb14ed962012-01-30 14:18:08 -08002211 data->heaps[i].size,
Laura Abbott1135c9e2013-03-13 15:33:40 -07002212 &data->heaps[i].base);
Laura Abbottb14ed962012-01-30 14:18:08 -08002213 }
2214}