/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/msm_ion.h>
#include <trace/events/kmem.h>

#include <mach/iommu_domains.h>
#include "ion_priv.h"
#define DEBUG

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		an rb tree of all the heaps in the system
 * @custom_ioctl:	hook for device-specific (e.g. cache) ioctls
 * @clients:		an rb tree of all the clients attached to this device
 * @debug_root:		debugfs root directory for this device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client, used for debugging
 * @debug_root:		debugfs entry for this client
 *
 * A client represents the set of buffers it may access.  The mutex stored
 * here protects both the tree of handles and the handles themselves, and
 * must be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @iommu_map_cnt:	count of times this client has mapped for iommu
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int iommu_map_cnt;
};

static void ion_iommu_release(struct kref *kref);

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.\n", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static void ion_iommu_add(struct ion_buffer *buffer,
			  struct ion_iommu_map *iommu)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (iommu->key < entry->key) {
			p = &(*p)->rb_left;
		} else if (iommu->key > entry->key) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer %p already has mapping for domain %d and partition %d\n",
				__func__, buffer,
				iommu_map_domain(iommu),
				iommu_map_partition(iommu));
			BUG();
		}
	}

	rb_link_node(&iommu->node, parent, p);
	rb_insert_color(&iommu->node, &buffer->iommu_maps);
}

static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
						unsigned int domain_no,
						unsigned int partition_no)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;
	uint64_t key = domain_no;
	key = key << 32 | partition_no;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (key < entry->key)
			p = &(*p)->rb_left;
		else if (key > entry->key)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	return NULL;
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	int ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;
	buffer->flags = flags;

	table = buffer->heap->ops->map_dma(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;

	mutex_init(&buffer->lock);
	ion_buffer_add(dev, buffer);
	return buffer;
}

/*
 * Check for delayed IOMMU unmapping. Also unmap any outstanding
 * mappings which would otherwise have been leaked.
 */
static void ion_iommu_delayed_unmap(struct ion_buffer *buffer)
{
	struct ion_iommu_map *iommu_map;
	struct rb_node *node;
	const struct rb_root *rb = &(buffer->iommu_maps);
	unsigned long ref_count;
	unsigned int delayed_unmap;

	mutex_lock(&buffer->lock);

	while ((node = rb_first(rb)) != 0) {
		iommu_map = rb_entry(node, struct ion_iommu_map, node);
		ref_count = atomic_read(&iommu_map->ref.refcount);
		delayed_unmap = iommu_map->flags & ION_IOMMU_UNMAP_DELAYED;

		if ((delayed_unmap && ref_count > 1) || !delayed_unmap) {
			pr_err("%s: Virtual memory address leak in domain %u, partition %u\n",
				__func__, iommu_map->domain_info[DI_DOMAIN_NUM],
				iommu_map->domain_info[DI_PARTITION_NUM]);
		}
		/* set ref count to 1 to force release */
		kref_init(&iommu_map->ref);
		kref_put(&iommu_map->ref, ion_iommu_release);
	}

	mutex_unlock(&buffer->lock);
}

static void ion_delayed_unsecure(struct ion_buffer *buffer)
{
	if (buffer->heap->ops->unsecure_buffer)
		buffer->heap->ops->unsecure_buffer(buffer, 1);
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);

	buffer->heap->ops->unmap_dma(buffer->heap, buffer);

	ion_delayed_unsecure(buffer);
	ion_iommu_delayed_unmap(buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_put(buffer);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: handle already found.\n", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	unsigned long secure_allocation = flags & ION_SECURE;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches
	 * the request of the caller, allocate from it.  Repeat until the
	 * allocation has succeeded or all heaps have been tried.
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_mask))
			continue;
		/* Do not allow un-secure heap if secure is specified */
		if (secure_allocation &&
			!ion_heap_allow_secure_allocation(heap->type))
			continue;
		trace_ion_alloc_buffer_start(client->name, heap->name, len,
					     heap_mask, flags);
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		trace_ion_alloc_buffer_end(client->name, heap->name, len,
					   heap_mask, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;

		trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
					    heap_mask, flags, PTR_ERR(buffer));
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
			int ret_value = snprintf(&dbg_str[dbg_str_idx],
						len_left, "%s ", heap->name);
			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
			}
		}
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_mask, flags, -ENODEV);
		return ERR_PTR(-ENODEV);
	}

	if (IS_ERR(buffer)) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_mask, flags, PTR_ERR(buffer));
		pr_debug("ION is unable to allocate 0x%x bytes (alignment: 0x%x) from heap(s) %sfor client %s with heap mask 0x%x\n",
			len, align, dbg_str, client->name, client->heap_mask);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		mutex_unlock(&client->lock);
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);
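
/*
 * Illustrative sketch only (not part of the driver): a kernel client
 * allocating and releasing a buffer with the two exports above.  It
 * assumes a valid struct ion_client (e.g. from ion_client_create()) and
 * a platform-defined heap id; "my_client" and "my_heap_id" are
 * placeholders, and flags of 0 requests a default (uncached, non-secure)
 * allocation.
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(my_client, PAGE_SIZE, PAGE_SIZE,
 *			   1 << my_heap_id, 0);
 *	if (IS_ERR_OR_NULL(handle))
 *		return handle ? PTR_ERR(handle) : -ENOMEM;
 *	...
 *	ion_free(my_client, handle);
 */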

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);
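
/*
 * Illustrative sketch only: querying the physical address of a
 * physically contiguous allocation (e.g. from a carveout heap) with
 * ion_phys() above.  "my_client" and "handle" are placeholders.
 *
 *	ion_phys_addr_t pa;
 *	size_t size;
 *	int ret;
 *
 *	ret = ion_phys(my_client, handle, &pa, &size);
 *	if (ret)
 *		return ret;
 *	pr_debug("buffer at 0x%lx, %u bytes\n",
 *		 (unsigned long)pa, (unsigned int)size);
 */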

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
		int domain_num, int partition_num, unsigned long align,
		unsigned long iova_length, unsigned long flags,
		unsigned long *iova)
{
	struct ion_iommu_map *data;
	int ret;

	data = kmalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return ERR_PTR(-ENOMEM);

	data->buffer = buffer;
	iommu_map_domain(data) = domain_num;
	iommu_map_partition(data) = partition_num;

	ret = buffer->heap->ops->map_iommu(buffer, data,
						domain_num,
						partition_num,
						align,
						iova_length,
						flags);
	if (ret)
		goto out;

	kref_init(&data->ref);
	*iova = data->iova_addr;

	ion_iommu_add(buffer, data);

	return data;

out:
	kfree(data);
	return ERR_PTR(ret);
}

int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags, unsigned long iommu_flags)
{
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	int ret = 0;

	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: client pointer is invalid\n", __func__);
		return -EINVAL;
	}
	if (IS_ERR_OR_NULL(handle)) {
		pr_err("%s: handle pointer is invalid\n", __func__);
		return -EINVAL;
	}
	if (IS_ERR_OR_NULL(handle->buffer)) {
		pr_err("%s: buffer pointer is invalid\n", __func__);
		return -EINVAL;
	}

	if (ION_IS_CACHED(flags)) {
		pr_err("%s: Cannot map iommu as cached.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_iommu.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	/*
	 * If clients don't want a custom iova length, just use whatever
	 * the buffer size is
	 */
	if (!iova_length)
		iova_length = buffer->size;

	if (buffer->size > iova_length) {
		pr_debug("%s: iova length %lx is not at least buffer size %x\n",
			__func__, iova_length, buffer->size);
		ret = -EINVAL;
		goto out;
	}

	if (buffer->size & ~PAGE_MASK) {
		pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
			buffer->size, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	if (iova_length & ~PAGE_MASK) {
		pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
			iova_length, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
	if (!iommu_map) {
		iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
					    align, iova_length, flags, iova);
		if (!IS_ERR_OR_NULL(iommu_map)) {
			iommu_map->flags = iommu_flags;

			if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
				kref_get(&iommu_map->ref);
		} else {
			ret = PTR_ERR(iommu_map);
		}
	} else {
		if (iommu_map->flags != iommu_flags) {
			pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
				__func__, handle,
				iommu_map->flags, iommu_flags);
			ret = -EINVAL;
		} else if (iommu_map->mapped_size != iova_length) {
			pr_err("%s: handle %p is already mapped with length %x, trying to map with length %lx\n",
				__func__, handle, iommu_map->mapped_size,
				iova_length);
			ret = -EINVAL;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	if (!ret)
		buffer->iommu_map_cnt++;
	*buffer_size = buffer->size;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
EXPORT_SYMBOL(ion_map_iommu);

static void ion_iommu_release(struct kref *kref)
{
	struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
						ref);
	struct ion_buffer *buffer = map->buffer;

	rb_erase(&map->node, &buffer->iommu_maps);
	buffer->heap->ops->unmap_iommu(map);
	kfree(map);
}

void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num)
{
	struct ion_iommu_map *iommu_map;
	struct ion_buffer *buffer;

	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: client pointer is invalid\n", __func__);
		return;
	}
	if (IS_ERR_OR_NULL(handle)) {
		pr_err("%s: handle pointer is invalid\n", __func__);
		return;
	}
	if (IS_ERR_OR_NULL(handle->buffer)) {
		pr_err("%s: buffer pointer is invalid\n", __func__);
		return;
	}

	mutex_lock(&client->lock);
	buffer = handle->buffer;

	mutex_lock(&buffer->lock);

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);

	if (!iommu_map) {
		WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
				domain_num, partition_num, buffer);
		goto out;
	}

	kref_put(&iommu_map->ref, ion_iommu_release);

	buffer->iommu_map_cnt--;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_iommu);
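
/*
 * Illustrative sketch only: mapping a buffer into an IOMMU domain with
 * the exports above and unmapping it again.  The domain and partition
 * numbers ("my_domain", "my_partition") come from the platform's
 * mach/iommu_domains.h definitions and are placeholders here; flags of 0
 * request an uncached, non-delayed mapping.
 *
 *	unsigned long iova, buffer_size;
 *	int ret;
 *
 *	ret = ion_map_iommu(my_client, handle, my_domain, my_partition,
 *			    PAGE_SIZE, 0, &iova, &buffer_size, 0, 0);
 *	if (ret)
 *		return ret;
 *	... the device can now DMA to/from iova ...
 *	ion_unmap_iommu(my_client, handle, my_domain, my_partition);
 */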

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
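
/*
 * Illustrative sketch only: temporarily mapping a buffer into the kernel
 * with the exports above.  "my_client", "handle" and "len" are
 * placeholders.
 *
 *	void *vaddr;
 *
 *	vaddr = ion_map_kernel(my_client, handle);
 *	if (IS_ERR_OR_NULL(vaddr))
 *		return vaddr ? PTR_ERR(vaddr) : -ENOMEM;
 *	memset(vaddr, 0, len);
 *	ion_unmap_kernel(my_client, handle);
 */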

int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
			void *uaddr, unsigned long offset, unsigned long len,
			unsigned int cmd)
{
	struct ion_buffer *buffer;
	int ret = -EINVAL;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to do_cache_op.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!ION_IS_CACHED(buffer->flags)) {
		ret = 0;
		goto out;
	}

	if (!handle->buffer->heap->ops->cache_op) {
		pr_err("%s: cache_op is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
						offset, len, cmd);

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
EXPORT_SYMBOL(ion_do_cache_op);
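
/*
 * Illustrative sketch only: maintaining the CPU cache around a device
 * access with ion_do_cache_op() above.  The command values are the
 * ION_IOC_* cache ioctls handled further down in ion_ioctl(); "vaddr" is
 * a kernel or user mapping of the buffer and "len" its size
 * (placeholders).
 *
 *	ret = ion_do_cache_op(my_client, handle, vaddr, 0, len,
 *			      ION_IOC_CLEAN_INV_CACHES);
 */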

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	struct rb_node *n2;

	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
			"heap_name", "size_in_bytes", "handle refcount",
			"buffer", "physical", "[domain,partition] - virt");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		seq_printf(s, "%16.16s: %16x : %16d : %12p",
			   handle->buffer->heap->name,
			   handle->buffer->size,
			   atomic_read(&handle->ref.refcount),
			   handle->buffer);

		if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
			type == ION_HEAP_TYPE_CARVEOUT ||
			type == (enum ion_heap_type) ION_HEAP_TYPE_CP)
			seq_printf(s, " : %12lx", handle->buffer->priv_phys);
		else
			seq_printf(s, " : %12s", "N/A");

		for (n2 = rb_first(&handle->buffer->iommu_maps); n2;
			n2 = rb_next(n2)) {
			struct ion_iommu_map *imap =
				rb_entry(n2, struct ion_iommu_map, node);
			seq_printf(s, " : [%d,%d] - %8lx",
					imap->domain_info[DI_DOMAIN_NUM],
					imap->domain_info[DI_PARTITION_NUM],
					imap->iova_addr);
		}
		seq_printf(s, "\n");
	}
	mutex_unlock(&client->lock);

	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;
	unsigned int name_len;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	name_len = strnlen(name, 64);

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);

	client->name = kzalloc(name_len+1, GFP_KERNEL);
	if (!client->name) {
		if (task)
			put_task_struct(current->group_leader);
		kfree(client);
		return ERR_PTR(-ENOMEM);
	} else {
		strlcpy(client->name, name, name_len+1);
	}

	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	mutex_lock(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

/**
 * ion_mark_dangling_buffers_locked() - Mark dangling buffers
 * @dev:	the ion device whose buffers will be searched
 *
 * Sets marked=1 for all known buffers associated with `dev' that no
 * longer have a handle pointing to them. dev->lock should be held
 * across a call to this function (and should only be unlocked after
 * checking for marked buffers).
 */
static void ion_mark_dangling_buffers_locked(struct ion_device *dev)
{
	struct rb_node *n, *n2;
	/* mark all buffers as 1 */
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		buf->marked = 1;
	}

	/* now see which buffers we can access */
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);

		mutex_lock(&client->lock);
		for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
			struct ion_handle *handle
				= rb_entry(n2, struct ion_handle, node);

			handle->buffer->marked = 0;
		}
		mutex_unlock(&client->lock);
	}
}

#ifdef CONFIG_ION_LEAK_CHECK
static u32 ion_debug_check_leaks_on_destroy;

static int ion_check_for_and_print_leaks(struct ion_device *dev)
{
	struct rb_node *n;
	int num_leaks = 0;

	if (!ion_debug_check_leaks_on_destroy)
		return 0;

	/* check for leaked buffers (those that no longer have a
	 * handle pointing to them) */
	ion_mark_dangling_buffers_locked(dev);

	/* Anyone still marked as a 1 means a leaked handle somewhere */
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		if (buf->marked == 1) {
			pr_info("Leaked ion buffer at %p\n", buf);
			num_leaks++;
		}
	}
	return num_leaks;
}

static void setup_ion_leak_check(struct dentry *debug_root)
{
	debugfs_create_bool("check_leaks_on_destroy", 0664, debug_root,
			&ion_debug_check_leaks_on_destroy);
}
#else
static int ion_check_for_and_print_leaks(struct ion_device *dev)
{
	return 0;
}

static void setup_ion_leak_check(struct dentry *debug_root)
{
}
#endif

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;
	int num_leaks;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);

	num_leaks = ion_check_for_and_print_leaks(dev);

	mutex_unlock(&dev->lock);

	if (num_leaks) {
		struct task_struct *current_task = current;
		char current_task_name[TASK_COMM_LEN];
		get_task_comm(current_task_name, current_task);
		WARN(1, "%s: Detected %d leaked ion buffer%s.\n",
			__func__, num_leaks, num_leaks == 1 ? "" : "s");
		pr_info("task name at time of leak: %s, pid: %d\n",
			current_task_name, current_task->pid);
	}

	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);
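
/*
 * Illustrative sketch only: typical in-kernel client lifecycle using
 * ion_client_create() and ion_client_destroy() above.  "idev" is the
 * struct ion_device created elsewhere by the platform code (placeholder);
 * a heap_mask of -1 allows every heap type, as ion_open() does for
 * userspace clients.
 *
 *	struct ion_client *client;
 *
 *	client = ion_client_create(idev, -1, "my-driver");
 *	if (IS_ERR_OR_NULL(client))
 *		return client ? PTR_ERR(client) : -ENOMEM;
 *	...
 *	ion_client_destroy(client);
 */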

int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
			unsigned long *flags)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*flags = buffer->flags;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_flags);

int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
			unsigned long *size)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*size = buffer->size;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_size);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);
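
/*
 * Illustrative sketch only: walking the buffer's scatterlist returned by
 * ion_sg_table() above, e.g. to hand the chunks to a device.  Assumes
 * <linux/scatterlist.h> for for_each_sg(); "my_client" and "handle" are
 * placeholders.
 *
 *	struct sg_table *table;
 *	struct scatterlist *sg;
 *	int i;
 *
 *	table = ion_sg_table(my_client, handle);
 *	if (IS_ERR_OR_NULL(table))
 *		return table ? PTR_ERR(table) : -EINVAL;
 *	for_each_sg(table->sgl, sg, table->nents, i)
 *		pr_debug("chunk of %u bytes\n", sg->length);
 */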

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

static void ion_vma_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);

	if (buffer->heap->ops->unmap_user)
		buffer->heap->ops->unmap_user(buffer->heap, buffer);
}

static struct vm_operations_struct ion_vm_ops = {
	.close = ion_vma_close,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);

	if (ret) {
		mutex_unlock(&buffer->lock);
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);
	} else {
		mutex_unlock(&buffer->lock);

		vma->vm_ops = &ion_vm_ops;
		/*
		 * move the buffer into the vm_private_data so we can access it
		 * from vma_open/close
		 */
		vma->vm_private_data = buffer;
	}
	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
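
/*
 * Illustrative sketch only: passing a buffer between two clients through
 * a dma-buf fd with the exports above.  In practice the fd is usually
 * sent to another process; both ends are shown here in one place with
 * placeholder clients and handles.
 *
 *	struct ion_handle *handle_b;
 *	int fd;
 *
 *	fd = ion_share_dma_buf(client_a, handle_a);
 *	if (fd < 0)
 *		return fd;
 *	handle_b = ion_import_dma_buf(client_b, fd);
 *	if (IS_ERR_OR_NULL(handle_b))
 *		return handle_b ? PTR_ERR(handle_b) : -EINVAL;
 */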
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001400
1401static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1402{
1403 struct ion_client *client = filp->private_data;
1404
1405 switch (cmd) {
1406 case ION_IOC_ALLOC:
1407 {
1408 struct ion_allocation_data data;
1409
1410 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1411 return -EFAULT;
1412 data.handle = ion_alloc(client, data.len, data.align,
Hanumant Singh7d72bad2012-08-29 18:39:44 -07001413 data.heap_mask, data.flags);
KyongHo Cho9ae7e012011-09-07 11:27:07 +09001414
Laura Abbottb14ed962012-01-30 14:18:08 -08001415 if (IS_ERR(data.handle))
1416 return PTR_ERR(data.handle);
KyongHo Cho9ae7e012011-09-07 11:27:07 +09001417
Laura Abbottb14ed962012-01-30 14:18:08 -08001418 if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
1419 ion_free(client, data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001420 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001421 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001422 break;
1423 }
1424 case ION_IOC_FREE:
1425 {
1426 struct ion_handle_data data;
1427 bool valid;
1428
1429 if (copy_from_user(&data, (void __user *)arg,
1430 sizeof(struct ion_handle_data)))
1431 return -EFAULT;
1432 mutex_lock(&client->lock);
1433 valid = ion_handle_validate(client, data.handle);
1434 mutex_unlock(&client->lock);
1435 if (!valid)
1436 return -EINVAL;
1437 ion_free(client, data.handle);
1438 break;
1439 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001440 case ION_IOC_MAP:
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001441 case ION_IOC_SHARE:
1442 {
1443 struct ion_fd_data data;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001444 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1445 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001446
Laura Abbottb14ed962012-01-30 14:18:08 -08001447 data.fd = ion_share_dma_buf(client, data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001448 if (copy_to_user((void __user *)arg, &data, sizeof(data)))
1449 return -EFAULT;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001450 if (data.fd < 0)
1451 return data.fd;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001452 break;
1453 }
1454 case ION_IOC_IMPORT:
1455 {
1456 struct ion_fd_data data;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001457 int ret = 0;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001458 if (copy_from_user(&data, (void __user *)arg,
1459 sizeof(struct ion_fd_data)))
1460 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001461 data.handle = ion_import_dma_buf(client, data.fd);
Olav Haugan865e97f2012-05-15 14:40:11 -07001462 if (IS_ERR(data.handle)) {
1463 ret = PTR_ERR(data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001464 data.handle = NULL;
Olav Haugan865e97f2012-05-15 14:40:11 -07001465 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001466 if (copy_to_user((void __user *)arg, &data,
1467 sizeof(struct ion_fd_data)))
1468 return -EFAULT;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001469 if (ret < 0)
1470 return ret;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001471 break;
1472 }
1473 case ION_IOC_CUSTOM:
1474 {
1475 struct ion_device *dev = client->dev;
1476 struct ion_custom_data data;
1477
1478 if (!dev->custom_ioctl)
1479 return -ENOTTY;
1480 if (copy_from_user(&data, (void __user *)arg,
1481 sizeof(struct ion_custom_data)))
1482 return -EFAULT;
1483 return dev->custom_ioctl(client, data.cmd, data.arg);
1484 }
Laura Abbottabcb6f72011-10-04 16:26:49 -07001485 case ION_IOC_CLEAN_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001486 return client->dev->custom_ioctl(client,
1487 ION_IOC_CLEAN_CACHES, arg);
Laura Abbottabcb6f72011-10-04 16:26:49 -07001488 case ION_IOC_INV_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001489 return client->dev->custom_ioctl(client,
1490 ION_IOC_INV_CACHES, arg);
Laura Abbottabcb6f72011-10-04 16:26:49 -07001491 case ION_IOC_CLEAN_INV_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001492 return client->dev->custom_ioctl(client,
1493 ION_IOC_CLEAN_INV_CACHES, arg);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001494 default:
1495 return -ENOTTY;
1496 }
1497 return 0;
1498}
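
/*
 * Illustrative sketch (not part of the driver): how a userspace client might
 * drive the ioctl interface above. The flow mirrors the switch cases in
 * ion_ioctl(): ION_IOC_ALLOC fills in data.handle, ION_IOC_SHARE turns the
 * handle into a dma-buf fd, and ION_IOC_FREE releases the handle. The header
 * names and ION_HEAP_SYSTEM_MASK are assumptions about the exported uapi;
 * adjust them to whatever headers the target actually ships.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/ion.h>
 *
 *	int example_alloc_and_share(void)
 *	{
 *		struct ion_allocation_data alloc = {
 *			.len = 4096,
 *			.align = 4096,
 *			.heap_mask = ION_HEAP_SYSTEM_MASK,	// assumed mask
 *			.flags = 0,
 *		};
 *		struct ion_fd_data share;
 *		struct ion_handle_data free_data;
 *		int ionfd = open("/dev/ion", O_RDONLY);
 *
 *		if (ionfd < 0)
 *			return -1;
 *		if (ioctl(ionfd, ION_IOC_ALLOC, &alloc) < 0)
 *			return -1;
 *
 *		share.handle = alloc.handle;
 *		if (ioctl(ionfd, ION_IOC_SHARE, &share) < 0)
 *			return -1;
 *		// share.fd is now a dma-buf fd that can typically be
 *		// mmap()ed or passed to another process or driver.
 *
 *		free_data.handle = alloc.handle;
 *		ioctl(ionfd, ION_IOC_FREE, &free_data);
 *		return share.fd;
 *	}
 */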
1499
1500static int ion_release(struct inode *inode, struct file *file)
1501{
1502 struct ion_client *client = file->private_data;
1503
1504 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbottb14ed962012-01-30 14:18:08 -08001505 ion_client_destroy(client);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001506 return 0;
1507}
1508
1509static int ion_open(struct inode *inode, struct file *file)
1510{
1511 struct miscdevice *miscdev = file->private_data;
1512 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1513 struct ion_client *client;
Laura Abbotteed86032011-12-05 15:32:36 -08001514 char debug_name[64];
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001515
1516 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbotteed86032011-12-05 15:32:36 -08001517 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1518 client = ion_client_create(dev, -1, debug_name);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001519 if (IS_ERR_OR_NULL(client))
1520 return PTR_ERR(client);
1521 file->private_data = client;
1522
1523 return 0;
1524}
1525
1526static const struct file_operations ion_fops = {
1527 .owner = THIS_MODULE,
1528 .open = ion_open,
1529 .release = ion_release,
1530 .unlocked_ioctl = ion_ioctl,
1531};
1532
1533static size_t ion_debug_heap_total(struct ion_client *client,
Laura Abbott3647ac32011-10-31 14:09:53 -07001534 enum ion_heap_ids id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001535{
1536 size_t size = 0;
1537 struct rb_node *n;
1538
1539 mutex_lock(&client->lock);
1540 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1541 struct ion_handle *handle = rb_entry(n,
1542 struct ion_handle,
1543 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001544 if (handle->buffer->heap->id == id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001545 size += handle->buffer->size;
1546 }
1547 mutex_unlock(&client->lock);
1548 return size;
1549}
1550
Olav Haugan0671b9a2012-05-25 11:58:56 -07001551/**
 1552 * Searches through a client's handles to find whether the buffer is owned
1553 * by this client. Used for debug output.
1554 * @param client pointer to candidate owner of buffer
1555 * @param buf pointer to buffer that we are trying to find the owner of
1556 * @return 1 if found, 0 otherwise
1557 */
1558static int ion_debug_find_buffer_owner(const struct ion_client *client,
1559 const struct ion_buffer *buf)
1560{
1561 struct rb_node *n;
1562
1563 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1564 const struct ion_handle *handle = rb_entry(n,
1565 const struct ion_handle,
1566 node);
1567 if (handle->buffer == buf)
1568 return 1;
1569 }
1570 return 0;
1571}
1572
1573/**
1574 * Adds mem_map_data pointer to the tree of mem_map
1575 * Used for debug output.
1576 * @param mem_map The mem_map tree
1577 * @param data The new data to add to the tree
1578 */
1579static void ion_debug_mem_map_add(struct rb_root *mem_map,
1580 struct mem_map_data *data)
1581{
1582 struct rb_node **p = &mem_map->rb_node;
1583 struct rb_node *parent = NULL;
1584 struct mem_map_data *entry;
1585
1586 while (*p) {
1587 parent = *p;
1588 entry = rb_entry(parent, struct mem_map_data, node);
1589
1590 if (data->addr < entry->addr) {
1591 p = &(*p)->rb_left;
1592 } else if (data->addr > entry->addr) {
1593 p = &(*p)->rb_right;
1594 } else {
 1595 pr_err("%s: mem_map_data already found.\n", __func__);
1596 BUG();
1597 }
1598 }
1599 rb_link_node(&data->node, parent, p);
1600 rb_insert_color(&data->node, mem_map);
1601}
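
/*
 * Sketch (for reference only): walking the mem_map built above visits a
 * heap's buffers in ascending address order, which is what the heap
 * print_debug() implementations that consume it rely on. A minimal
 * traversal, assuming the caller holds the same locks as the debug path:
 *
 *	struct rb_node *n;
 *
 *	for (n = rb_first(mem_map); n; n = rb_next(n)) {
 *		struct mem_map_data *md =
 *			rb_entry(n, struct mem_map_data, node);
 *
 *		seq_printf(s, "%16s %14lx %14lx %14lu\n",
 *			   md->client_name ? md->client_name : "(null)",
 *			   (unsigned long)md->addr,
 *			   (unsigned long)md->addr_end, md->size);
 *	}
 */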
1602
1603/**
1604 * Search for an owner of a buffer by iterating over all ION clients.
1605 * @param dev ion device containing pointers to all the clients.
1606 * @param buffer pointer to buffer we are trying to find the owner of.
 1607 * @return name of the owning client, or NULL if no owner was found.
1608 */
1609const char *ion_debug_locate_owner(const struct ion_device *dev,
1610 const struct ion_buffer *buffer)
1611{
1612 struct rb_node *j;
1613 const char *client_name = NULL;
1614
Laura Abbottb14ed962012-01-30 14:18:08 -08001615 for (j = rb_first(&dev->clients); j && !client_name;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001616 j = rb_next(j)) {
1617 struct ion_client *client = rb_entry(j, struct ion_client,
1618 node);
1619 if (ion_debug_find_buffer_owner(client, buffer))
1620 client_name = client->name;
1621 }
1622 return client_name;
1623}
1624
1625/**
1626 * Create a mem_map of the heap.
1627 * @param s seq_file to log error message to.
1628 * @param heap The heap to create mem_map for.
1629 * @param mem_map The mem map to be created.
1630 */
1631void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
1632 struct rb_root *mem_map)
1633{
1634 struct ion_device *dev = heap->dev;
1635 struct rb_node *n;
Chintan Pandyadaf75622013-01-29 19:40:01 +05301636 size_t size;
1637
1638 if (!heap->ops->phys)
1639 return;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001640
1641 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1642 struct ion_buffer *buffer =
1643 rb_entry(n, struct ion_buffer, node);
1644 if (buffer->heap->id == heap->id) {
1645 struct mem_map_data *data =
1646 kzalloc(sizeof(*data), GFP_KERNEL);
1647 if (!data) {
 1648 seq_printf(s, "ERROR: out of memory. Part of memory map will not be logged\n");
1650 break;
1651 }
Chintan Pandyadaf75622013-01-29 19:40:01 +05301652
1653 buffer->heap->ops->phys(buffer->heap, buffer,
1654 &(data->addr), &size);
1655 data->size = (unsigned long) size;
1656 data->addr_end = data->addr + data->size - 1;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001657 data->client_name = ion_debug_locate_owner(dev, buffer);
1658 ion_debug_mem_map_add(mem_map, data);
1659 }
1660 }
1661}
1662
1663/**
1664 * Free the memory allocated by ion_debug_mem_map_create
1665 * @param mem_map The mem map to free.
1666 */
1667static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
1668{
1669 if (mem_map) {
1670 struct rb_node *n;
1671 while ((n = rb_first(mem_map)) != 0) {
1672 struct mem_map_data *data =
1673 rb_entry(n, struct mem_map_data, node);
1674 rb_erase(&data->node, mem_map);
1675 kfree(data);
1676 }
1677 }
1678}
1679
1680/**
1681 * Print heap debug information.
1682 * @param s seq_file to log message to.
1683 * @param heap pointer to heap that we will print debug information for.
1684 */
1685static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
1686{
1687 if (heap->ops->print_debug) {
1688 struct rb_root mem_map = RB_ROOT;
1689 ion_debug_mem_map_create(s, heap, &mem_map);
1690 heap->ops->print_debug(heap, s, &mem_map);
1691 ion_debug_mem_map_destroy(&mem_map);
1692 }
1693}
1694
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001695static int ion_debug_heap_show(struct seq_file *s, void *unused)
1696{
1697 struct ion_heap *heap = s->private;
1698 struct ion_device *dev = heap->dev;
1699 struct rb_node *n;
1700
Olav Haugane4900b52012-05-25 11:58:03 -07001701 mutex_lock(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001702 seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001703
Laura Abbottb14ed962012-01-30 14:18:08 -08001704 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001705 struct ion_client *client = rb_entry(n, struct ion_client,
1706 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001707 size_t size = ion_debug_heap_total(client, heap->id);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001708 if (!size)
1709 continue;
Laura Abbottb14ed962012-01-30 14:18:08 -08001710 if (client->task) {
1711 char task_comm[TASK_COMM_LEN];
1712
1713 get_task_comm(task_comm, client->task);
 1714 seq_printf(s, "%16s %16u %16u\n", task_comm,
1715 client->pid, size);
1716 } else {
 1717 seq_printf(s, "%16s %16u %16u\n", client->name,
1718 client->pid, size);
1719 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001720 }
Olav Haugan0671b9a2012-05-25 11:58:56 -07001721 ion_heap_print_debug(s, heap);
Olav Haugane4900b52012-05-25 11:58:03 -07001722 mutex_unlock(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001723 return 0;
1724}
1725
1726static int ion_debug_heap_open(struct inode *inode, struct file *file)
1727{
1728 return single_open(file, ion_debug_heap_show, inode->i_private);
1729}
1730
1731static const struct file_operations debug_heap_fops = {
1732 .open = ion_debug_heap_open,
1733 .read = seq_read,
1734 .llseek = seq_lseek,
1735 .release = single_release,
1736};
1737
1738void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1739{
1740 struct rb_node **p = &dev->heaps.rb_node;
1741 struct rb_node *parent = NULL;
1742 struct ion_heap *entry;
1743
Laura Abbottb14ed962012-01-30 14:18:08 -08001744 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1745 !heap->ops->unmap_dma)
 1746 pr_err("%s: cannot add heap with invalid ops struct.\n",
1747 __func__);
1748
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001749 heap->dev = dev;
1750 mutex_lock(&dev->lock);
1751 while (*p) {
1752 parent = *p;
1753 entry = rb_entry(parent, struct ion_heap, node);
1754
1755 if (heap->id < entry->id) {
1756 p = &(*p)->rb_left;
 1757 } else if (heap->id > entry->id) {
1758 p = &(*p)->rb_right;
1759 } else {
 1760 pr_err("%s: cannot insert multiple heaps with id %d\n",
 1761 __func__, heap->id);
1762 goto end;
1763 }
1764 }
1765
1766 rb_link_node(&heap->node, parent, p);
1767 rb_insert_color(&heap->node, &dev->heaps);
1768 debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1769 &debug_heap_fops);
1770end:
1771 mutex_unlock(&dev->lock);
1772}
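
/*
 * Sketch (not code from this file): a platform probe typically creates its
 * heaps from ion_platform_data and registers each one with the call above.
 * example_probe() is a placeholder, and example_custom_ioctl refers to the
 * platform's custom ioctl handler (see the sketch after ion_device_create()
 * below); both are assumptions shown only to illustrate the call order.
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct ion_platform_data *pdata = pdev->dev.platform_data;
 *		struct ion_device *idev;
 *		int i;
 *
 *		idev = ion_device_create(example_custom_ioctl);
 *		if (IS_ERR_OR_NULL(idev))
 *			return PTR_ERR(idev);
 *
 *		for (i = 0; i < pdata->nr; i++) {
 *			struct ion_heap *heap =
 *				ion_heap_create(&pdata->heaps[i]);
 *
 *			if (IS_ERR_OR_NULL(heap))
 *				continue;
 *			ion_device_add_heap(idev, heap);
 *		}
 *		platform_set_drvdata(pdev, idev);
 *		return 0;
 *	}
 */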
1773
Laura Abbott93619302012-10-11 11:51:40 -07001774int ion_secure_handle(struct ion_client *client, struct ion_handle *handle,
1775 int version, void *data, int flags)
1776{
1777 int ret = -EINVAL;
1778 struct ion_heap *heap;
1779 struct ion_buffer *buffer;
1780
1781 mutex_lock(&client->lock);
1782 if (!ion_handle_validate(client, handle)) {
1783 WARN(1, "%s: invalid handle passed to secure.\n", __func__);
1784 goto out_unlock;
1785 }
1786
1787 buffer = handle->buffer;
1788 heap = buffer->heap;
1789
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001790 if (!ion_heap_allow_handle_secure(heap->type)) {
Laura Abbott93619302012-10-11 11:51:40 -07001791 pr_err("%s: cannot secure buffer from non-secure heap\n",
1792 __func__);
1793 goto out_unlock;
1794 }
1795
1796 BUG_ON(!buffer->heap->ops->secure_buffer);
1797 /*
1798 * Protect the handle via the client lock to ensure we aren't
1799 * racing with free
1800 */
1801 ret = buffer->heap->ops->secure_buffer(buffer, version, data, flags);
1802
1803out_unlock:
1804 mutex_unlock(&client->lock);
1805 return ret;
1806}
1807
1808int ion_unsecure_handle(struct ion_client *client, struct ion_handle *handle)
1809{
1810 int ret = -EINVAL;
1811 struct ion_heap *heap;
1812 struct ion_buffer *buffer;
1813
1814 mutex_lock(&client->lock);
1815 if (!ion_handle_validate(client, handle)) {
 1816 WARN(1, "%s: invalid handle passed to unsecure.\n", __func__);
1817 goto out_unlock;
1818 }
1819
1820 buffer = handle->buffer;
1821 heap = buffer->heap;
1822
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001823 if (!ion_heap_allow_handle_secure(heap->type)) {
Laura Abbott93619302012-10-11 11:51:40 -07001824 pr_err("%s: cannot unsecure buffer from non-secure heap\n",
1825 __func__);
1826 goto out_unlock;
1827 }
1828
1829 BUG_ON(!buffer->heap->ops->unsecure_buffer);
1830 /*
1831 * Protect the handle via the client lock to ensure we aren't
1832 * racing with free
1833 */
1834 ret = buffer->heap->ops->unsecure_buffer(buffer, 0);
1835
1836out_unlock:
1837 mutex_unlock(&client->lock);
1838 return ret;
1839}
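
/*
 * Sketch (illustrative only): kernel callers are expected to pair the two
 * helpers above around the window in which the buffer must stay protected.
 * example_protect() is a placeholder; version, data and flags belong to the
 * platform's content-protection interface and are passed straight through
 * to the heap ops.
 *
 *	int example_protect(struct ion_client *client,
 *			    struct ion_handle *handle,
 *			    int version, void *data, int flags)
 *	{
 *		int ret = ion_secure_handle(client, handle, version,
 *					    data, flags);
 *
 *		if (ret)
 *			return ret;
 *
 *		// ... hand the buffer to the secure consumer ...
 *
 *		return ion_unsecure_handle(client, handle);
 *	}
 */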
1840
Laura Abbott7e446482012-06-13 15:59:39 -07001841int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
1842 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001843{
1844 struct rb_node *n;
1845 int ret_val = 0;
1846
1847 /*
1848 * traverse the list of heaps available in this system
1849 * and find the heap that is specified.
1850 */
1851 mutex_lock(&dev->lock);
1852 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
1853 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001854 if (!ion_heap_allow_heap_secure(heap->type))
Olav Haugan0a852512012-01-09 10:20:55 -08001855 continue;
1856 if (ION_HEAP(heap->id) != heap_id)
1857 continue;
1858 if (heap->ops->secure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001859 ret_val = heap->ops->secure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001860 else
1861 ret_val = -EINVAL;
1862 break;
1863 }
1864 mutex_unlock(&dev->lock);
1865 return ret_val;
1866}
Olav Hauganbd453a92012-07-05 14:21:34 -07001867EXPORT_SYMBOL(ion_secure_heap);
Olav Haugan0a852512012-01-09 10:20:55 -08001868
Laura Abbott7e446482012-06-13 15:59:39 -07001869int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
1870 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001871{
1872 struct rb_node *n;
1873 int ret_val = 0;
1874
1875 /*
1876 * traverse the list of heaps available in this system
1877 * and find the heap that is specified.
1878 */
1879 mutex_lock(&dev->lock);
1880 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
1881 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001882 if (!ion_heap_allow_heap_secure(heap->type))
Olav Haugan0a852512012-01-09 10:20:55 -08001883 continue;
1884 if (ION_HEAP(heap->id) != heap_id)
1885 continue;
 1886 if (heap->ops->unsecure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001887 ret_val = heap->ops->unsecure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001888 else
1889 ret_val = -EINVAL;
1890 break;
1891 }
1892 mutex_unlock(&dev->lock);
1893 return ret_val;
1894}
Olav Hauganbd453a92012-07-05 14:21:34 -07001895EXPORT_SYMBOL(ion_unsecure_heap);
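
/*
 * Sketch (illustrative only): the heap-wide variants are likewise meant to
 * be paired by the driver that owns the secure use case. Note that the
 * heap_id argument is compared against ION_HEAP(heap->id), so callers pass
 * the heap mask bit rather than the raw id; example_secure_session() and
 * its arguments are placeholders.
 *
 *	int example_secure_session(struct ion_device *dev, int heap_id,
 *				   int version, void *data)
 *	{
 *		int ret = ion_secure_heap(dev, ION_HEAP(heap_id),
 *					  version, data);
 *
 *		if (ret)
 *			return ret;
 *		// ... secure session runs while the heap stays protected ...
 *		return ion_unsecure_heap(dev, ION_HEAP(heap_id),
 *					 version, data);
 *	}
 */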
Olav Haugan0a852512012-01-09 10:20:55 -08001896
Laura Abbott404f8242011-10-31 14:22:53 -07001897static int ion_debug_leak_show(struct seq_file *s, void *unused)
1898{
1899 struct ion_device *dev = s->private;
1900 struct rb_node *n;
Laura Abbott404f8242011-10-31 14:22:53 -07001901
Laura Abbott404f8242011-10-31 14:22:53 -07001902 seq_printf(s, "%16s %16s %16s %16s\n", "buffer", "heap", "size",
1903 "ref cnt");
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001904
Laura Abbott404f8242011-10-31 14:22:53 -07001905 mutex_lock(&dev->lock);
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001906 ion_mark_dangling_buffers_locked(dev);
Laura Abbott404f8242011-10-31 14:22:53 -07001907
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001908 /* Anyone still marked as a 1 means a leaked handle somewhere */
Laura Abbott404f8242011-10-31 14:22:53 -07001909 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1910 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
1911 node);
1912
1913 if (buf->marked == 1)
 1914 seq_printf(s, "%16x %16s %16x %16d\n",
1915 (int)buf, buf->heap->name, buf->size,
1916 atomic_read(&buf->ref.refcount));
1917 }
1918 mutex_unlock(&dev->lock);
1919 return 0;
1920}
1921
1922static int ion_debug_leak_open(struct inode *inode, struct file *file)
1923{
1924 return single_open(file, ion_debug_leak_show, inode->i_private);
1925}
1926
1927static const struct file_operations debug_leak_fops = {
1928 .open = ion_debug_leak_open,
1929 .read = seq_read,
1930 .llseek = seq_lseek,
1931 .release = single_release,
1932};
1933
1934
1935
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001936struct ion_device *ion_device_create(long (*custom_ioctl)
1937 (struct ion_client *client,
1938 unsigned int cmd,
1939 unsigned long arg))
1940{
1941 struct ion_device *idev;
1942 int ret;
1943
1944 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1945 if (!idev)
1946 return ERR_PTR(-ENOMEM);
1947
1948 idev->dev.minor = MISC_DYNAMIC_MINOR;
1949 idev->dev.name = "ion";
1950 idev->dev.fops = &ion_fops;
1951 idev->dev.parent = NULL;
1952 ret = misc_register(&idev->dev);
1953 if (ret) {
 1954 pr_err("ion: failed to register misc device.\n");
 kfree(idev);
 1955 return ERR_PTR(ret);
1956 }
1957
1958 idev->debug_root = debugfs_create_dir("ion", NULL);
1959 if (IS_ERR_OR_NULL(idev->debug_root))
1960 pr_err("ion: failed to create debug files.\n");
1961
1962 idev->custom_ioctl = custom_ioctl;
1963 idev->buffers = RB_ROOT;
1964 mutex_init(&idev->lock);
1965 idev->heaps = RB_ROOT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001966 idev->clients = RB_ROOT;
Laura Abbott404f8242011-10-31 14:22:53 -07001967 debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
1968 &debug_leak_fops);
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001969
1970 setup_ion_leak_check(idev->debug_root);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001971 return idev;
1972}
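
/*
 * Sketch (assumption about the caller, not code from this file): the
 * custom_ioctl hook registered here is what the ION_IOC_*_CACHES cases in
 * ion_ioctl() forward to, so platforms that want those ioctls must supply
 * one. A minimal skeleton might look like this:
 *
 *	static long example_custom_ioctl(struct ion_client *client,
 *					 unsigned int cmd, unsigned long arg)
 *	{
 *		switch (cmd) {
 *		case ION_IOC_CLEAN_CACHES:
 *		case ION_IOC_INV_CACHES:
 *		case ION_IOC_CLEAN_INV_CACHES:
 *			// platform-specific cache maintenance goes here
 *			return 0;
 *		default:
 *			return -ENOTTY;
 *		}
 *	}
 *
 *	idev = ion_device_create(example_custom_ioctl);
 */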
1973
1974void ion_device_destroy(struct ion_device *dev)
1975{
1976 misc_deregister(&dev->dev);
1977 /* XXX need to free the heaps and clients ? */
1978 kfree(dev);
1979}
Laura Abbottb14ed962012-01-30 14:18:08 -08001980
1981void __init ion_reserve(struct ion_platform_data *data)
1982{
1983 int i, ret;
1984
1985 for (i = 0; i < data->nr; i++) {
1986 if (data->heaps[i].size == 0)
1987 continue;
1988 ret = memblock_reserve(data->heaps[i].base,
1989 data->heaps[i].size);
1990 if (ret)
1991 pr_err("memblock reserve of %x@%lx failed\n",
1992 data->heaps[i].size,
1993 data->heaps[i].base);
1994 }
1995}
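
/*
 * Sketch (assumed board-file usage): ion_reserve() is meant to run from the
 * machine's early reserve callback, before the page allocator claims the
 * memory, using the same ion_platform_data later handed to the ion platform
 * device. All field values below are placeholders; the heap id, type and
 * name must match the platform's msm_ion definitions, and .heaps assumes
 * the layout where heaps is a pointer rather than a flexible array.
 *
 *	static struct ion_platform_heap example_heaps[] = {
 *		{
 *			.id   = ION_CP_MM_HEAP_ID,	// assumed heap id
 *			.type = ION_HEAP_TYPE_CP,	// assumed heap type
 *			.name = "mm",
 *			.base = 0x80000000,
 *			.size = SZ_64M,
 *		},
 *	};
 *
 *	static struct ion_platform_data example_ion_pdata = {
 *		.nr = ARRAY_SIZE(example_heaps),
 *		.heaps = example_heaps,
 *	};
 *
 *	static void __init example_reserve(void)
 *	{
 *		ion_reserve(&example_ion_pdata);
 *	}
 */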