/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/msm_ion.h>
#include <trace/events/kmem.h>


#include <mach/iommu_domains.h>
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		an rb tree of all the heaps in the system
 * @custom_ioctl:	hook for device specific ioctls
 * @clients:		an rb tree of all the clients in the system
 * @debug_root:		debugfs root for this device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client, used for debugging
 * @debug_root:		debugfs entry for this client
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * as well as the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @iommu_map_cnt:	count of times this client has mapped through an iommu
 *
 * Modifications to node, kmap_cnt or iommu_map_cnt should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int iommu_map_cnt;
};

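/*
 * Cached buffers that were not allocated with ION_FLAG_CACHED_NEEDS_SYNC
 * are faulted into userspace page by page and synced lazily; everything
 * else is mapped up front.
 */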
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return ((buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
			    i) {
			if (sg_dma_len(sg) == PAGE_SIZE)
				continue;
			pr_err("%s: cached mappings that will be faulted in "
			       "must have pagewise sg_lists\n", __func__);
			ret = -EINVAL;
			goto err;
		}

		ret = ion_buffer_alloc_dirty(buffer);
		if (ret)
			goto err;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here.  However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg.  The implicit contract here is that
	   memory coming from the heaps is ready for dma, i.e. if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (sg_dma_address(sg) == 0)
			sg_dma_address(sg) = sg_phys(sg);
	}
	ion_buffer_add(dev, buffer);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
	kfree(buffer);
	return ERR_PTR(ret);
}

static void ion_delayed_unsecure(struct ion_buffer *buffer)
{
	if (buffer->heap->ops->unsecure_buffer)
		buffer->heap->ops->unsecure_buffer(buffer, 1);
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);

	ion_delayed_unsecure(buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	if (buffer->flags & ION_FLAG_CACHED)
		kfree(buffer->dirty);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_put(buffer);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

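/*
 * Handles are keyed by their pointer value in the client's rbtree, so
 * validation is a plain lookup: a handle that was never added to (or has
 * already been removed from) this client will not be found.
 */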
static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	unsigned long secure_allocation = flags & ION_FLAG_SECURE;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	/*
	 * For now, we don't want to fault in pages individually since
	 * clients are already doing manual cache maintenance. In
	 * other words, the implicit caching infrastructure is in
	 * place (in code) but should not be used.
	 */
	flags |= ION_FLAG_CACHED_NEEDS_SYNC;

	pr_debug("%s: len %d align %d heap_mask %u flags %x\n", __func__, len,
		 align, heap_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap type */
		if (!((1 << heap->id) & heap_mask))
			continue;
		/* Do not allow un-secure heap if secure is specified */
		if (secure_allocation &&
		    !ion_heap_allow_secure_allocation(heap->type))
			continue;
		trace_ion_alloc_buffer_start(client->name, heap->name, len,
					     heap_mask, flags);
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		trace_ion_alloc_buffer_end(client->name, heap->name, len,
					   heap_mask, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;

		trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
					    heap_mask, flags, PTR_ERR(buffer));
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
			int ret_value = snprintf(&dbg_str[dbg_str_idx],
						len_left, "%s ", heap->name);
			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
			}
		}
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_mask, flags, -ENODEV);
		return ERR_PTR(-ENODEV);
	}

	if (IS_ERR(buffer)) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_mask, flags, PTR_ERR(buffer));
		pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
			 "0x%x) from heap(s) %sfor client %s with heap "
			 "mask 0x%x\n",
			 len, align, dbg_str, client->name, client->heap_mask);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}


	return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		mutex_unlock(&client->lock);
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

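/*
 * Illustrative in-kernel usage of the allocation API above (a sketch only:
 * the ion device pointer, heap id and flags below are examples and depend
 * on the platform's heap configuration, not guarantees made by this file):
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *	void *vaddr;
 *
 *	client = ion_client_create(idev, -1, "my-driver");
 *	handle = ion_alloc(client, SZ_4K, SZ_4K,
 *			   ION_HEAP(ION_SYSTEM_HEAP_ID), ION_FLAG_CACHED);
 *	vaddr = ion_map_kernel(client, handle);
 *	...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */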
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;

	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
			"heap_name", "size_in_bytes", "handle refcount",
			"buffer", "physical", "[domain,partition] - virt");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		seq_printf(s, "%16.16s: %16x : %16d : %12p",
			   handle->buffer->heap->name,
			   handle->buffer->size,
			   atomic_read(&handle->ref.refcount),
			   handle->buffer);

		if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
			type == ION_HEAP_TYPE_CARVEOUT ||
			type == (enum ion_heap_type) ION_HEAP_TYPE_CP)
			seq_printf(s, " : %12pa", &handle->buffer->priv_phys);
		else
			seq_printf(s, " : %12s", "N/A");

		seq_printf(s, "\n");
	}
	mutex_unlock(&client->lock);

	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

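/*
 * Create a client of the given ion device.  heap_mask limits which heap
 * types the client may allocate from and name shows up in debugfs.  The
 * caller's group leader is recorded (for non-kernel threads) so allocations
 * can be attributed in the debug output.
 */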
struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;
	unsigned int name_len;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	name_len = strnlen(name, 64);

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);

	client->name = kzalloc(name_len+1, GFP_KERNEL);
	if (!client->name) {
		if (task)
			put_task_struct(current->group_leader);
		kfree(client);
		return ERR_PTR(-ENOMEM);
	} else {
		strlcpy(client->name, name, name_len+1);
	}

	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	mutex_lock(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);


	client->debug_root = debugfs_create_file(name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

/**
 * ion_mark_dangling_buffers_locked() - Mark dangling buffers
 * @dev:	the ion device whose buffers will be searched
 *
 * Sets marked=1 for all known buffers associated with `dev' that no
 * longer have a handle pointing to them. dev->lock should be held
 * across a call to this function (and should only be unlocked after
 * checking for marked buffers).
 */
static void ion_mark_dangling_buffers_locked(struct ion_device *dev)
{
	struct rb_node *n, *n2;
	/* mark all buffers as 1 */
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		buf->marked = 1;
	}

	/* now see which buffers we can access */
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);

		mutex_lock(&client->lock);
		for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
			struct ion_handle *handle
				= rb_entry(n2, struct ion_handle, node);

			handle->buffer->marked = 0;

		}
		mutex_unlock(&client->lock);

	}
}

#ifdef CONFIG_ION_LEAK_CHECK
static u32 ion_debug_check_leaks_on_destroy;

static int ion_check_for_and_print_leaks(struct ion_device *dev)
{
	struct rb_node *n;
	int num_leaks = 0;

	if (!ion_debug_check_leaks_on_destroy)
		return 0;

	/* check for leaked buffers (those that no longer have a
	 * handle pointing to them) */
	ion_mark_dangling_buffers_locked(dev);

	/* Anyone still marked as a 1 means a leaked handle somewhere */
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		if (buf->marked == 1) {
			pr_info("Leaked ion buffer at %p\n", buf);
			num_leaks++;
		}
	}
	return num_leaks;
}
static void setup_ion_leak_check(struct dentry *debug_root)
{
	debugfs_create_bool("check_leaks_on_destroy", 0664, debug_root,
			    &ion_debug_check_leaks_on_destroy);
}
#else
static int ion_check_for_and_print_leaks(struct ion_device *dev)
{
	return 0;
}
static void setup_ion_leak_check(struct dentry *debug_root)
{
}
#endif

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;
	int num_leaks;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);

	num_leaks = ion_check_for_and_print_leaks(dev);

	mutex_unlock(&dev->lock);

	if (num_leaks) {
		struct task_struct *current_task = current;
		char current_task_name[TASK_COMM_LEN];
		get_task_comm(current_task_name, current_task);
		WARN(1, "%s: Detected %d leaked ion buffer%s.\n",
			__func__, num_leaks, num_leaks == 1 ? "" : "s");
		pr_info("task name at time of leak: %s, pid: %d\n",
			current_task_name, current_task->pid);
	}

	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
			unsigned long *flags)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*flags = buffer->flags;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_flags);

int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
			unsigned long *size)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*size = buffer->size;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_size);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

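/*
 * Build an sg_table describing a physically contiguous region starting at
 * buffer_base, split into chunk_size pieces.  The dma addresses and lengths
 * are filled in directly since the region is already known to the caller.
 */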
struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
					size_t chunk_size, size_t total_size)
{
	struct sg_table *table;
	int i, n_chunks, ret;
	struct scatterlist *sg;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	n_chunks = DIV_ROUND_UP(total_size, chunk_size);
	pr_debug("creating sg_table with %d chunks\n", n_chunks);

	ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
	if (ret)
		goto err0;

	for_each_sg(table->sgl, sg, table->nents, i) {
		dma_addr_t addr = buffer_base + i * chunk_size;
		sg_dma_address(sg) = addr;
		sg_dma_len(sg) = chunk_size;
	}

	return table;
err0:
	kfree(table);
	return ERR_PTR(ret);
}

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

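/*
 * One dirty bit is kept per sg entry (one page for fault-mapped buffers); a
 * set bit means the CPU may have touched that page since it was last synced
 * for the device.
 */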
static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
	unsigned long pages = buffer->sg_table->nents;
	unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;

	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
	if (!buffer->dirty)
		return -ENOMEM;
	return 0;
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

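/*
 * Sync any pages the CPU has dirtied back to the device, then zap the user
 * mappings so the next CPU access faults the page back in (and marks it
 * dirty again).  Only relevant for buffers that fault in their user
 * mappings.
 */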
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;
	struct ion_vma_list *vma_list;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (!test_bit(i, buffer->dirty))
			continue;
		dma_sync_sg_for_device(dev, sg, 1, dir);
		clear_bit(i, buffer->dirty);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

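/*
 * Fault handler for fault-mapped buffers: mark the faulting page dirty,
 * sync it for the CPU and insert just that page into the vma.
 */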
int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct scatterlist *sg;
	int i;

	mutex_lock(&buffer->lock);
	set_bit(vmf->pgoff, buffer->dirty);

	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (i != vmf->pgoff)
			continue;
		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			       sg_page(sg));
		break;
	}
	mutex_unlock(&buffer->lock);
	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);

	if (buffer->heap->ops->unmap_user)
		buffer->heap->ops->unmap_user(buffer->heap, buffer);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

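/*
 * mmap takes one of two paths: buffers that fault their pages in are wired
 * up to ion_vma_ops above, everything else is mapped up front by the heap's
 * map_user op (write-combined unless the buffer is cached).
 */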
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		vma->vm_flags |= VM_MIXEDMAP;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

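/*
 * ION_IOC_SYNC helper: write the whole buffer back for the device.  Only
 * dma-bufs exported by this driver can be synced here.
 */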
static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.heap_mask, data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_MAP:
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;
		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;

		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		int ret = 0;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle)) {
			ret = PTR_ERR(data.handle);
			data.handle = NULL;
		}
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_SYNC:
	{
		struct ion_fd_data data;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		ion_sync_for_device(client, data.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	case ION_IOC_CLEAN_CACHES:
		return client->dev->custom_ioctl(client,
						ION_IOC_CLEAN_CACHES, arg);
	case ION_IOC_INV_CACHES:
		return client->dev->custom_ioctl(client,
						ION_IOC_INV_CACHES, arg);
	case ION_IOC_CLEAN_INV_CACHES:
		return client->dev->custom_ioctl(client,
						ION_IOC_CLEAN_INV_CACHES, arg);
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, -1, debug_name);
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_ids id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

Olav Haugan0671b9a2012-05-25 11:58:56 -07001424/**
1425 * Searches through a client's handles to find whether the buffer is owned
1426 * by this client. Used for debug output.
1427 * @param client pointer to candidate owner of buffer
1428 * @param buf pointer to buffer that we are trying to find the owner of
1429 * @return 1 if found, 0 otherwise
1430 */
1431static int ion_debug_find_buffer_owner(const struct ion_client *client,
1432 const struct ion_buffer *buf)
1433{
1434 struct rb_node *n;
1435
1436 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1437 const struct ion_handle *handle = rb_entry(n,
1438 const struct ion_handle,
1439 node);
1440 if (handle->buffer == buf)
1441 return 1;
1442 }
1443 return 0;
1444}
1445
1446/**
1447 * Adds a mem_map_data entry to the mem_map tree, keyed by buffer address.
1448 * Used for debug output.
1449 * @param mem_map The mem_map tree
1450 * @param data The new data to add to the tree
1451 */
1452static void ion_debug_mem_map_add(struct rb_root *mem_map,
1453 struct mem_map_data *data)
1454{
1455 struct rb_node **p = &mem_map->rb_node;
1456 struct rb_node *parent = NULL;
1457 struct mem_map_data *entry;
1458
1459 while (*p) {
1460 parent = *p;
1461 entry = rb_entry(parent, struct mem_map_data, node);
1462
1463 if (data->addr < entry->addr) {
1464 p = &(*p)->rb_left;
1465 } else if (data->addr > entry->addr) {
1466 p = &(*p)->rb_right;
1467 } else {
1468 			pr_err("%s: mem_map_data already found.\n", __func__);
1469 BUG();
1470 }
1471 }
1472 rb_link_node(&data->node, parent, p);
1473 rb_insert_color(&data->node, mem_map);
1474}
1475
1476/**
1477 * Search for an owner of a buffer by iterating over all ION clients.
1478 * @param dev ion device containing pointers to all the clients.
1479 * @param buffer pointer to buffer we are trying to find the owner of.
1480 * @return name of owner, or NULL if no client owns the buffer.
1481 */
1482const char *ion_debug_locate_owner(const struct ion_device *dev,
1483 const struct ion_buffer *buffer)
1484{
1485 struct rb_node *j;
1486 const char *client_name = NULL;
1487
Laura Abbottb14ed962012-01-30 14:18:08 -08001488 for (j = rb_first(&dev->clients); j && !client_name;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001489 j = rb_next(j)) {
1490 struct ion_client *client = rb_entry(j, struct ion_client,
1491 node);
1492 if (ion_debug_find_buffer_owner(client, buffer))
1493 client_name = client->name;
1494 }
1495 return client_name;
1496}
1497
1498/**
1499 * Create a mem_map of the heap.
1500 * @param s seq_file to log error message to.
1501 * @param heap The heap to create mem_map for.
1502 * @param mem_map The mem map to be created.
1503 */
1504void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
1505 struct rb_root *mem_map)
1506{
1507 struct ion_device *dev = heap->dev;
1508 struct rb_node *n;
Chintan Pandyadaf75622013-01-29 19:40:01 +05301509 size_t size;
1510
1511 if (!heap->ops->phys)
1512 return;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001513
1514 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1515 struct ion_buffer *buffer =
1516 rb_entry(n, struct ion_buffer, node);
1517 if (buffer->heap->id == heap->id) {
1518 struct mem_map_data *data =
1519 kzalloc(sizeof(*data), GFP_KERNEL);
1520 if (!data) {
1521 				seq_printf(s,
1522 					"ERROR: out of memory. Part of memory map will not be logged\n");
1523 break;
1524 }
Chintan Pandyadaf75622013-01-29 19:40:01 +05301525
1526 buffer->heap->ops->phys(buffer->heap, buffer,
1527 &(data->addr), &size);
1528 data->size = (unsigned long) size;
1529 data->addr_end = data->addr + data->size - 1;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001530 data->client_name = ion_debug_locate_owner(dev, buffer);
1531 ion_debug_mem_map_add(mem_map, data);
1532 }
1533 }
1534}
1535
1536/**
1537 * Free the memory allocated by ion_debug_mem_map_create
1538 * @param mem_map The mem map to free.
1539 */
1540static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
1541{
1542 if (mem_map) {
1543 struct rb_node *n;
1544 		while ((n = rb_first(mem_map)) != NULL) {
1545 struct mem_map_data *data =
1546 rb_entry(n, struct mem_map_data, node);
1547 rb_erase(&data->node, mem_map);
1548 kfree(data);
1549 }
1550 }
1551}
1552
1553/**
1554 * Print heap debug information.
1555 * @param s seq_file to log message to.
1556 * @param heap pointer to heap that we will print debug information for.
1557 */
1558static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
1559{
1560 if (heap->ops->print_debug) {
1561 struct rb_root mem_map = RB_ROOT;
1562 ion_debug_mem_map_create(s, heap, &mem_map);
1563 heap->ops->print_debug(heap, s, &mem_map);
1564 ion_debug_mem_map_destroy(&mem_map);
1565 }
1566}
1567
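/**
 * debugfs show function for a heap: under the device lock, prints one line
 * per client that has buffers on this heap (task comm or client name, pid,
 * total bytes) and then lets the heap print its own state via
 * ion_heap_print_debug().
 */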
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001568static int ion_debug_heap_show(struct seq_file *s, void *unused)
1569{
1570 struct ion_heap *heap = s->private;
1571 struct ion_device *dev = heap->dev;
1572 struct rb_node *n;
1573
Olav Haugane4900b52012-05-25 11:58:03 -07001574 mutex_lock(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001575 	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001576
Laura Abbottb14ed962012-01-30 14:18:08 -08001577 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001578 struct ion_client *client = rb_entry(n, struct ion_client,
1579 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001580 size_t size = ion_debug_heap_total(client, heap->id);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001581 if (!size)
1582 continue;
Laura Abbottb14ed962012-01-30 14:18:08 -08001583 if (client->task) {
1584 char task_comm[TASK_COMM_LEN];
1585
1586 get_task_comm(task_comm, client->task);
1587 			seq_printf(s, "%16s %16u %16zu\n", task_comm,
1588 				   client->pid, size);
1589 } else {
1590 			seq_printf(s, "%16s %16u %16zu\n", client->name,
1591 				   client->pid, size);
1592 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001593 }
Olav Haugan0671b9a2012-05-25 11:58:56 -07001594 ion_heap_print_debug(s, heap);
Olav Haugane4900b52012-05-25 11:58:03 -07001595 mutex_unlock(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001596 return 0;
1597}
1598
1599static int ion_debug_heap_open(struct inode *inode, struct file *file)
1600{
1601 return single_open(file, ion_debug_heap_show, inode->i_private);
1602}
1603
1604static const struct file_operations debug_heap_fops = {
1605 .open = ion_debug_heap_open,
1606 .read = seq_read,
1607 .llseek = seq_lseek,
1608 .release = single_release,
1609};
1610
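/**
 * Registers a heap with the ion device: warns if any of the mandatory ops
 * (allocate, free, map_dma, unmap_dma) are missing, inserts the heap into
 * the device's id-sorted rbtree (duplicate ids are rejected), and creates a
 * debugfs file named after the heap.
 */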
1611void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1612{
1613 struct rb_node **p = &dev->heaps.rb_node;
1614 struct rb_node *parent = NULL;
1615 struct ion_heap *entry;
1616
Laura Abbottb14ed962012-01-30 14:18:08 -08001617 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1618 !heap->ops->unmap_dma)
1619 		pr_err("%s: cannot add heap with invalid ops struct.\n",
1620 __func__);
1621
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001622 heap->dev = dev;
1623 mutex_lock(&dev->lock);
1624 while (*p) {
1625 parent = *p;
1626 entry = rb_entry(parent, struct ion_heap, node);
1627
1628 if (heap->id < entry->id) {
1629 p = &(*p)->rb_left;
1630 		} else if (heap->id > entry->id) {
1631 p = &(*p)->rb_right;
1632 } else {
1633 			pr_err("%s: cannot insert multiple heaps with id %d\n",
1634 				__func__, heap->id);
1635 goto end;
1636 }
1637 }
1638
1639 rb_link_node(&heap->node, parent, p);
1640 rb_insert_color(&heap->node, &dev->heaps);
1641 debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1642 &debug_heap_fops);
1643end:
1644 mutex_unlock(&dev->lock);
1645}
1646
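/**
 * Secures one buffer through its heap's secure_buffer op. The client lock
 * is held across the call so the handle cannot be freed underneath us.
 * Returns the op's result, or -EINVAL for an invalid handle or a heap type
 * that does not allow per-handle securing.
 */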
Laura Abbott93619302012-10-11 11:51:40 -07001647int ion_secure_handle(struct ion_client *client, struct ion_handle *handle,
1648 int version, void *data, int flags)
1649{
1650 int ret = -EINVAL;
1651 struct ion_heap *heap;
1652 struct ion_buffer *buffer;
1653
1654 mutex_lock(&client->lock);
1655 if (!ion_handle_validate(client, handle)) {
1656 WARN(1, "%s: invalid handle passed to secure.\n", __func__);
1657 goto out_unlock;
1658 }
1659
1660 buffer = handle->buffer;
1661 heap = buffer->heap;
1662
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001663 if (!ion_heap_allow_handle_secure(heap->type)) {
Laura Abbott93619302012-10-11 11:51:40 -07001664 		pr_err("%s: cannot secure buffer from non-secure heap\n",
1665 __func__);
1666 goto out_unlock;
1667 }
1668
1669 BUG_ON(!buffer->heap->ops->secure_buffer);
1670 /*
1671 * Protect the handle via the client lock to ensure we aren't
1672 * racing with free
1673 */
1674 ret = buffer->heap->ops->secure_buffer(buffer, version, data, flags);
1675
1676out_unlock:
1677 mutex_unlock(&client->lock);
1678 return ret;
1679}
1680
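/**
 * Counterpart to ion_secure_handle(): unsecures one buffer through the
 * heap's unsecure_buffer op, with the same handle validation and locking.
 */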
1681int ion_unsecure_handle(struct ion_client *client, struct ion_handle *handle)
1682{
1683 int ret = -EINVAL;
1684 struct ion_heap *heap;
1685 struct ion_buffer *buffer;
1686
1687 mutex_lock(&client->lock);
1688 if (!ion_handle_validate(client, handle)) {
1689 		WARN(1, "%s: invalid handle passed to unsecure.\n", __func__);
1690 goto out_unlock;
1691 }
1692
1693 buffer = handle->buffer;
1694 heap = buffer->heap;
1695
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001696 if (!ion_heap_allow_handle_secure(heap->type)) {
Laura Abbott93619302012-10-11 11:51:40 -07001697 		pr_err("%s: cannot unsecure buffer from non-secure heap\n",
1698 __func__);
1699 goto out_unlock;
1700 }
1701
1702 BUG_ON(!buffer->heap->ops->unsecure_buffer);
1703 /*
1704 * Protect the handle via the client lock to ensure we aren't
1705 * racing with free
1706 */
1707 ret = buffer->heap->ops->unsecure_buffer(buffer, 0);
1708
1709out_unlock:
1710 mutex_unlock(&client->lock);
1711 return ret;
1712}
1713
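/**
 * Secures a whole heap: walks the device's heap tree for the heap whose id
 * matches heap_id (heap types that do not allow securing are skipped) and
 * calls its secure_heap op. Returns the op's result, -EINVAL if the heap
 * lacks the op, or 0 if no matching heap was found.
 */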
Laura Abbott7e446482012-06-13 15:59:39 -07001714int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
1715 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001716{
1717 struct rb_node *n;
1718 int ret_val = 0;
1719
1720 /*
1721 * traverse the list of heaps available in this system
1722 * and find the heap that is specified.
1723 */
1724 mutex_lock(&dev->lock);
1725 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
1726 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001727 if (!ion_heap_allow_heap_secure(heap->type))
Olav Haugan0a852512012-01-09 10:20:55 -08001728 continue;
1729 if (ION_HEAP(heap->id) != heap_id)
1730 continue;
1731 if (heap->ops->secure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001732 ret_val = heap->ops->secure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001733 else
1734 ret_val = -EINVAL;
1735 break;
1736 }
1737 mutex_unlock(&dev->lock);
1738 return ret_val;
1739}
Olav Hauganbd453a92012-07-05 14:21:34 -07001740EXPORT_SYMBOL(ion_secure_heap);
Olav Haugan0a852512012-01-09 10:20:55 -08001741
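/**
 * Counterpart to ion_secure_heap(): finds the heap matching heap_id and
 * calls its unsecure_heap op with the same version/data arguments.
 */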
Laura Abbott7e446482012-06-13 15:59:39 -07001742int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
1743 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001744{
1745 struct rb_node *n;
1746 int ret_val = 0;
1747
1748 /*
1749 * traverse the list of heaps available in this system
1750 * and find the heap that is specified.
1751 */
1752 mutex_lock(&dev->lock);
1753 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
1754 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001755 if (!ion_heap_allow_heap_secure(heap->type))
Olav Haugan0a852512012-01-09 10:20:55 -08001756 continue;
1757 if (ION_HEAP(heap->id) != heap_id)
1758 continue;
1759 		if (heap->ops->unsecure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001760 ret_val = heap->ops->unsecure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001761 else
1762 ret_val = -EINVAL;
1763 break;
1764 }
1765 mutex_unlock(&dev->lock);
1766 return ret_val;
1767}
Olav Hauganbd453a92012-07-05 14:21:34 -07001768EXPORT_SYMBOL(ion_unsecure_heap);
Olav Haugan0a852512012-01-09 10:20:55 -08001769
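/**
 * debugfs show function for check_leaked_fds: marks dangling buffers under
 * the device lock, then prints every buffer still marked, i.e. buffers that
 * are alive but no longer reachable through any client handle.
 */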
Laura Abbott404f8242011-10-31 14:22:53 -07001770static int ion_debug_leak_show(struct seq_file *s, void *unused)
1771{
1772 struct ion_device *dev = s->private;
1773 struct rb_node *n;
Laura Abbott404f8242011-10-31 14:22:53 -07001774
Laura Abbott404f8242011-10-31 14:22:53 -07001775 	seq_printf(s, "%16s %16s %16s %16s\n", "buffer", "heap", "size",
1776 "ref cnt");
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001777
Laura Abbott404f8242011-10-31 14:22:53 -07001778 mutex_lock(&dev->lock);
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001779 ion_mark_dangling_buffers_locked(dev);
Laura Abbott404f8242011-10-31 14:22:53 -07001780
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001781 /* Anyone still marked as a 1 means a leaked handle somewhere */
Laura Abbott404f8242011-10-31 14:22:53 -07001782 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1783 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
1784 node);
1785
1786 if (buf->marked == 1)
1787 			seq_printf(s, "%16p %16s %16zu %16d\n",
1788 				   buf, buf->heap->name, buf->size,
1789 				   atomic_read(&buf->ref.refcount));
1790 }
1791 mutex_unlock(&dev->lock);
1792 return 0;
1793}
1794
1795static int ion_debug_leak_open(struct inode *inode, struct file *file)
1796{
1797 return single_open(file, ion_debug_leak_show, inode->i_private);
1798}
1799
1800static const struct file_operations debug_leak_fops = {
1801 .open = ion_debug_leak_open,
1802 .read = seq_read,
1803 .llseek = seq_lseek,
1804 .release = single_release,
1805};
1806
1807
1808
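/**
 * Allocates the ion_device, registers the /dev/ion misc device with
 * ion_fops, creates the "ion" debugfs directory (including the
 * check_leaked_fds file), and records the SoC-specific custom_ioctl
 * callback used by ION_IOC_CUSTOM and the cache maintenance ioctls.
 *
 * A rough bring-up sketch for a platform driver; the callback and heap
 * names below are illustrative, not part of this file:
 *
 *	idev = ion_device_create(my_soc_custom_ioctl);
 *	if (IS_ERR_OR_NULL(idev))
 *		return PTR_ERR(idev);
 *	for (i = 0; i < nr_heaps; i++)
 *		ion_device_add_heap(idev, heaps[i]);
 */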
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001809struct ion_device *ion_device_create(long (*custom_ioctl)
1810 (struct ion_client *client,
1811 unsigned int cmd,
1812 unsigned long arg))
1813{
1814 struct ion_device *idev;
1815 int ret;
1816
1817 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1818 if (!idev)
1819 return ERR_PTR(-ENOMEM);
1820
1821 idev->dev.minor = MISC_DYNAMIC_MINOR;
1822 idev->dev.name = "ion";
1823 idev->dev.fops = &ion_fops;
1824 idev->dev.parent = NULL;
1825 ret = misc_register(&idev->dev);
1826 if (ret) {
1827 pr_err("ion: failed to register misc device.\n");
1828 return ERR_PTR(ret);
1829 }
1830
1831 idev->debug_root = debugfs_create_dir("ion", NULL);
1832 if (IS_ERR_OR_NULL(idev->debug_root))
1833 pr_err("ion: failed to create debug files.\n");
1834
1835 idev->custom_ioctl = custom_ioctl;
1836 idev->buffers = RB_ROOT;
1837 mutex_init(&idev->lock);
1838 idev->heaps = RB_ROOT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001839 idev->clients = RB_ROOT;
Laura Abbott404f8242011-10-31 14:22:53 -07001840 debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
1841 &debug_leak_fops);
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001842
1843 setup_ion_leak_check(idev->debug_root);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001844 return idev;
1845}
1846
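/**
 * Unregisters the misc device and frees the ion_device. As the XXX note
 * below says, heaps and clients are not torn down here.
 */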
1847void ion_device_destroy(struct ion_device *dev)
1848{
1849 misc_deregister(&dev->dev);
1850 /* XXX need to free the heaps and clients ? */
1851 kfree(dev);
1852}
Laura Abbottb14ed962012-01-30 14:18:08 -08001853
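/**
 * Boot-time helper: memblock_reserve()s the physical range of every
 * platform heap with a nonzero size so it stays out of the page allocator;
 * failures are only logged. Typically called from a board file's reserve
 * hook, roughly (illustrative):
 *
 *	ion_reserve(&my_ion_platform_data);
 */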
1854void __init ion_reserve(struct ion_platform_data *data)
1855{
1856 int i, ret;
1857
1858 for (i = 0; i < data->nr; i++) {
1859 if (data->heaps[i].size == 0)
1860 continue;
1861 ret = memblock_reserve(data->heaps[i].base,
1862 data->heaps[i].size);
1863 if (ret)
Laura Abbott1135c9e2013-03-13 15:33:40 -07001864 			pr_err("memblock reserve of %zx@%pa failed\n",
Laura Abbottb14ed962012-01-30 14:18:08 -08001865 data->heaps[i].size,
Laura Abbott1135c9e2013-03-13 15:33:40 -07001866 &data->heaps[i].base);
Laura Abbottb14ed962012-01-30 14:18:08 -08001867 }
1868}