/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/msm_ion.h>
#include <trace/events/kmem.h>

#include <mach/iommu_domains.h>
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev: the actual misc device
 * @buffers: an rb tree of all the existing buffers
 * @buffer_lock: lock protecting the tree of buffers
 * @lock: rwsem protecting the tree of heaps and clients
 * @heaps: list of all the heaps in the system
 * @clients: an rb tree of all the clients, kernel and userspace
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node: node in the tree of all clients
 * @dev: backpointer to ion device
 * @handles: an rb tree of all the handles in this client
 * @lock: lock protecting the tree of handles
 * @heap_mask: mask of all supported heaps
 * @name: used for debugging
 * @task: used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handle tree
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref: reference count
 * @client: back pointer to the client the buffer resides in
 * @buffer: pointer to the buffer
 * @node: node in the client's handle rbtree
 * @kmap_cnt: count of times this client has mapped to kernel
 * @iommu_map_cnt: count of times this client has mapped for iommu
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client. Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int iommu_map_cnt;
};

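/*
 * A buffer allocated with ION_FLAG_CACHED but without
 * ION_FLAG_CACHED_NEEDS_SYNC relies on the fault handler further down to
 * map pages into userspace one at a time and to track which pages are
 * dirty, so cache maintenance can be limited to the pages a client
 * actually touched.  With ION_FLAG_CACHED_NEEDS_SYNC set the client does
 * cache maintenance explicitly and the whole buffer is mapped up front.
 */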
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return ((buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

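/*
 * Allocate a buffer from @heap, grab its sg_table from the heap and, for
 * buffers that will be faulted into userspace page by page, verify that
 * the sg_table is page-sized and allocate the dirty-page bitmap.  The new
 * buffer is inserted into dev->buffers with a refcount of one.
 */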
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
			    i) {
			if (sg_dma_len(sg) == PAGE_SIZE)
				continue;
			pr_err("%s: cached mappings that will be faulted in "
			       "must have pagewise sg_lists\n", __func__);
			ret = -EINVAL;
			goto err;
		}

		ret = ion_buffer_alloc_dirty(buffer);
		if (ret)
			goto err;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here.  However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg. The implicit contract here is that
	   memory coming from the heaps is ready for dma, ie if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (sg_dma_address(sg) == 0)
			sg_dma_address(sg) = sg_phys(sg);
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
	kfree(buffer);
	return ERR_PTR(ret);
}

static void ion_delayed_unsecure(struct ion_buffer *buffer)
{
	if (buffer->heap->ops->unsecure_buffer)
		buffer->heap->ops->unsecure_buffer(buffer, 1);
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);

	ion_delayed_unsecure(buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);
	if (buffer->flags & ION_FLAG_CACHED)
		kfree(buffer->dirty);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_put(buffer);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

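/*
 * Typical in-kernel usage, as an illustrative sketch only -- the device
 * pointer, client name and heap id macro below are placeholders taken
 * from msm_ion.h conventions, not definitions in this file:
 *
 *	struct ion_client *client = ion_client_create(idev, -1, "example");
 *	struct ion_handle *handle = ion_alloc(client, SZ_4K, SZ_4K,
 *					      ION_HEAP(ION_SYSTEM_HEAP_ID),
 *					      ION_FLAG_CACHED);
 *	void *vaddr = ion_map_kernel(client, handle);
 *	...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */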
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	unsigned long secure_allocation = flags & ION_FLAG_SECURE;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	/*
	 * For now, we don't want to fault in pages individually since
	 * clients are already doing manual cache maintenance. In
	 * other words, the implicit caching infrastructure is in
	 * place (in code) but should not be used.
	 */
	flags |= ION_FLAG_CACHED_NEEDS_SYNC;

	pr_debug("%s: len %d align %d heap_mask %u flags %x\n", __func__, len,
		 align, heap_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap type */
		if (!((1 << heap->id) & heap_mask))
			continue;
		/* Do not allow un-secure heap if secure is specified */
		if (secure_allocation &&
		    !ion_heap_allow_secure_allocation(heap->type))
			continue;
		trace_ion_alloc_buffer_start(client->name, heap->name, len,
					     heap_mask, flags);
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		trace_ion_alloc_buffer_end(client->name, heap->name, len,
					   heap_mask, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;

		trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
					    heap_mask, flags, PTR_ERR(buffer));
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
			int ret_value = snprintf(&dbg_str[dbg_str_idx],
						len_left, "%s ", heap->name);
			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
			}
		}
	}
	up_read(&dev->lock);

	if (buffer == NULL) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_mask, flags, -ENODEV);
		return ERR_PTR(-ENODEV);
	}

	if (IS_ERR(buffer)) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_mask, flags, PTR_ERR(buffer));
		pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
			 "0x%x) from heap(s) %sfor client %s with heap "
			 "mask 0x%x\n",
			 len, align, dbg_str, client->name, client->heap_mask);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		mutex_unlock(&client->lock);
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

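/*
 * Kernel mappings are reference counted at two levels: per buffer
 * (buffer->kmap_cnt, so the heap's map_kernel/unmap_kernel only runs on
 * the first map and last unmap) and per handle (handle->kmap_cnt, so a
 * client's mappings can be released when its handle is destroyed).
 */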
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;

	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
		   "heap_name", "size_in_bytes", "handle refcount",
		   "buffer", "physical", "[domain,partition] - virt");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		seq_printf(s, "%16.16s: %16x : %16d : %12p",
			   handle->buffer->heap->name,
			   handle->buffer->size,
			   atomic_read(&handle->ref.refcount),
			   handle->buffer);

		if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
		    type == ION_HEAP_TYPE_CARVEOUT ||
		    type == (enum ion_heap_type) ION_HEAP_TYPE_CP)
			seq_printf(s, " : %12pa", &handle->buffer->priv_phys);
		else
			seq_printf(s, " : %12s", "N/A");

		seq_printf(s, "\n");
	}
	mutex_unlock(&client->lock);

	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

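/*
 * Create a client of @dev restricted to the heap types in @heap_mask.
 * The client is keyed by pointer in dev->clients and gets a debugfs
 * entry named after @name for per-client accounting.
 */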
struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;
	unsigned int name_len;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	name_len = strnlen(name, 64);

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);

	client->name = kzalloc(name_len+1, GFP_KERNEL);
	if (!client->name) {
		put_task_struct(current->group_leader);
		kfree(client);
		return ERR_PTR(-ENOMEM);
	} else {
		strlcpy(client->name, name, name_len+1);
	}

	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	down_write(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	up_write(&dev->lock);

	return client;
}

/**
 * ion_mark_dangling_buffers_locked() - Mark dangling buffers
 * @dev: the ion device whose buffers will be searched
 *
 * Sets marked=1 for all known buffers associated with `dev' that no
 * longer have a handle pointing to them. dev->lock should be held
 * across a call to this function (and should only be unlocked after
 * checking for marked buffers).
 */
static void ion_mark_dangling_buffers_locked(struct ion_device *dev)
{
	struct rb_node *n, *n2;
	/* mark all buffers as 1 */
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		buf->marked = 1;
	}

	/* now see which buffers we can access */
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);

		mutex_lock(&client->lock);
		for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
			struct ion_handle *handle
				= rb_entry(n2, struct ion_handle, node);

			handle->buffer->marked = 0;

		}
		mutex_unlock(&client->lock);

	}
}

#ifdef CONFIG_ION_LEAK_CHECK
static u32 ion_debug_check_leaks_on_destroy;

static int ion_check_for_and_print_leaks(struct ion_device *dev)
{
	struct rb_node *n;
	int num_leaks = 0;

	if (!ion_debug_check_leaks_on_destroy)
		return 0;

	/* check for leaked buffers (those that no longer have a
	 * handle pointing to them) */
	ion_mark_dangling_buffers_locked(dev);

	/* Anyone still marked as a 1 means a leaked handle somewhere */
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		if (buf->marked == 1) {
			pr_info("Leaked ion buffer at %p\n", buf);
			num_leaks++;
		}
	}
	return num_leaks;
}
static void setup_ion_leak_check(struct dentry *debug_root)
{
	debugfs_create_bool("check_leaks_on_destroy", 0664, debug_root,
			    &ion_debug_check_leaks_on_destroy);
}
#else
static int ion_check_for_and_print_leaks(struct ion_device *dev)
{
	return 0;
}
static void setup_ion_leak_check(struct dentry *debug_root)
{
}
#endif

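/*
 * Tear down a client: destroy all of its handles (dropping the buffer
 * references they hold), unlink it from dev->clients, remove its debugfs
 * entry and, when CONFIG_ION_LEAK_CHECK is enabled, report any buffers
 * left with no handle pointing at them.
 */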
void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;
	int num_leaks;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);

	num_leaks = ion_check_for_and_print_leaks(dev);

	up_write(&dev->lock);

	if (num_leaks) {
		struct task_struct *current_task = current;
		char current_task_name[TASK_COMM_LEN];
		get_task_comm(current_task_name, current_task);
		WARN(1, "%s: Detected %d leaked ion buffer%s.\n",
		     __func__, num_leaks, num_leaks == 1 ? "" : "s");
		pr_info("task name at time of leak: %s, pid: %d\n",
			current_task_name, current_task->pid);
	}

	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
			 unsigned long *flags)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*flags = buffer->flags;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_flags);

int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
			unsigned long *size)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*size = buffer->size;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_size);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
					size_t chunk_size, size_t total_size)
{
	struct sg_table *table;
	int i, n_chunks, ret;
	struct scatterlist *sg;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	n_chunks = DIV_ROUND_UP(total_size, chunk_size);
	pr_debug("creating sg_table with %d chunks\n", n_chunks);

	ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
	if (ret)
		goto err0;

	for_each_sg(table->sgl, sg, table->nents, i) {
		dma_addr_t addr = buffer_base + i * chunk_size;
		sg_dma_address(sg) = addr;
		sg_dma_len(sg) = chunk_size;
	}

	return table;
err0:
	kfree(table);
	return ERR_PTR(ret);
}

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

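/*
 * One dirty bit is kept per sg entry (one page for buffers that use the
 * fault path).  ion_vm_fault() sets the bit when a page is touched from
 * userspace; ion_buffer_sync_for_device() syncs and clears only the
 * dirty pages before handing the buffer to a device.
 */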
static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
	unsigned long pages = buffer->sg_table->nents;
	unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;

	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
	if (!buffer->dirty)
		return -ENOMEM;
	return 0;
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;
	struct ion_vma_list *vma_list;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (!test_bit(i, buffer->dirty))
			continue;
		dma_sync_sg_for_device(dev, sg, 1, dir);
		clear_bit(i, buffer->dirty);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

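/*
 * Fault handler for buffers on the fault path: sync the faulting page
 * back for the CPU, mark it dirty and insert it into the VMA.  The page
 * is zapped again by ion_buffer_sync_for_device() so the next CPU access
 * faults and repeats the cycle.
 */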
int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct scatterlist *sg;
	int i;

	mutex_lock(&buffer->lock);
	set_bit(vmf->pgoff, buffer->dirty);

	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (i != vmf->pgoff)
			continue;
		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			       sg_page(sg));
		break;
	}
	mutex_unlock(&buffer->lock);
	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);

	if (buffer->heap->ops->unmap_user)
		buffer->heap->ops->unmap_user(buffer->heap, buffer);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

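/*
 * mmap of the exported dma-buf.  Buffers on the fault path are mapped
 * lazily through ion_vma_ops; everything else is mapped up front by the
 * heap's map_user op, with write-combine protection for uncached buffers.
 */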
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		vma->vm_flags |= VM_MIXEDMAP;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

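/*
 * Export @handle's buffer as a dma-buf and return a file descriptor for
 * it.  The dma-buf takes its own reference on the buffer, so the fd keeps
 * the memory alive even after the handle is freed.
 */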
int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);

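/*
 * Turn a dma-buf fd that was exported by ion back into a handle for
 * @client.  Foreign dma-bufs are rejected; if the client already has a
 * handle for the underlying buffer, that handle's refcount is bumped
 * instead of creating a duplicate.
 */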
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

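/*
 * Illustrative userspace flow (a sketch, not part of this file); the
 * ioctl numbers and struct layouts come from linux/ion.h and
 * linux/msm_ion.h:
 *
 *	int ion_fd = open("/dev/ion", O_RDONLY);
 *	struct ion_allocation_data alloc = {
 *		.len = 4096, .align = 4096,
 *		.heap_mask = 1 << heap_id, .flags = 0,
 *	};
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *	struct ion_fd_data share = { .handle = alloc.handle };
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);
 *	void *ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, share.fd, 0);
 */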
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001273static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1274{
1275 struct ion_client *client = filp->private_data;
1276
1277 switch (cmd) {
1278 case ION_IOC_ALLOC:
1279 {
1280 struct ion_allocation_data data;
1281
1282 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1283 return -EFAULT;
1284 data.handle = ion_alloc(client, data.len, data.align,
Hanumant Singh7d72bad2012-08-29 18:39:44 -07001285 data.heap_mask, data.flags);
KyongHo Cho9ae7e012011-09-07 11:27:07 +09001286
Laura Abbottb14ed962012-01-30 14:18:08 -08001287 if (IS_ERR(data.handle))
1288 return PTR_ERR(data.handle);
KyongHo Cho9ae7e012011-09-07 11:27:07 +09001289
Laura Abbottb14ed962012-01-30 14:18:08 -08001290 if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
1291 ion_free(client, data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001292 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001293 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001294 break;
1295 }
1296 case ION_IOC_FREE:
1297 {
1298 struct ion_handle_data data;
1299 bool valid;
1300
1301 if (copy_from_user(&data, (void __user *)arg,
1302 sizeof(struct ion_handle_data)))
1303 return -EFAULT;
1304 mutex_lock(&client->lock);
1305 valid = ion_handle_validate(client, data.handle);
1306 mutex_unlock(&client->lock);
1307 if (!valid)
1308 return -EINVAL;
1309 ion_free(client, data.handle);
1310 break;
1311 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001312 case ION_IOC_MAP:
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001313 case ION_IOC_SHARE:
1314 {
1315 struct ion_fd_data data;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001316 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1317 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001318
Laura Abbottb14ed962012-01-30 14:18:08 -08001319 data.fd = ion_share_dma_buf(client, data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001320 if (copy_to_user((void __user *)arg, &data, sizeof(data)))
1321 return -EFAULT;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001322 if (data.fd < 0)
1323 return data.fd;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001324 break;
1325 }
1326 case ION_IOC_IMPORT:
1327 {
1328 struct ion_fd_data data;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001329 int ret = 0;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001330 if (copy_from_user(&data, (void __user *)arg,
1331 sizeof(struct ion_fd_data)))
1332 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001333 data.handle = ion_import_dma_buf(client, data.fd);
Olav Haugan865e97f2012-05-15 14:40:11 -07001334 if (IS_ERR(data.handle)) {
1335 ret = PTR_ERR(data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001336 data.handle = NULL;
Olav Haugan865e97f2012-05-15 14:40:11 -07001337 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001338 if (copy_to_user((void __user *)arg, &data,
1339 sizeof(struct ion_fd_data)))
1340 return -EFAULT;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001341 if (ret < 0)
1342 return ret;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001343 break;
1344 }
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001345 case ION_IOC_SYNC:
1346 {
1347 struct ion_fd_data data;
1348 if (copy_from_user(&data, (void __user *)arg,
1349 sizeof(struct ion_fd_data)))
1350 return -EFAULT;
1351 ion_sync_for_device(client, data.fd);
1352 break;
1353 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001354 case ION_IOC_CUSTOM:
1355 {
1356 struct ion_device *dev = client->dev;
1357 struct ion_custom_data data;
1358
1359 if (!dev->custom_ioctl)
1360 return -ENOTTY;
1361 if (copy_from_user(&data, (void __user *)arg,
1362 sizeof(struct ion_custom_data)))
1363 return -EFAULT;
1364 return dev->custom_ioctl(client, data.cmd, data.arg);
1365 }
Laura Abbottabcb6f72011-10-04 16:26:49 -07001366 case ION_IOC_CLEAN_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001367 return client->dev->custom_ioctl(client,
1368 ION_IOC_CLEAN_CACHES, arg);
Laura Abbottabcb6f72011-10-04 16:26:49 -07001369 case ION_IOC_INV_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001370 return client->dev->custom_ioctl(client,
1371 ION_IOC_INV_CACHES, arg);
Laura Abbottabcb6f72011-10-04 16:26:49 -07001372 case ION_IOC_CLEAN_INV_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001373 return client->dev->custom_ioctl(client,
1374 ION_IOC_CLEAN_INV_CACHES, arg);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001375 default:
1376 return -ENOTTY;
1377 }
1378 return 0;
1379}
1380
1381static int ion_release(struct inode *inode, struct file *file)
1382{
1383 struct ion_client *client = file->private_data;
1384
1385 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbottb14ed962012-01-30 14:18:08 -08001386 ion_client_destroy(client);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001387 return 0;
1388}
1389
1390static int ion_open(struct inode *inode, struct file *file)
1391{
1392 struct miscdevice *miscdev = file->private_data;
1393 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1394 struct ion_client *client;
Laura Abbotteed86032011-12-05 15:32:36 -08001395 char debug_name[64];
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001396
1397 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbotteed86032011-12-05 15:32:36 -08001398 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1399 client = ion_client_create(dev, -1, debug_name);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001400 if (IS_ERR_OR_NULL(client))
1401 return client ? PTR_ERR(client) : -ENOMEM;
1402 file->private_data = client;
1403
1404 return 0;
1405}
1406
1407static const struct file_operations ion_fops = {
1408 .owner = THIS_MODULE,
1409 .open = ion_open,
1410 .release = ion_release,
1411 .unlocked_ioctl = ion_ioctl,
1412};
1413
1414static size_t ion_debug_heap_total(struct ion_client *client,
Laura Abbott3647ac32011-10-31 14:09:53 -07001415 enum ion_heap_ids id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001416{
1417 size_t size = 0;
1418 struct rb_node *n;
1419
1420 mutex_lock(&client->lock);
1421 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1422 struct ion_handle *handle = rb_entry(n,
1423 struct ion_handle,
1424 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001425 if (handle->buffer->heap->id == id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001426 size += handle->buffer->size;
1427 }
1428 mutex_unlock(&client->lock);
1429 return size;
1430}
1431
Olav Haugan0671b9a2012-05-25 11:58:56 -07001432/**
1433 * Searches through a client's handles to determine whether the buffer
1434 * is owned by this client. Used for debug output.
1435 * @param client pointer to candidate owner of buffer
1436 * @param buf pointer to buffer that we are trying to find the owner of
1437 * @return 1 if found, 0 otherwise
1438 */
1439static int ion_debug_find_buffer_owner(const struct ion_client *client,
1440 const struct ion_buffer *buf)
1441{
1442 struct rb_node *n;
1443
1444 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1445 const struct ion_handle *handle = rb_entry(n,
1446 const struct ion_handle,
1447 node);
1448 if (handle->buffer == buf)
1449 return 1;
1450 }
1451 return 0;
1452}
1453
1454/**
1455 * Adds mem_map_data pointer to the tree of mem_map
1456 * Used for debug output.
1457 * @param mem_map The mem_map tree
1458 * @param data The new data to add to the tree
1459 */
1460static void ion_debug_mem_map_add(struct rb_root *mem_map,
1461 struct mem_map_data *data)
1462{
1463 struct rb_node **p = &mem_map->rb_node;
1464 struct rb_node *parent = NULL;
1465 struct mem_map_data *entry;
1466
1467 while (*p) {
1468 parent = *p;
1469 entry = rb_entry(parent, struct mem_map_data, node);
1470
1471 if (data->addr < entry->addr) {
1472 p = &(*p)->rb_left;
1473 } else if (data->addr > entry->addr) {
1474 p = &(*p)->rb_right;
1475 } else {
1476 pr_err("%s: mem_map_data already found.\n", __func__);
1477 BUG();
1478 }
1479 }
1480 rb_link_node(&data->node, parent, p);
1481 rb_insert_color(&data->node, mem_map);
1482}
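
/*
 * A hypothetical lookup counterpart to the insert helper above, shown only to
 * illustrate that the mem_map tree is keyed by buffer start address; nothing
 * in this file needs it.  The ion_phys_addr_t key type follows the addr field
 * filled in by the heap's phys op.
 *
 *	static struct mem_map_data *ion_debug_mem_map_find(
 *			struct rb_root *mem_map, ion_phys_addr_t addr)
 *	{
 *		struct rb_node *n = mem_map->rb_node;
 *
 *		while (n) {
 *			struct mem_map_data *entry =
 *				rb_entry(n, struct mem_map_data, node);
 *
 *			if (addr < entry->addr)
 *				n = n->rb_left;
 *			else if (addr > entry->addr)
 *				n = n->rb_right;
 *			else
 *				return entry;
 *		}
 *		return NULL;
 *	}
 */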
1483
1484/**
1485 * Search for an owner of a buffer by iterating over all ION clients.
1486 * @param dev ion device containing pointers to all the clients.
1487 * @param buffer pointer to buffer we are trying to find the owner of.
1488 * @return name of owner.
1489 */
1490const char *ion_debug_locate_owner(const struct ion_device *dev,
1491 const struct ion_buffer *buffer)
1492{
1493 struct rb_node *j;
1494 const char *client_name = NULL;
1495
Laura Abbottb14ed962012-01-30 14:18:08 -08001496 for (j = rb_first(&dev->clients); j && !client_name;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001497 j = rb_next(j)) {
1498 struct ion_client *client = rb_entry(j, struct ion_client,
1499 node);
1500 if (ion_debug_find_buffer_owner(client, buffer))
1501 client_name = client->name;
1502 }
1503 return client_name;
1504}
1505
1506/**
1507 * Create a mem_map of the heap.
1508 * @param s seq_file to log error message to.
1509 * @param heap The heap to create mem_map for.
1510 * @param mem_map The mem map to be created.
1511 */
1512void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
1513 struct rb_root *mem_map)
1514{
1515 struct ion_device *dev = heap->dev;
1516 struct rb_node *n;
Chintan Pandyadaf75622013-01-29 19:40:01 +05301517 size_t size;
1518
1519 if (!heap->ops->phys)
1520 return;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001521
1522 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1523 struct ion_buffer *buffer =
1524 rb_entry(n, struct ion_buffer, node);
1525 if (buffer->heap->id == heap->id) {
1526 struct mem_map_data *data =
1527 kzalloc(sizeof(*data), GFP_KERNEL);
1528 if (!data) {
1529 seq_printf(s, "ERROR: out of memory. Part of memory map will not be logged\n");
1531 break;
1532 }
Chintan Pandyadaf75622013-01-29 19:40:01 +05301533
1534 buffer->heap->ops->phys(buffer->heap, buffer,
1535 &(data->addr), &size);
1536 data->size = (unsigned long) size;
1537 data->addr_end = data->addr + data->size - 1;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001538 data->client_name = ion_debug_locate_owner(dev, buffer);
1539 ion_debug_mem_map_add(mem_map, data);
1540 }
1541 }
1542}
1543
1544/**
1545 * Free the memory allocated by ion_debug_mem_map_create
1546 * @param mem_map The mem map to free.
1547 */
1548static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
1549{
1550 if (mem_map) {
1551 struct rb_node *n;
1552 while ((n = rb_first(mem_map)) != 0) {
1553 struct mem_map_data *data =
1554 rb_entry(n, struct mem_map_data, node);
1555 rb_erase(&data->node, mem_map);
1556 kfree(data);
1557 }
1558 }
1559}
1560
1561/**
1562 * Print heap debug information.
1563 * @param s seq_file to log message to.
1564 * @param heap pointer to heap that we will print debug information for.
1565 */
1566static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
1567{
1568 if (heap->ops->print_debug) {
1569 struct rb_root mem_map = RB_ROOT;
1570 ion_debug_mem_map_create(s, heap, &mem_map);
1571 heap->ops->print_debug(heap, s, &mem_map);
1572 ion_debug_mem_map_destroy(&mem_map);
1573 }
1574}
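
/*
 * A sketch of how a heap's print_debug op might consume the mem_map built
 * above.  The signature is inferred from the call in ion_heap_print_debug();
 * the column layout is purely illustrative, and a real heap will typically
 * print additional heap-specific state as well.
 *
 *	static int example_heap_print_debug(struct ion_heap *heap,
 *					    struct seq_file *s,
 *					    struct rb_root *mem_map)
 *	{
 *		struct rb_node *n;
 *
 *		if (!mem_map)
 *			return 0;
 *
 *		seq_printf(s, "\n%16s %16s %16s\n", "client", "start", "size");
 *		for (n = rb_first(mem_map); n; n = rb_next(n)) {
 *			struct mem_map_data *d =
 *				rb_entry(n, struct mem_map_data, node);
 *
 *			seq_printf(s, "%16s %16lx %16lu\n",
 *				   d->client_name ? d->client_name : "(null)",
 *				   (unsigned long)d->addr, d->size);
 *		}
 *		return 0;
 *	}
 */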
1575
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001576static int ion_debug_heap_show(struct seq_file *s, void *unused)
1577{
1578 struct ion_heap *heap = s->private;
1579 struct ion_device *dev = heap->dev;
1580 struct rb_node *n;
1581
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001582 mutex_lock(&dev->buffer_lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001583 seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001584
Laura Abbottb14ed962012-01-30 14:18:08 -08001585 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001586 struct ion_client *client = rb_entry(n, struct ion_client,
1587 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001588 size_t size = ion_debug_heap_total(client, heap->id);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001589 if (!size)
1590 continue;
Laura Abbottb14ed962012-01-30 14:18:08 -08001591 if (client->task) {
1592 char task_comm[TASK_COMM_LEN];
1593
1594 get_task_comm(task_comm, client->task);
1595 seq_printf(s, "%16s %16u %16zu\n", task_comm,
1596 client->pid, size);
1597 } else {
1598 seq_printf(s, "%16s %16u %16zu\n", client->name,
1599 client->pid, size);
1600 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001601 }
Olav Haugan0671b9a2012-05-25 11:58:56 -07001602 ion_heap_print_debug(s, heap);
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001603 mutex_unlock(&dev->buffer_lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001604 return 0;
1605}
1606
1607static int ion_debug_heap_open(struct inode *inode, struct file *file)
1608{
1609 return single_open(file, ion_debug_heap_show, inode->i_private);
1610}
1611
1612static const struct file_operations debug_heap_fops = {
1613 .open = ion_debug_heap_open,
1614 .read = seq_read,
1615 .llseek = seq_lseek,
1616 .release = single_release,
1617};
1618
1619void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1620{
Laura Abbottb14ed962012-01-30 14:18:08 -08001621 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1622 !heap->ops->unmap_dma)
1623 pr_err("%s: cannot add heap with invalid ops struct.\n",
1624 __func__);
1625
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001626 heap->dev = dev;
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001627 down_write(&dev->lock);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001628 /*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
1630 plist_node_init(&heap->node, -heap->id);
1631 plist_add(&heap->node, &dev->heaps);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001632 debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1633 &debug_heap_fops);
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001634 up_write(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001635}
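
/*
 * A minimal sketch of how a platform driver probe might feed heaps to
 * ion_device_add_heap(), assuming the usual pairing with ion_heap_create()
 * and an ion_device "idev" created earlier; a real probe also keeps the heap
 * pointers around so they can be destroyed on removal.
 *
 *	struct ion_platform_data *pdata = pdev->dev.platform_data;
 *	int i;
 *
 *	for (i = 0; i < pdata->nr; i++) {
 *		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);
 *
 *		if (IS_ERR_OR_NULL(heap))
 *			continue;
 *		ion_device_add_heap(idev, heap);
 *	}
 */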
1636
Laura Abbott93619302012-10-11 11:51:40 -07001637int ion_secure_handle(struct ion_client *client, struct ion_handle *handle,
1638 int version, void *data, int flags)
1639{
1640 int ret = -EINVAL;
1641 struct ion_heap *heap;
1642 struct ion_buffer *buffer;
1643
1644 mutex_lock(&client->lock);
1645 if (!ion_handle_validate(client, handle)) {
1646 WARN(1, "%s: invalid handle passed to secure.\n", __func__);
1647 goto out_unlock;
1648 }
1649
1650 buffer = handle->buffer;
1651 heap = buffer->heap;
1652
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001653 if (!ion_heap_allow_handle_secure(heap->type)) {
Laura Abbott93619302012-10-11 11:51:40 -07001654 pr_err("%s: cannot secure buffer from non secure heap\n",
1655 __func__);
1656 goto out_unlock;
1657 }
1658
1659 BUG_ON(!buffer->heap->ops->secure_buffer);
1660 /*
1661 * Protect the handle via the client lock to ensure we aren't
1662 * racing with free
1663 */
1664 ret = buffer->heap->ops->secure_buffer(buffer, version, data, flags);
1665
1666out_unlock:
1667 mutex_unlock(&client->lock);
1668 return ret;
1669}
1670
1671int ion_unsecure_handle(struct ion_client *client, struct ion_handle *handle)
1672{
1673 int ret = -EINVAL;
1674 struct ion_heap *heap;
1675 struct ion_buffer *buffer;
1676
1677 mutex_lock(&client->lock);
1678 if (!ion_handle_validate(client, handle)) {
1679 WARN(1, "%s: invalid handle passed to unsecure.\n", __func__);
1680 goto out_unlock;
1681 }
1682
1683 buffer = handle->buffer;
1684 heap = buffer->heap;
1685
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001686 if (!ion_heap_allow_handle_secure(heap->type)) {
Laura Abbott93619302012-10-11 11:51:40 -07001687 pr_err("%s: cannot unsecure buffer from non secure heap\n",
1688 __func__);
1689 goto out_unlock;
1690 }
1691
1692 BUG_ON(!buffer->heap->ops->unsecure_buffer);
1693 /*
1694 * Protect the handle via the client lock to ensure we aren't
1695 * racing with free
1696 */
1697 ret = buffer->heap->ops->unsecure_buffer(buffer, 0);
1698
1699out_unlock:
1700 mutex_unlock(&client->lock);
1701 return ret;
1702}
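
/*
 * A minimal sketch of the intended calling pattern for the two helpers above,
 * from a driver holding an ion_client and an ion_handle backed by a
 * secure-capable heap.  The version/data/flags values are opaque to this
 * layer and depend on the heap's secure_buffer implementation; they are
 * placeholders here.
 *
 *	ret = ion_secure_handle(client, handle, version, data, flags);
 *	if (ret)
 *		return ret;
 *
 *	// hand the buffer to the secure environment, then release it
 *
 *	ret = ion_unsecure_handle(client, handle);
 */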
1703
Laura Abbott7e446482012-06-13 15:59:39 -07001704int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
1705 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001706{
Olav Haugan0a852512012-01-09 10:20:55 -08001707 int ret_val = 0;
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001708 struct ion_heap *heap;
Olav Haugan0a852512012-01-09 10:20:55 -08001709
1710 /*
1711 * traverse the list of heaps available in this system
1712 * and find the heap that is specified.
1713 */
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001714 down_write(&dev->lock);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001715 plist_for_each_entry(heap, &dev->heaps, node) {
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001716 if (!ion_heap_allow_heap_secure(heap->type))
Olav Haugan0a852512012-01-09 10:20:55 -08001717 continue;
1718 if (ION_HEAP(heap->id) != heap_id)
1719 continue;
1720 if (heap->ops->secure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001721 ret_val = heap->ops->secure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001722 else
1723 ret_val = -EINVAL;
1724 break;
1725 }
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001726 up_write(&dev->lock);
Olav Haugan0a852512012-01-09 10:20:55 -08001727 return ret_val;
1728}
Olav Hauganbd453a92012-07-05 14:21:34 -07001729EXPORT_SYMBOL(ion_secure_heap);
Olav Haugan0a852512012-01-09 10:20:55 -08001730
Laura Abbott7e446482012-06-13 15:59:39 -07001731int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
1732 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001733{
Olav Haugan0a852512012-01-09 10:20:55 -08001734 int ret_val = 0;
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001735 struct ion_heap *heap;
Olav Haugan0a852512012-01-09 10:20:55 -08001736
1737 /*
1738 * traverse the list of heaps available in this system
1739 * and find the heap that is specified.
1740 */
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001741 down_write(&dev->lock);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001742 plist_for_each_entry(heap, &dev->heaps, node) {
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001743 if (!ion_heap_allow_heap_secure(heap->type))
Olav Haugan0a852512012-01-09 10:20:55 -08001744 continue;
1745 if (ION_HEAP(heap->id) != heap_id)
1746 continue;
1747 if (heap->ops->unsecure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001748 ret_val = heap->ops->unsecure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001749 else
1750 ret_val = -EINVAL;
1751 break;
1752 }
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001753 up_write(&dev->lock);
Olav Haugan0a852512012-01-09 10:20:55 -08001754 return ret_val;
1755}
Olav Hauganbd453a92012-07-05 14:21:34 -07001756EXPORT_SYMBOL(ion_unsecure_heap);
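
/*
 * Heap-level counterpart: a sketch of protecting an entire heap around a
 * secure playback session.  The heap_id argument is compared against
 * ION_HEAP(heap->id) above, so callers pass the mask form; ION_CP_MM_HEAP_ID
 * is used purely as an example id, and version/data are placeholders
 * forwarded untouched to the heap.
 *
 *	ret = ion_secure_heap(idev, ION_HEAP(ION_CP_MM_HEAP_ID),
 *			      version, data);
 *	if (ret)
 *		return ret;
 *
 *	// decode and display protected content
 *
 *	ion_unsecure_heap(idev, ION_HEAP(ION_CP_MM_HEAP_ID), version, data);
 */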
Olav Haugan0a852512012-01-09 10:20:55 -08001757
Laura Abbott404f8242011-10-31 14:22:53 -07001758static int ion_debug_leak_show(struct seq_file *s, void *unused)
1759{
1760 struct ion_device *dev = s->private;
1761 struct rb_node *n;
Laura Abbott404f8242011-10-31 14:22:53 -07001762
Laura Abbott404f8242011-10-31 14:22:53 -07001763 seq_printf(s, "%16s %16s %16s %16s\n", "buffer", "heap", "size",
1764 "ref cnt");
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001765
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001766 ion_mark_dangling_buffers_locked(dev);
Laura Abbott404f8242011-10-31 14:22:53 -07001767
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001768 down_write(&dev->lock);
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001769 /* Anyone still marked as a 1 means a leaked handle somewhere */
Laura Abbott404f8242011-10-31 14:22:53 -07001770 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1771 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
1772 node);
1773
1774 if (buf->marked == 1)
1775 seq_printf(s, "%16p %16s %16zu %16d\n",
1776 buf, buf->heap->name, buf->size,
1777 atomic_read(&buf->ref.refcount));
1778 }
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001779 up_write(&dev->lock);
Laura Abbott404f8242011-10-31 14:22:53 -07001780 return 0;
1781}
1782
1783static int ion_debug_leak_open(struct inode *inode, struct file *file)
1784{
1785 return single_open(file, ion_debug_leak_show, inode->i_private);
1786}
1787
1788static const struct file_operations debug_leak_fops = {
1789 .open = ion_debug_leak_open,
1790 .read = seq_read,
1791 .llseek = seq_lseek,
1792 .release = single_release,
1793};
1794
1795
1796
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001797struct ion_device *ion_device_create(long (*custom_ioctl)
1798 (struct ion_client *client,
1799 unsigned int cmd,
1800 unsigned long arg))
1801{
1802 struct ion_device *idev;
1803 int ret;
1804
1805 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1806 if (!idev)
1807 return ERR_PTR(-ENOMEM);
1808
1809 idev->dev.minor = MISC_DYNAMIC_MINOR;
1810 idev->dev.name = "ion";
1811 idev->dev.fops = &ion_fops;
1812 idev->dev.parent = NULL;
1813 ret = misc_register(&idev->dev);
1814 if (ret) {
1815 pr_err("ion: failed to register misc device.\n");
1816 return ERR_PTR(ret);
1817 }
1818
1819 idev->debug_root = debugfs_create_dir("ion", NULL);
1820 if (IS_ERR_OR_NULL(idev->debug_root))
1821 pr_err("ion: failed to create debug files.\n");
1822
1823 idev->custom_ioctl = custom_ioctl;
1824 idev->buffers = RB_ROOT;
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001825 mutex_init(&idev->buffer_lock);
1826 init_rwsem(&idev->lock);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001827 plist_head_init(&idev->heaps);
Laura Abbottb14ed962012-01-30 14:18:08 -08001828 idev->clients = RB_ROOT;
Laura Abbott404f8242011-10-31 14:22:53 -07001829 debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
1830 &debug_leak_fops);
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001831
1832 setup_ion_leak_check(idev->debug_root);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001833 return idev;
1834}
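
/*
 * A sketch of how an SoC wrapper might create the device.  The custom_ioctl
 * shown is hypothetical; it only illustrates that the callback is expected to
 * back ION_IOC_CUSTOM and the cache maintenance commands dispatched from
 * ion_ioctl().
 *
 *	static long example_custom_ioctl(struct ion_client *client,
 *					 unsigned int cmd, unsigned long arg)
 *	{
 *		switch (cmd) {
 *		case ION_IOC_CLEAN_CACHES:
 *		case ION_IOC_INV_CACHES:
 *		case ION_IOC_CLEAN_INV_CACHES:
 *			// cache maintenance for 'arg' would go here
 *			return 0;
 *		default:
 *			return -ENOTTY;
 *		}
 *	}
 *
 *	idev = ion_device_create(example_custom_ioctl);
 *	if (IS_ERR_OR_NULL(idev))
 *		return PTR_ERR(idev);
 */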
1835
1836void ion_device_destroy(struct ion_device *dev)
1837{
1838 misc_deregister(&dev->dev);
1839 /* XXX need to free the heaps and clients ? */
1840 kfree(dev);
1841}
Laura Abbottb14ed962012-01-30 14:18:08 -08001842
1843void __init ion_reserve(struct ion_platform_data *data)
1844{
Rebecca Schultz Zavinfbce1512012-11-15 10:31:02 -08001845 int i;
Laura Abbottb14ed962012-01-30 14:18:08 -08001846
1847 for (i = 0; i < data->nr; i++) {
1848 if (data->heaps[i].size == 0)
1849 continue;
Rebecca Schultz Zavinfbce1512012-11-15 10:31:02 -08001850
1851 if (data->heaps[i].base == 0) {
1852 phys_addr_t paddr;
1853 paddr = memblock_alloc_base(data->heaps[i].size,
1854 data->heaps[i].align,
1855 MEMBLOCK_ALLOC_ANYWHERE);
1856 if (!paddr) {
1857 pr_err("%s: error allocating memblock for heap %d\n",
1858 __func__, i);
1860 continue;
1861 }
1862 data->heaps[i].base = paddr;
1863 } else {
1864 int ret = memblock_reserve(data->heaps[i].base,
1865 data->heaps[i].size);
1866 if (ret)
1867 pr_err("memblock reserve of %x@%pa failed\n",
1868 data->heaps[i].size,
1869 &data->heaps[i].base);
1870 }
1871 pr_info("%s: %s reserved base %pa size %d\n", __func__,
1872 data->heaps[i].name,
1873 &data->heaps[i].base,
1874 data->heaps[i].size);
Laura Abbottb14ed962012-01-30 14:18:08 -08001875 }
1876}
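
/*
 * ion_reserve() is expected to run from early boot (a board file's reserve
 * callback), before the pages it claims reach the buddy allocator.  A sketch
 * with placeholder platform data follows; the heap id, type and sizes are
 * examples only, not an existing board configuration.
 *
 *	static struct ion_platform_heap example_heaps[] = {
 *		{
 *			.id    = ION_CP_MM_HEAP_ID,
 *			.type  = ION_HEAP_TYPE_CP,
 *			.name  = "mm",
 *			.size  = SZ_64M,
 *			.align = SZ_1M,
 *		},
 *	};
 *
 *	static struct ion_platform_data example_ion_pdata = {
 *		.nr    = ARRAY_SIZE(example_heaps),
 *		.heaps = example_heaps,
 *	};
 *
 *	static void __init example_board_reserve(void)
 *	{
 *		ion_reserve(&example_ion_pdata);
 *	}
 */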