/*
 *
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/msm_ion.h>
#include <trace/events/kmem.h>


#include <mach/iommu_domains.h>
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @user_clients:	list of all the clients created from userspace
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_type_mask:	mask of all supported heap types
 * @name:		used for debugging
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both handles tree
 * as well as the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_type_mask;
	char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @dmap_cnt:		count of times this client has mapped for dma
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int iommu_map_cnt;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return ((buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
			    i) {
			if (sg_dma_len(sg) == PAGE_SIZE)
				continue;
			pr_err("%s: cached mappings that will be faulted in "
			       "must have pagewise sg_lists\n", __func__);
			ret = -EINVAL;
			goto err;
		}

		ret = ion_buffer_alloc_dirty(buffer);
		if (ret)
			goto err;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here.  However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg. The implicit contract here is that
	   memory coming from the heaps is ready for dma, ie if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (sg_dma_address(sg) == 0)
			sg_dma_address(sg) = sg_phys(sg);
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
	kfree(buffer);
	return ERR_PTR(ret);
}

static void ion_delayed_unsecure(struct ion_buffer *buffer)
{
	if (buffer->heap->ops->unsecure_buffer)
		buffer->heap->ops->unsecure_buffer(buffer, 1);
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);

	ion_delayed_unsecure(buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);
	if (buffer->flags & ION_FLAG_CACHED)
		kfree(buffer->dirty);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_put(buffer);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	unsigned long secure_allocation = flags & ION_FLAG_SECURE;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	/*
	 * For now, we don't want to fault in pages individually since
	 * clients are already doing manual cache maintenance. In
	 * other words, the implicit caching infrastructure is in
	 * place (in code) but should not be used.
	 */
	flags |= ION_FLAG_CACHED_NEEDS_SYNC;

	pr_debug("%s: len %d align %d heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_type_mask))
			continue;
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		/* Do not allow un-secure heap if secure is specified */
		if (secure_allocation &&
		    !ion_heap_allow_secure_allocation(heap->type))
			continue;
		trace_ion_alloc_buffer_start(client->name, heap->name, len,
					     heap_id_mask, flags);
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		trace_ion_alloc_buffer_end(client->name, heap->name, len,
					   heap_id_mask, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;

		trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
						heap_id_mask, flags,
						PTR_ERR(buffer));
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
			int ret_value = snprintf(&dbg_str[dbg_str_idx],
						len_left, "%s ", heap->name);
			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
			}
		}
	}
	up_read(&dev->lock);

	if (buffer == NULL) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_id_mask, flags, -ENODEV);
		return ERR_PTR(-ENODEV);
	}

	if (IS_ERR(buffer)) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_id_mask, flags,
					    PTR_ERR(buffer));
		pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
			 "0x%x) from heap(s) %sfor client %s\n",
			 len, align, dbg_str, client->name);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}


	return handle;
}
EXPORT_SYMBOL(ion_alloc);
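
/*
 * Illustrative sketch, not part of the driver: a minimal in-kernel user of
 * the allocation path above.  'idev', the heap id and the size/flag values
 * are assumptions for the example only; real callers take them from
 * msm_ion.h and their board's heap configuration.
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *	void *vaddr;
 *
 *	client = ion_client_create(idev, -1, "example");
 *	handle = ion_alloc(client, SZ_4K, SZ_4K,
 *			   ION_HEAP(ION_SYSTEM_HEAP_ID), ION_FLAG_CACHED);
 *	if (!IS_ERR_OR_NULL(handle)) {
 *		vaddr = ion_map_kernel(client, handle);
 *		...
 *		ion_unmap_kernel(client, handle);
 *		ion_free(client, handle);
 *	}
 *	ion_client_destroy(client);
 */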

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		mutex_unlock(&client->lock);
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;

	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
			"heap_name", "size_in_bytes", "handle refcount",
			"buffer", "physical", "[domain,partition] - virt");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);

		enum ion_heap_type type = handle->buffer->heap->type;

		seq_printf(s, "%16.16s: %16x : %16d : %12p",
			   handle->buffer->heap->name,
			   handle->buffer->size,
			   atomic_read(&handle->ref.refcount),
			   handle->buffer);

		if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
			type == ION_HEAP_TYPE_CARVEOUT ||
			type == (enum ion_heap_type) ION_HEAP_TYPE_CP)
			seq_printf(s, " : %12pa", &handle->buffer->priv_phys);
		else
			seq_printf(s, " : %12s", "N/A");

		seq_printf(s, "\n");
	}
	mutex_unlock(&client->lock);
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_type_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;
	unsigned int name_len;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	name_len = strnlen(name, 64);

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);

	client->name = kzalloc(name_len+1, GFP_KERNEL);
	if (!client->name) {
		put_task_struct(current->group_leader);
		kfree(client);
		return ERR_PTR(-ENOMEM);
	} else {
		strlcpy(client->name, name, name_len+1);
	}

	client->heap_type_mask = heap_type_mask;
	client->task = task;
	client->pid = pid;

	down_write(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);


	client->debug_root = debugfs_create_file(name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	up_write(&dev->lock);

	return client;
}
EXPORT_SYMBOL(ion_client_create);

/**
 * ion_mark_dangling_buffers_locked() - Mark dangling buffers
 * @dev:	the ion device whose buffers will be searched
 *
 * Sets marked=1 for all known buffers associated with `dev' that no
 * longer have a handle pointing to them. dev->lock should be held
 * across a call to this function (and should only be unlocked after
 * checking for marked buffers).
 */
static void ion_mark_dangling_buffers_locked(struct ion_device *dev)
{
	struct rb_node *n, *n2;
	/* mark all buffers as 1 */
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		buf->marked = 1;
	}

	/* now see which buffers we can access */
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);

		mutex_lock(&client->lock);
		for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
			struct ion_handle *handle
				= rb_entry(n2, struct ion_handle, node);

			handle->buffer->marked = 0;

		}
		mutex_unlock(&client->lock);

	}
}

#ifdef CONFIG_ION_LEAK_CHECK
static u32 ion_debug_check_leaks_on_destroy;

static int ion_check_for_and_print_leaks(struct ion_device *dev)
{
	struct rb_node *n;
	int num_leaks = 0;

	if (!ion_debug_check_leaks_on_destroy)
		return 0;

	/* check for leaked buffers (those that no longer have a
	 * handle pointing to them) */
	ion_mark_dangling_buffers_locked(dev);

	/* Anyone still marked as a 1 means a leaked handle somewhere */
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		if (buf->marked == 1) {
			pr_info("Leaked ion buffer at %p\n", buf);
			num_leaks++;
		}
	}
	return num_leaks;
}
static void setup_ion_leak_check(struct dentry *debug_root)
{
	debugfs_create_bool("check_leaks_on_destroy", 0664, debug_root,
			    &ion_debug_check_leaks_on_destroy);
}
#else
static int ion_check_for_and_print_leaks(struct ion_device *dev)
{
	return 0;
}
static void setup_ion_leak_check(struct dentry *debug_root)
{
}
#endif
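
/*
 * Usage sketch for the leak checker above (assumes CONFIG_ION_LEAK_CHECK is
 * enabled and debugfs is mounted at the usual location):
 *
 *	echo 1 > /sys/kernel/debug/ion/check_leaks_on_destroy
 *
 * With the knob set, every client teardown walks dev->buffers, clears the
 * mark on anything still reachable through a handle, and the caller WARNs
 * with the count of buffers left marked.  The exact debugfs path depends on
 * where the device placed debug_root, so treat the path above as an
 * assumption.
 */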

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;
	int num_leaks;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);

	num_leaks = ion_check_for_and_print_leaks(dev);

	up_write(&dev->lock);

	if (num_leaks) {
		struct task_struct *current_task = current;
		char current_task_name[TASK_COMM_LEN];
		get_task_comm(current_task_name, current_task);
		WARN(1, "%s: Detected %d leaked ion buffer%s.\n",
			__func__, num_leaks, num_leaks == 1 ? "" : "s");
		pr_info("task name at time of leak: %s, pid: %d\n",
			current_task_name, current_task->pid);
	}

	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
			unsigned long *flags)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*flags = buffer->flags;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_flags);

int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
			unsigned long *size)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*size = buffer->size;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_size);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);
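
/*
 * Illustrative sketch, not part of the driver: walking the table returned by
 * ion_sg_table() to pick up the per-chunk dma addresses.  'client' and
 * 'handle' are assumptions for the example only.
 *
 *	struct sg_table *table = ion_sg_table(client, handle);
 *	struct scatterlist *sg;
 *	int i;
 *
 *	if (!IS_ERR_OR_NULL(table))
 *		for_each_sg(table->sgl, sg, table->nents, i)
 *			pr_debug("chunk %d: %lx + %u\n", i,
 *				 (unsigned long)sg_dma_address(sg),
 *				 sg_dma_len(sg));
 */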

struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
					size_t chunk_size, size_t total_size)
{
	struct sg_table *table;
	int i, n_chunks, ret;
	struct scatterlist *sg;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	n_chunks = DIV_ROUND_UP(total_size, chunk_size);
	pr_debug("creating sg_table with %d chunks\n", n_chunks);

	ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
	if (ret)
		goto err0;

	for_each_sg(table->sgl, sg, table->nents, i) {
		dma_addr_t addr = buffer_base + i * chunk_size;
		sg_dma_address(sg) = addr;
		sg_dma_len(sg) = chunk_size;
	}

	return table;
err0:
	kfree(table);
	return ERR_PTR(ret);
}
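
/*
 * Worked example for ion_create_chunked_sg_table() above, with values chosen
 * purely for illustration: for a contiguous region at 0x80000000 with
 * total_size SZ_1M and chunk_size SZ_64K, n_chunks is 16 and the entries get
 * dma addresses 0x80000000, 0x80010000, ... 0x800f0000, each with a dma
 * length of 64K.  Callers typically pick chunk_size to match the granularity
 * of whatever will consume the table.
 */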

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
	unsigned long pages = buffer->sg_table->nents;
	unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;

	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
	if (!buffer->dirty)
		return -ENOMEM;
	return 0;
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;
	struct ion_vma_list *vma_list;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (!test_bit(i, buffer->dirty))
			continue;
		dma_sync_sg_for_device(dev, sg, 1, dir);
		clear_bit(i, buffer->dirty);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct scatterlist *sg;
	int i;

	mutex_lock(&buffer->lock);
	set_bit(vmf->pgoff, buffer->dirty);

	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (i != vmf->pgoff)
			continue;
		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			       sg_page(sg));
		break;
	}
	mutex_unlock(&buffer->lock);
	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);

	if (buffer->heap->ops->unmap_user)
		buffer->heap->ops->unmap_user(buffer->heap, buffer);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		vma->vm_flags |= VM_MIXEDMAP;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001252static int ion_sync_for_device(struct ion_client *client, int fd)
1253{
1254 struct dma_buf *dmabuf;
1255 struct ion_buffer *buffer;
1256
1257 dmabuf = dma_buf_get(fd);
1258 if (IS_ERR_OR_NULL(dmabuf))
1259 return PTR_ERR(dmabuf);
1260
1261 /* if this memory came from ion */
1262 if (dmabuf->ops != &dma_buf_ops) {
1263 pr_err("%s: can not sync dmabuf from another exporter\n",
1264 __func__);
1265 dma_buf_put(dmabuf);
1266 return -EINVAL;
1267 }
1268 buffer = dmabuf->priv;
Rebecca Schultz Zavin3edb9002012-09-19 23:31:05 -07001269
1270 dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1271 buffer->sg_table->nents, DMA_BIDIRECTIONAL);
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001272 dma_buf_put(dmabuf);
1273 return 0;
1274}
1275
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001276static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1277{
1278 struct ion_client *client = filp->private_data;
1279
1280 switch (cmd) {
1281 case ION_IOC_ALLOC:
1282 {
1283 struct ion_allocation_data data;
1284
1285 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1286 return -EFAULT;
1287 data.handle = ion_alloc(client, data.len, data.align,
Hanumant Singh7d72bad2012-08-29 18:39:44 -07001288 data.heap_mask, data.flags);
KyongHo Cho9ae7e012011-09-07 11:27:07 +09001289
Laura Abbottb14ed962012-01-30 14:18:08 -08001290 if (IS_ERR(data.handle))
1291 return PTR_ERR(data.handle);
KyongHo Cho9ae7e012011-09-07 11:27:07 +09001292
Laura Abbottb14ed962012-01-30 14:18:08 -08001293 if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
1294 ion_free(client, data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001295 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001296 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001297 break;
1298 }
1299 case ION_IOC_FREE:
1300 {
1301 struct ion_handle_data data;
1302 bool valid;
1303
1304 if (copy_from_user(&data, (void __user *)arg,
1305 sizeof(struct ion_handle_data)))
1306 return -EFAULT;
1307 mutex_lock(&client->lock);
1308 valid = ion_handle_validate(client, data.handle);
1309 mutex_unlock(&client->lock);
1310 if (!valid)
1311 return -EINVAL;
1312 ion_free(client, data.handle);
1313 break;
1314 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001315 case ION_IOC_MAP:
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001316 case ION_IOC_SHARE:
1317 {
1318 struct ion_fd_data data;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001319 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1320 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001321
Laura Abbottb14ed962012-01-30 14:18:08 -08001322 data.fd = ion_share_dma_buf(client, data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001323 if (copy_to_user((void __user *)arg, &data, sizeof(data)))
1324 return -EFAULT;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001325 if (data.fd < 0)
1326 return data.fd;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001327 break;
1328 }
1329 case ION_IOC_IMPORT:
1330 {
1331 struct ion_fd_data data;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001332 int ret = 0;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001333 if (copy_from_user(&data, (void __user *)arg,
1334 sizeof(struct ion_fd_data)))
1335 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001336 data.handle = ion_import_dma_buf(client, data.fd);
Olav Haugan865e97f2012-05-15 14:40:11 -07001337 if (IS_ERR(data.handle)) {
1338 ret = PTR_ERR(data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001339 data.handle = NULL;
Olav Haugan865e97f2012-05-15 14:40:11 -07001340 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001341 if (copy_to_user((void __user *)arg, &data,
1342 sizeof(struct ion_fd_data)))
1343 return -EFAULT;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001344 if (ret < 0)
1345 return ret;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001346 break;
1347 }
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001348 case ION_IOC_SYNC:
1349 {
1350 struct ion_fd_data data;
1351 if (copy_from_user(&data, (void __user *)arg,
1352 sizeof(struct ion_fd_data)))
1353 return -EFAULT;
1354 ion_sync_for_device(client, data.fd);
1355 break;
1356 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001357 case ION_IOC_CUSTOM:
1358 {
1359 struct ion_device *dev = client->dev;
1360 struct ion_custom_data data;
1361
1362 if (!dev->custom_ioctl)
1363 return -ENOTTY;
1364 if (copy_from_user(&data, (void __user *)arg,
1365 sizeof(struct ion_custom_data)))
1366 return -EFAULT;
1367 return dev->custom_ioctl(client, data.cmd, data.arg);
1368 }
Laura Abbottabcb6f72011-10-04 16:26:49 -07001369 case ION_IOC_CLEAN_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001370 return client->dev->custom_ioctl(client,
1371 ION_IOC_CLEAN_CACHES, arg);
Laura Abbottabcb6f72011-10-04 16:26:49 -07001372 case ION_IOC_INV_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001373 return client->dev->custom_ioctl(client,
1374 ION_IOC_INV_CACHES, arg);
Laura Abbottabcb6f72011-10-04 16:26:49 -07001375 case ION_IOC_CLEAN_INV_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001376 return client->dev->custom_ioctl(client,
1377 ION_IOC_CLEAN_INV_CACHES, arg);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001378 default:
1379 return -ENOTTY;
1380 }
1381 return 0;
1382}
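
/*
 * Illustrative usage sketch (not part of the driver): a userspace client
 * would typically drive the ioctls above roughly as follows.  The length,
 * alignment and heap id are made-up example values (ION_SYSTEM_HEAP_ID is
 * assumed to come from msm_ion.h) and error handling is omitted.
 *
 *	int fd = open("/dev/ion", O_RDONLY);
 *	struct ion_allocation_data alloc_data = {
 *		.len = 4096,
 *		.align = 4096,
 *		.heap_mask = ION_HEAP(ION_SYSTEM_HEAP_ID),
 *		.flags = 0,
 *	};
 *	struct ion_fd_data fd_data;
 *	struct ion_handle_data handle_data;
 *
 *	ioctl(fd, ION_IOC_ALLOC, &alloc_data);
 *	fd_data.handle = alloc_data.handle;
 *	ioctl(fd, ION_IOC_SHARE, &fd_data);
 *
 *	fd_data.fd is now a dma-buf fd that can be mmap()ed or passed to
 *	another process, which imports it with ION_IOC_IMPORT.  When done:
 *
 *	handle_data.handle = alloc_data.handle;
 *	ioctl(fd, ION_IOC_FREE, &handle_data);
 *	close(fd_data.fd);
 *	close(fd);
 */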
1383
1384static int ion_release(struct inode *inode, struct file *file)
1385{
1386 struct ion_client *client = file->private_data;
1387
1388 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbottb14ed962012-01-30 14:18:08 -08001389 ion_client_destroy(client);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001390 return 0;
1391}
1392
1393static int ion_open(struct inode *inode, struct file *file)
1394{
1395 struct miscdevice *miscdev = file->private_data;
1396 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1397 struct ion_client *client;
Laura Abbotteed86032011-12-05 15:32:36 -08001398 char debug_name[64];
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001399
1400 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbotteed86032011-12-05 15:32:36 -08001401 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1402 client = ion_client_create(dev, -1, debug_name);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001403 if (IS_ERR_OR_NULL(client))
1404 return PTR_ERR(client);
1405 file->private_data = client;
1406
1407 return 0;
1408}
1409
1410static const struct file_operations ion_fops = {
1411 .owner = THIS_MODULE,
1412 .open = ion_open,
1413 .release = ion_release,
1414 .unlocked_ioctl = ion_ioctl,
1415};
1416
1417static size_t ion_debug_heap_total(struct ion_client *client,
Laura Abbott3647ac32011-10-31 14:09:53 -07001418 enum ion_heap_ids id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001419{
1420 size_t size = 0;
1421 struct rb_node *n;
1422
1423 mutex_lock(&client->lock);
1424 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1425 struct ion_handle *handle = rb_entry(n,
1426 struct ion_handle,
1427 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001428 if (handle->buffer->heap->id == id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001429 size += handle->buffer->size;
1430 }
1431 mutex_unlock(&client->lock);
1432 return size;
1433}
1434
Olav Haugan0671b9a2012-05-25 11:58:56 -07001435/**
1436 * Searches through a client's handles to find whether the buffer is owned
1437 * by this client. Used for debug output.
1438 * @param client pointer to candidate owner of buffer
1439 * @param buf pointer to buffer that we are trying to find the owner of
1440 * @return 1 if found, 0 otherwise
1441 */
1442static int ion_debug_find_buffer_owner(const struct ion_client *client,
1443 const struct ion_buffer *buf)
1444{
1445 struct rb_node *n;
1446
1447 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1448 const struct ion_handle *handle = rb_entry(n,
1449 const struct ion_handle,
1450 node);
1451 if (handle->buffer == buf)
1452 return 1;
1453 }
1454 return 0;
1455}
1456
1457/**
1458 * Adds a mem_map_data node to the mem_map tree.
1459 * Used for debug output.
1460 * @param mem_map The mem_map tree
1461 * @param data The new data to add to the tree
1462 */
1463static void ion_debug_mem_map_add(struct rb_root *mem_map,
1464 struct mem_map_data *data)
1465{
1466 struct rb_node **p = &mem_map->rb_node;
1467 struct rb_node *parent = NULL;
1468 struct mem_map_data *entry;
1469
1470 while (*p) {
1471 parent = *p;
1472 entry = rb_entry(parent, struct mem_map_data, node);
1473
1474 if (data->addr < entry->addr) {
1475 p = &(*p)->rb_left;
1476 } else if (data->addr > entry->addr) {
1477 p = &(*p)->rb_right;
1478 } else {
1479			pr_err("%s: mem_map_data already found.\n", __func__);
1480 BUG();
1481 }
1482 }
1483 rb_link_node(&data->node, parent, p);
1484 rb_insert_color(&data->node, mem_map);
1485}
1486
1487/**
1488 * Search for an owner of a buffer by iterating over all ION clients.
1489 * @param dev ion device containing pointers to all the clients.
1490 * @param buffer pointer to buffer we are trying to find the owner of.
1491 * @return name of owner.
1492 */
1493const char *ion_debug_locate_owner(const struct ion_device *dev,
1494 const struct ion_buffer *buffer)
1495{
1496 struct rb_node *j;
1497 const char *client_name = NULL;
1498
Laura Abbottb14ed962012-01-30 14:18:08 -08001499 for (j = rb_first(&dev->clients); j && !client_name;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001500 j = rb_next(j)) {
1501 struct ion_client *client = rb_entry(j, struct ion_client,
1502 node);
1503 if (ion_debug_find_buffer_owner(client, buffer))
1504 client_name = client->name;
1505 }
1506 return client_name;
1507}
1508
1509/**
1510 * Create a mem_map of the heap.
1511 * @param s seq_file to log error message to.
1512 * @param heap The heap to create mem_map for.
1513 * @param mem_map The mem map to be created.
1514 */
1515void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
1516 struct rb_root *mem_map)
1517{
1518 struct ion_device *dev = heap->dev;
1519 struct rb_node *n;
Chintan Pandyadaf75622013-01-29 19:40:01 +05301520 size_t size;
1521
1522 if (!heap->ops->phys)
1523 return;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001524
1525 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1526 struct ion_buffer *buffer =
1527 rb_entry(n, struct ion_buffer, node);
1528 if (buffer->heap->id == heap->id) {
1529 struct mem_map_data *data =
1530 kzalloc(sizeof(*data), GFP_KERNEL);
1531 if (!data) {
1532 seq_printf(s, "ERROR: out of memory. "
1533 "Part of memory map will not be logged\n");
1534 break;
1535 }
Chintan Pandyadaf75622013-01-29 19:40:01 +05301536
1537 buffer->heap->ops->phys(buffer->heap, buffer,
1538 &(data->addr), &size);
1539 data->size = (unsigned long) size;
1540 data->addr_end = data->addr + data->size - 1;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001541 data->client_name = ion_debug_locate_owner(dev, buffer);
1542 ion_debug_mem_map_add(mem_map, data);
1543 }
1544 }
1545}
1546
1547/**
1548 * Free the memory allocated by ion_debug_mem_map_create
1549 * @param mem_map The mem map to free.
1550 */
1551static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
1552{
1553 if (mem_map) {
1554 struct rb_node *n;
1555		while ((n = rb_first(mem_map)) != NULL) {
1556 struct mem_map_data *data =
1557 rb_entry(n, struct mem_map_data, node);
1558 rb_erase(&data->node, mem_map);
1559 kfree(data);
1560 }
1561 }
1562}
1563
1564/**
1565 * Print heap debug information.
1566 * @param s seq_file to log message to.
1567 * @param heap pointer to heap that we will print debug information for.
1568 */
1569static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
1570{
1571 if (heap->ops->print_debug) {
1572 struct rb_root mem_map = RB_ROOT;
1573 ion_debug_mem_map_create(s, heap, &mem_map);
1574 heap->ops->print_debug(heap, s, &mem_map);
1575 ion_debug_mem_map_destroy(&mem_map);
1576 }
1577}
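
/*
 * Illustrative sketch only: a heap's ->print_debug() implementation could
 * walk the mem_map rbtree built by ion_debug_mem_map_create() with a helper
 * roughly like the one below.  The helper name is hypothetical; the
 * mem_map_data fields are the ones populated above.
 *
 *	static void example_dump_mem_map(struct seq_file *s,
 *					 struct rb_root *mem_map)
 *	{
 *		struct rb_node *n;
 *
 *		seq_printf(s, "%16s %14s %14s %10s\n",
 *			   "client", "start", "end", "size");
 *		for (n = rb_first(mem_map); n; n = rb_next(n)) {
 *			struct mem_map_data *data =
 *				rb_entry(n, struct mem_map_data, node);
 *
 *			seq_printf(s, "%16s %14lx %14lx %10lu\n",
 *				   data->client_name ? : "(unknown)",
 *				   data->addr, data->addr_end, data->size);
 *		}
 *	}
 */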
1578
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001579static int ion_debug_heap_show(struct seq_file *s, void *unused)
1580{
1581 struct ion_heap *heap = s->private;
1582 struct ion_device *dev = heap->dev;
1583 struct rb_node *n;
1584
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001585 mutex_lock(&dev->buffer_lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001586 seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001587
Laura Abbottb14ed962012-01-30 14:18:08 -08001588 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001589 struct ion_client *client = rb_entry(n, struct ion_client,
1590 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001591 size_t size = ion_debug_heap_total(client, heap->id);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001592 if (!size)
1593 continue;
Laura Abbottb14ed962012-01-30 14:18:08 -08001594 if (client->task) {
1595 char task_comm[TASK_COMM_LEN];
1596
1597 get_task_comm(task_comm, client->task);
1598 seq_printf(s, "%16.s %16u %16u\n", task_comm,
1599 client->pid, size);
1600 } else {
1601 seq_printf(s, "%16.s %16u %16u\n", client->name,
1602 client->pid, size);
1603 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001604 }
Olav Haugan0671b9a2012-05-25 11:58:56 -07001605 ion_heap_print_debug(s, heap);
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001606 mutex_unlock(&dev->buffer_lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001607 return 0;
1608}
1609
1610static int ion_debug_heap_open(struct inode *inode, struct file *file)
1611{
1612 return single_open(file, ion_debug_heap_show, inode->i_private);
1613}
1614
1615static const struct file_operations debug_heap_fops = {
1616 .open = ion_debug_heap_open,
1617 .read = seq_read,
1618 .llseek = seq_lseek,
1619 .release = single_release,
1620};
1621
1622void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1623{
Laura Abbottb14ed962012-01-30 14:18:08 -08001624 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1625 !heap->ops->unmap_dma)
1626		pr_err("%s: cannot add heap with invalid ops struct.\n",
1627 __func__);
1628
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001629 heap->dev = dev;
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001630 down_write(&dev->lock);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001631 /* use negative heap->id to reverse the priority -- when traversing
1632	   the list later, higher id numbers are attempted first */
1633 plist_node_init(&heap->node, -heap->id);
1634 plist_add(&heap->node, &dev->heaps);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001635 debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1636 &debug_heap_fops);
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001637 up_write(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001638}
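
/*
 * Illustrative sketch only: a platform driver's probe path might create the
 * ion device and register a heap roughly as follows.  ion_heap_create() is
 * assumed to be the heap factory declared in ion_priv.h and the heap id is
 * assumed to come from msm_ion.h; a real driver (e.g. msm_ion) also passes a
 * custom_ioctl handler instead of NULL.
 *
 *	struct ion_platform_heap heap_data = {
 *		.type = ION_HEAP_TYPE_SYSTEM,
 *		.id = ION_SYSTEM_HEAP_ID,
 *		.name = "system",
 *	};
 *	struct ion_device *idev = ion_device_create(NULL);
 *	struct ion_heap *heap = ion_heap_create(&heap_data);
 *
 *	if (!IS_ERR_OR_NULL(idev) && !IS_ERR_OR_NULL(heap))
 *		ion_device_add_heap(idev, heap);
 */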
1639
Laura Abbott93619302012-10-11 11:51:40 -07001640int ion_secure_handle(struct ion_client *client, struct ion_handle *handle,
1641 int version, void *data, int flags)
1642{
1643 int ret = -EINVAL;
1644 struct ion_heap *heap;
1645 struct ion_buffer *buffer;
1646
1647 mutex_lock(&client->lock);
1648 if (!ion_handle_validate(client, handle)) {
1649 WARN(1, "%s: invalid handle passed to secure.\n", __func__);
1650 goto out_unlock;
1651 }
1652
1653 buffer = handle->buffer;
1654 heap = buffer->heap;
1655
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001656 if (!ion_heap_allow_handle_secure(heap->type)) {
Laura Abbott93619302012-10-11 11:51:40 -07001657 pr_err("%s: cannot secure buffer from non secure heap\n",
1658 __func__);
1659 goto out_unlock;
1660 }
1661
1662 BUG_ON(!buffer->heap->ops->secure_buffer);
1663 /*
1664 * Protect the handle via the client lock to ensure we aren't
1665 * racing with free
1666 */
1667 ret = buffer->heap->ops->secure_buffer(buffer, version, data, flags);
1668
1669out_unlock:
1670 mutex_unlock(&client->lock);
1671 return ret;
1672}
1673
1674int ion_unsecure_handle(struct ion_client *client, struct ion_handle *handle)
1675{
1676 int ret = -EINVAL;
1677 struct ion_heap *heap;
1678 struct ion_buffer *buffer;
1679
1680 mutex_lock(&client->lock);
1681 if (!ion_handle_validate(client, handle)) {
1682		WARN(1, "%s: invalid handle passed to unsecure.\n", __func__);
1683 goto out_unlock;
1684 }
1685
1686 buffer = handle->buffer;
1687 heap = buffer->heap;
1688
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001689 if (!ion_heap_allow_handle_secure(heap->type)) {
Laura Abbott93619302012-10-11 11:51:40 -07001690		pr_err("%s: cannot unsecure buffer from non secure heap\n",
1691 __func__);
1692 goto out_unlock;
1693 }
1694
1695 BUG_ON(!buffer->heap->ops->unsecure_buffer);
1696 /*
1697 * Protect the handle via the client lock to ensure we aren't
1698 * racing with free
1699 */
1700 ret = buffer->heap->ops->unsecure_buffer(buffer, 0);
1701
1702out_unlock:
1703 mutex_unlock(&client->lock);
1704 return ret;
1705}
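
/*
 * Illustrative sketch only: an in-kernel user that must protect a single
 * buffer before programming it into secure hardware could bracket the access
 * roughly like this.  The version/data/flags arguments are heap-specific;
 * the zeros and NULL here are placeholders.
 *
 *	ret = ion_secure_handle(client, handle, 0, NULL, 0);
 *	if (!ret) {
 *		(hand the buffer to the secure pipeline)
 *		ion_unsecure_handle(client, handle);
 *	}
 */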
1706
Laura Abbott7e446482012-06-13 15:59:39 -07001707int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
1708 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001709{
Olav Haugan0a852512012-01-09 10:20:55 -08001710 int ret_val = 0;
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001711 struct ion_heap *heap;
Olav Haugan0a852512012-01-09 10:20:55 -08001712
1713 /*
1714 * traverse the list of heaps available in this system
1715 * and find the heap that is specified.
1716 */
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001717 down_write(&dev->lock);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001718 plist_for_each_entry(heap, &dev->heaps, node) {
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001719 if (!ion_heap_allow_heap_secure(heap->type))
Olav Haugan0a852512012-01-09 10:20:55 -08001720 continue;
1721 if (ION_HEAP(heap->id) != heap_id)
1722 continue;
1723 if (heap->ops->secure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001724 ret_val = heap->ops->secure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001725 else
1726 ret_val = -EINVAL;
1727 break;
1728 }
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001729 up_write(&dev->lock);
Olav Haugan0a852512012-01-09 10:20:55 -08001730 return ret_val;
1731}
Olav Hauganbd453a92012-07-05 14:21:34 -07001732EXPORT_SYMBOL(ion_secure_heap);
Olav Haugan0a852512012-01-09 10:20:55 -08001733
Laura Abbott7e446482012-06-13 15:59:39 -07001734int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
1735 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001736{
Olav Haugan0a852512012-01-09 10:20:55 -08001737 int ret_val = 0;
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001738 struct ion_heap *heap;
Olav Haugan0a852512012-01-09 10:20:55 -08001739
1740 /*
1741 * traverse the list of heaps available in this system
1742 * and find the heap that is specified.
1743 */
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001744 down_write(&dev->lock);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001745 plist_for_each_entry(heap, &dev->heaps, node) {
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001746 if (!ion_heap_allow_heap_secure(heap->type))
Olav Haugan0a852512012-01-09 10:20:55 -08001747 continue;
1748 if (ION_HEAP(heap->id) != heap_id)
1749 continue;
1750		if (heap->ops->unsecure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001751 ret_val = heap->ops->unsecure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001752 else
1753 ret_val = -EINVAL;
1754 break;
1755 }
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001756 up_write(&dev->lock);
Olav Haugan0a852512012-01-09 10:20:55 -08001757 return ret_val;
1758}
Olav Hauganbd453a92012-07-05 14:21:34 -07001759EXPORT_SYMBOL(ion_unsecure_heap);
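
/*
 * Illustrative sketch only: a secure-playback driver could bracket its use of
 * a whole content-protection heap with the two calls above.  Note that the
 * heap_id argument is the ION_HEAP() mask bit, not the raw id, to match the
 * comparison in the loops above; ION_CP_MM_HEAP_ID is assumed to come from
 * msm_ion.h and the version/data values are heap-specific placeholders.
 *
 *	ret = ion_secure_heap(idev, ION_HEAP(ION_CP_MM_HEAP_ID), 0, NULL);
 *	if (!ret) {
 *		(decode protected content into buffers from that heap)
 *		ion_unsecure_heap(idev, ION_HEAP(ION_CP_MM_HEAP_ID), 0, NULL);
 *	}
 */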
Olav Haugan0a852512012-01-09 10:20:55 -08001760
Laura Abbott404f8242011-10-31 14:22:53 -07001761static int ion_debug_leak_show(struct seq_file *s, void *unused)
1762{
1763 struct ion_device *dev = s->private;
1764 struct rb_node *n;
Laura Abbott404f8242011-10-31 14:22:53 -07001765
Laura Abbott404f8242011-10-31 14:22:53 -07001766 seq_printf(s, "%16.s %16.s %16.s %16.s\n", "buffer", "heap", "size",
1767 "ref cnt");
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001768
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001769 ion_mark_dangling_buffers_locked(dev);
Laura Abbott404f8242011-10-31 14:22:53 -07001770
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001771 down_write(&dev->lock);
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001772 /* Anyone still marked as a 1 means a leaked handle somewhere */
Laura Abbott404f8242011-10-31 14:22:53 -07001773 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1774 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
1775 node);
1776
1777 if (buf->marked == 1)
1778 seq_printf(s, "%16.x %16.s %16.x %16.d\n",
1779 (int)buf, buf->heap->name, buf->size,
1780 atomic_read(&buf->ref.refcount));
1781 }
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001782 up_write(&dev->lock);
Laura Abbott404f8242011-10-31 14:22:53 -07001783 return 0;
1784}
1785
1786static int ion_debug_leak_open(struct inode *inode, struct file *file)
1787{
1788 return single_open(file, ion_debug_leak_show, inode->i_private);
1789}
1790
1791static const struct file_operations debug_leak_fops = {
1792 .open = ion_debug_leak_open,
1793 .read = seq_read,
1794 .llseek = seq_lseek,
1795 .release = single_release,
1796};
1797
1798
1799
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001800struct ion_device *ion_device_create(long (*custom_ioctl)
1801 (struct ion_client *client,
1802 unsigned int cmd,
1803 unsigned long arg))
1804{
1805 struct ion_device *idev;
1806 int ret;
1807
1808 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1809 if (!idev)
1810 return ERR_PTR(-ENOMEM);
1811
1812 idev->dev.minor = MISC_DYNAMIC_MINOR;
1813 idev->dev.name = "ion";
1814 idev->dev.fops = &ion_fops;
1815 idev->dev.parent = NULL;
1816 ret = misc_register(&idev->dev);
1817 if (ret) {
1818 pr_err("ion: failed to register misc device.\n");
1819 return ERR_PTR(ret);
1820 }
1821
1822 idev->debug_root = debugfs_create_dir("ion", NULL);
1823 if (IS_ERR_OR_NULL(idev->debug_root))
1824 pr_err("ion: failed to create debug files.\n");
1825
1826 idev->custom_ioctl = custom_ioctl;
1827 idev->buffers = RB_ROOT;
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001828 mutex_init(&idev->buffer_lock);
1829 init_rwsem(&idev->lock);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001830 plist_head_init(&idev->heaps);
Laura Abbottb14ed962012-01-30 14:18:08 -08001831 idev->clients = RB_ROOT;
Laura Abbott404f8242011-10-31 14:22:53 -07001832 debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
1833 &debug_leak_fops);
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001834
1835 setup_ion_leak_check(idev->debug_root);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001836 return idev;
1837}
1838
1839void ion_device_destroy(struct ion_device *dev)
1840{
1841 misc_deregister(&dev->dev);
1842 /* XXX need to free the heaps and clients ? */
1843 kfree(dev);
1844}
Laura Abbottb14ed962012-01-30 14:18:08 -08001845
1846void __init ion_reserve(struct ion_platform_data *data)
1847{
Rebecca Schultz Zavinfbce1512012-11-15 10:31:02 -08001848 int i;
Laura Abbottb14ed962012-01-30 14:18:08 -08001849
1850 for (i = 0; i < data->nr; i++) {
1851 if (data->heaps[i].size == 0)
1852 continue;
Rebecca Schultz Zavinfbce1512012-11-15 10:31:02 -08001853
1854 if (data->heaps[i].base == 0) {
1855 phys_addr_t paddr;
1856 paddr = memblock_alloc_base(data->heaps[i].size,
1857 data->heaps[i].align,
1858 MEMBLOCK_ALLOC_ANYWHERE);
1859 if (!paddr) {
1860 pr_err("%s: error allocating memblock for "
1861 "heap %d\n",
1862 __func__, i);
1863 continue;
1864 }
1865 data->heaps[i].base = paddr;
1866 } else {
1867 int ret = memblock_reserve(data->heaps[i].base,
1868 data->heaps[i].size);
1869 if (ret)
1870 pr_err("memblock reserve of %x@%pa failed\n",
1871 data->heaps[i].size,
1872 &data->heaps[i].base);
1873 }
1874 pr_info("%s: %s reserved base %pa size %d\n", __func__,
1875 data->heaps[i].name,
1876 &data->heaps[i].base,
1877 data->heaps[i].size);
Laura Abbottb14ed962012-01-30 14:18:08 -08001878 }
1879}
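
/*
 * Illustrative sketch only: a board file would normally call ion_reserve()
 * from its memory-reservation hook, before the page allocator is up, with a
 * static table like the one below.  The heap id, sizes and the
 * example_board_reserve() name are illustrative.
 *
 *	static struct ion_platform_heap example_heaps[] = {
 *		{
 *			.id = ION_CP_MM_HEAP_ID,
 *			.type = ION_HEAP_TYPE_CARVEOUT,
 *			.name = "mm",
 *			.size = SZ_64M,
 *			.align = SZ_1M,
 *		},
 *	};
 *
 *	static struct ion_platform_data example_ion_pdata = {
 *		.nr = ARRAY_SIZE(example_heaps),
 *		.heaps = example_heaps,
 *	};
 *
 *	static void __init example_board_reserve(void)
 *	{
 *		ion_reserve(&example_ion_pdata);
 *	}
 */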