/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/msm_ion.h>
#include <trace/events/kmem.h>

#include <mach/iommu_domains.h>
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @clients:		rb tree of all the clients in the system
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_type_mask:	mask of all supported heap types
 * @name:		used for debugging
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * as well as the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_type_mask;
	char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @iommu_map_cnt:	count of times this handle has been mapped through an iommu
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int iommu_map_cnt;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return ((buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
			    i) {
			if (sg_dma_len(sg) == PAGE_SIZE)
				continue;
			pr_err("%s: cached mappings that will be faulted in "
			       "must have pagewise sg_lists\n", __func__);
			ret = -EINVAL;
			goto err;
		}

		ret = ion_buffer_alloc_dirty(buffer);
		if (ret)
			goto err;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here.  However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg.  The implicit contract here is that
	   memory coming from the heaps is ready for dma, i.e. if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (sg_dma_address(sg) == 0)
			sg_dma_address(sg) = sg_phys(sg);
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
	kfree(buffer);
	return ERR_PTR(ret);
}

static void ion_delayed_unsecure(struct ion_buffer *buffer)
{
	if (buffer->heap->ops->unsecure_buffer)
		buffer->heap->ops->unsecure_buffer(buffer, 1);
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);

	ion_delayed_unsecure(buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);
	if (buffer->flags & ION_FLAG_CACHED)
		kfree(buffer->dirty);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_put(buffer);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	unsigned long secure_allocation = flags & ION_FLAG_SECURE;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	/*
	 * For now, we don't want to fault in pages individually since
	 * clients are already doing manual cache maintenance. In
	 * other words, the implicit caching infrastructure is in
	 * place (in code) but should not be used.
	 */
	flags |= ION_FLAG_CACHED_NEEDS_SYNC;

	pr_debug("%s: len %d align %d heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_type_mask))
			continue;
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		/* Do not allow un-secure heap if secure is specified */
		if (secure_allocation &&
		    !ion_heap_allow_secure_allocation(heap->type))
			continue;
		trace_ion_alloc_buffer_start(client->name, heap->name, len,
					     heap_id_mask, flags);
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		trace_ion_alloc_buffer_end(client->name, heap->name, len,
					   heap_id_mask, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;

		trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
						heap_id_mask, flags,
						PTR_ERR(buffer));
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
			int ret_value = snprintf(&dbg_str[dbg_str_idx],
						len_left, "%s ", heap->name);
			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
			}
		}
	}
	up_read(&dev->lock);

	if (buffer == NULL) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_id_mask, flags, -ENODEV);
		return ERR_PTR(-ENODEV);
	}

	if (IS_ERR(buffer)) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_id_mask, flags,
					    PTR_ERR(buffer));
		pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
			"0x%x) from heap(s) %sfor client %s\n",
			len, align, dbg_str, client->name);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
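
/*
 * Illustrative sketch, compiled out (not part of this driver): a minimal
 * in-kernel user of the allocation API above.  The ion_device pointer is
 * assumed to come from the platform's ion setup code, and the heap id (0)
 * and flags are placeholders -- real callers pass board-specific heap ids.
 */
#if 0
static int ion_alloc_example(struct ion_device *idev)
{
	struct ion_client *client;
	struct ion_handle *handle;
	void *vaddr;
	int ret = 0;

	/* accept any heap type; the name only shows up in debugfs */
	client = ion_client_create(idev, -1, "example-client");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);

	/* one page from the (hypothetical) heap with id 0, uncached */
	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE, 1 << 0, 0);
	if (IS_ERR_OR_NULL(handle)) {
		ret = PTR_ERR(handle);
		goto out_client;
	}

	vaddr = ion_map_kernel(client, handle);
	if (IS_ERR_OR_NULL(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out_handle;
	}

	memset(vaddr, 0, PAGE_SIZE);
	ion_unmap_kernel(client, handle);

out_handle:
	ion_free(client, handle);
out_client:
	ion_client_destroy(client);
	return ret;
}
#endif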

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		mutex_unlock(&client->lock);
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;

	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
			"heap_name", "size_in_bytes", "handle refcount",
			"buffer", "physical", "[domain,partition] - virt");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);

		enum ion_heap_type type = handle->buffer->heap->type;

		seq_printf(s, "%16.16s: %16x : %16d : %12p",
			   handle->buffer->heap->name,
			   handle->buffer->size,
			   atomic_read(&handle->ref.refcount),
			   handle->buffer);

		if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
		    type == ION_HEAP_TYPE_CARVEOUT ||
		    type == (enum ion_heap_type) ION_HEAP_TYPE_CP)
			seq_printf(s, " : %12pa", &handle->buffer->priv_phys);
		else
			seq_printf(s, " : %12s", "N/A");

		seq_printf(s, "\n");
	}
	mutex_unlock(&client->lock);
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_type_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;
	unsigned int name_len;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	name_len = strnlen(name, 64);

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);

	client->name = kzalloc(name_len+1, GFP_KERNEL);
	if (!client->name) {
		put_task_struct(current->group_leader);
		kfree(client);
		return ERR_PTR(-ENOMEM);
	} else {
		strlcpy(client->name, name, name_len+1);
	}

	client->heap_type_mask = heap_type_mask;
	client->task = task;
	client->pid = pid;

	down_write(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	up_write(&dev->lock);

	return client;
}

/**
 * ion_mark_dangling_buffers_locked() - Mark dangling buffers
 * @dev:	the ion device whose buffers will be searched
 *
 * Sets marked=1 for all known buffers associated with `dev' that no
 * longer have a handle pointing to them. dev->lock should be held
 * across a call to this function (and should only be unlocked after
 * checking for marked buffers).
 */
static void ion_mark_dangling_buffers_locked(struct ion_device *dev)
{
	struct rb_node *n, *n2;
	/* mark all buffers as 1 */
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		buf->marked = 1;
	}

	/* now see which buffers we can access */
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);

		mutex_lock(&client->lock);
		for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
			struct ion_handle *handle
				= rb_entry(n2, struct ion_handle, node);

			handle->buffer->marked = 0;

		}
		mutex_unlock(&client->lock);

	}
}

#ifdef CONFIG_ION_LEAK_CHECK
static u32 ion_debug_check_leaks_on_destroy;

static int ion_check_for_and_print_leaks(struct ion_device *dev)
{
	struct rb_node *n;
	int num_leaks = 0;

	if (!ion_debug_check_leaks_on_destroy)
		return 0;

	/* check for leaked buffers (those that no longer have a
	 * handle pointing to them) */
	ion_mark_dangling_buffers_locked(dev);

	/* Anyone still marked as a 1 means a leaked handle somewhere */
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		if (buf->marked == 1) {
			pr_info("Leaked ion buffer at %p\n", buf);
			num_leaks++;
		}
	}
	return num_leaks;
}
static void setup_ion_leak_check(struct dentry *debug_root)
{
	debugfs_create_bool("check_leaks_on_destroy", 0664, debug_root,
			&ion_debug_check_leaks_on_destroy);
}
#else
static int ion_check_for_and_print_leaks(struct ion_device *dev)
{
	return 0;
}
static void setup_ion_leak_check(struct dentry *debug_root)
{
}
#endif

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;
	int num_leaks;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);

	num_leaks = ion_check_for_and_print_leaks(dev);

	up_write(&dev->lock);

	if (num_leaks) {
		struct task_struct *current_task = current;
		char current_task_name[TASK_COMM_LEN];
		get_task_comm(current_task_name, current_task);
		WARN(1, "%s: Detected %d leaked ion buffer%s.\n",
			__func__, num_leaks, num_leaks == 1 ? "" : "s");
		pr_info("task name at time of leak: %s, pid: %d\n",
			current_task_name, current_task->pid);
	}

	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
			 unsigned long *flags)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*flags = buffer->flags;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_flags);

int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
			unsigned long *size)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*size = buffer->size;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_size);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
					size_t chunk_size, size_t total_size)
{
	struct sg_table *table;
	int i, n_chunks, ret;
	struct scatterlist *sg;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	n_chunks = DIV_ROUND_UP(total_size, chunk_size);
	pr_debug("creating sg_table with %d chunks\n", n_chunks);

	ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
	if (ret)
		goto err0;

	for_each_sg(table->sgl, sg, table->nents, i) {
		dma_addr_t addr = buffer_base + i * chunk_size;
		sg_dma_address(sg) = addr;
		sg_dma_len(sg) = chunk_size;
	}

	return table;
err0:
	kfree(table);
	return ERR_PTR(ret);
}

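/*
 * Illustrative sketch, compiled out (not part of this driver): building a
 * chunked sg_table for a physically contiguous region with the helper
 * above.  The base address and sizes are made-up values for illustration.
 */
#if 0
static void ion_chunked_sg_table_example(void)
{
	struct sg_table *table;

	/* describe a 1MB region at 0x80000000 as 16 chunks of 64KB */
	table = ion_create_chunked_sg_table(0x80000000, SZ_64K, SZ_1M);
	if (IS_ERR(table))
		return;

	/* ... use table->sgl / table->nents ... */

	sg_free_table(table);
	kfree(table);
}
#endif
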
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
	unsigned long pages = buffer->sg_table->nents;
	unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;

	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
	if (!buffer->dirty)
		return -ENOMEM;
	return 0;
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;
	struct ion_vma_list *vma_list;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (!test_bit(i, buffer->dirty))
			continue;
		dma_sync_sg_for_device(dev, sg, 1, dir);
		clear_bit(i, buffer->dirty);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct scatterlist *sg;
	int i;

	mutex_lock(&buffer->lock);
	set_bit(vmf->pgoff, buffer->dirty);

	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (i != vmf->pgoff)
			continue;
		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			       sg_page(sg));
		break;
	}
	mutex_unlock(&buffer->lock);
	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);

	if (buffer->heap->ops->unmap_user)
		buffer->heap->ops->unmap_user(buffer->heap, buffer);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
			"to userspace\n", __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		vma->vm_flags |= VM_MIXEDMAP;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
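
/*
 * Illustrative sketch, compiled out (not part of this driver): the
 * share/import round trip provided by the two functions above.  In
 * practice the fd is returned to userspace (e.g. via ION_IOC_SHARE) and
 * imported by whoever receives it; here the two clients and the handle
 * are assumed to already exist, and error handling is trimmed.
 */
#if 0
static int ion_share_example(struct ion_client *producer,
			     struct ion_client *consumer,
			     struct ion_handle *handle)
{
	struct ion_handle *imported;
	int fd;

	/* export: wraps the buffer in a dma-buf and installs an fd for it */
	fd = ion_share_dma_buf(producer, handle);
	if (fd < 0)
		return fd;

	/* import: resolves the fd back to a handle in the other client */
	imported = ion_import_dma_buf(consumer, fd);
	if (IS_ERR_OR_NULL(imported))
		return PTR_ERR(imported);

	/* both handles now reference the same underlying ion_buffer */
	ion_free(consumer, imported);
	return 0;
}
#endif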
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001250
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001251static int ion_sync_for_device(struct ion_client *client, int fd)
1252{
1253 struct dma_buf *dmabuf;
1254 struct ion_buffer *buffer;
1255
1256 dmabuf = dma_buf_get(fd);
1257 if (IS_ERR_OR_NULL(dmabuf))
1258 return PTR_ERR(dmabuf);
1259
1260 /* if this memory came from ion */
1261 if (dmabuf->ops != &dma_buf_ops) {
1262 pr_err("%s: can not sync dmabuf from another exporter\n",
1263 __func__);
1264 dma_buf_put(dmabuf);
1265 return -EINVAL;
1266 }
1267 buffer = dmabuf->priv;
Rebecca Schultz Zavin3edb9002012-09-19 23:31:05 -07001268
1269 dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1270 buffer->sg_table->nents, DMA_BIDIRECTIONAL);
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001271 dma_buf_put(dmabuf);
1272 return 0;
1273}
1274
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001275static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1276{
1277 struct ion_client *client = filp->private_data;
1278
1279 switch (cmd) {
1280 case ION_IOC_ALLOC:
1281 {
1282 struct ion_allocation_data data;
1283
1284 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1285 return -EFAULT;
1286 data.handle = ion_alloc(client, data.len, data.align,
Hanumant Singh7d72bad2012-08-29 18:39:44 -07001287 data.heap_mask, data.flags);
KyongHo Cho9ae7e012011-09-07 11:27:07 +09001288
Laura Abbottb14ed962012-01-30 14:18:08 -08001289 if (IS_ERR(data.handle))
1290 return PTR_ERR(data.handle);
KyongHo Cho9ae7e012011-09-07 11:27:07 +09001291
Laura Abbottb14ed962012-01-30 14:18:08 -08001292 if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
1293 ion_free(client, data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001294 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001295 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001296 break;
1297 }
1298 case ION_IOC_FREE:
1299 {
1300 struct ion_handle_data data;
1301 bool valid;
1302
1303 if (copy_from_user(&data, (void __user *)arg,
1304 sizeof(struct ion_handle_data)))
1305 return -EFAULT;
1306 mutex_lock(&client->lock);
1307 valid = ion_handle_validate(client, data.handle);
1308 mutex_unlock(&client->lock);
1309 if (!valid)
1310 return -EINVAL;
1311 ion_free(client, data.handle);
1312 break;
1313 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001314 case ION_IOC_MAP:
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001315 case ION_IOC_SHARE:
1316 {
1317 struct ion_fd_data data;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001318 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1319 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001320
Laura Abbottb14ed962012-01-30 14:18:08 -08001321 data.fd = ion_share_dma_buf(client, data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001322 if (copy_to_user((void __user *)arg, &data, sizeof(data)))
1323 return -EFAULT;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001324 if (data.fd < 0)
1325 return data.fd;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001326 break;
1327 }
1328 case ION_IOC_IMPORT:
1329 {
1330 struct ion_fd_data data;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001331 int ret = 0;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001332 if (copy_from_user(&data, (void __user *)arg,
1333 sizeof(struct ion_fd_data)))
1334 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001335 data.handle = ion_import_dma_buf(client, data.fd);
Olav Haugan865e97f2012-05-15 14:40:11 -07001336 if (IS_ERR(data.handle)) {
1337 ret = PTR_ERR(data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001338 data.handle = NULL;
Olav Haugan865e97f2012-05-15 14:40:11 -07001339 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001340 if (copy_to_user((void __user *)arg, &data,
1341 sizeof(struct ion_fd_data)))
1342 return -EFAULT;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001343 if (ret < 0)
1344 return ret;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001345 break;
1346 }
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001347 case ION_IOC_SYNC:
1348 {
1349 struct ion_fd_data data;
1350 if (copy_from_user(&data, (void __user *)arg,
1351 sizeof(struct ion_fd_data)))
1352 return -EFAULT;
1353 ion_sync_for_device(client, data.fd);
1354 break;
1355 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001356 case ION_IOC_CUSTOM:
1357 {
1358 struct ion_device *dev = client->dev;
1359 struct ion_custom_data data;
1360
1361 if (!dev->custom_ioctl)
1362 return -ENOTTY;
1363 if (copy_from_user(&data, (void __user *)arg,
1364 sizeof(struct ion_custom_data)))
1365 return -EFAULT;
1366 return dev->custom_ioctl(client, data.cmd, data.arg);
1367 }
Laura Abbottabcb6f72011-10-04 16:26:49 -07001368 case ION_IOC_CLEAN_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001369 return client->dev->custom_ioctl(client,
1370 ION_IOC_CLEAN_CACHES, arg);
Laura Abbottabcb6f72011-10-04 16:26:49 -07001371 case ION_IOC_INV_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001372 return client->dev->custom_ioctl(client,
1373 ION_IOC_INV_CACHES, arg);
Laura Abbottabcb6f72011-10-04 16:26:49 -07001374 case ION_IOC_CLEAN_INV_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001375 return client->dev->custom_ioctl(client,
1376 ION_IOC_CLEAN_INV_CACHES, arg);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001377 default:
1378 return -ENOTTY;
1379 }
1380 return 0;
1381}
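
/*
 * Illustrative userspace sketch (not part of this driver) of the ioctl
 * flow handled above: allocate, export as a dma-buf fd, then drop the
 * handle.  Structure and ioctl names follow this tree's <linux/ion.h>;
 * the heap id used in the mask is a placeholder.
 *
 *	int ion_fd = open("/dev/ion", O_RDONLY);
 *	struct ion_allocation_data alloc_data = {
 *		.len = 4096,
 *		.align = 4096,
 *		.heap_mask = ION_HEAP(ION_SYSTEM_HEAP_ID),
 *		.flags = 0,
 *	};
 *	struct ion_fd_data fd_data;
 *	struct ion_handle_data handle_data;
 *
 *	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data))
 *		goto err;
 *	fd_data.handle = alloc_data.handle;
 *	if (ioctl(ion_fd, ION_IOC_SHARE, &fd_data))
 *		goto err;
 *	... fd_data.fd now refers to a dma-buf usable with mmap() ...
 *	handle_data.handle = alloc_data.handle;
 *	ioctl(ion_fd, ION_IOC_FREE, &handle_data);
 */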
1382
1383static int ion_release(struct inode *inode, struct file *file)
1384{
1385 struct ion_client *client = file->private_data;
1386
1387 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbottb14ed962012-01-30 14:18:08 -08001388 ion_client_destroy(client);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001389 return 0;
1390}
1391
1392static int ion_open(struct inode *inode, struct file *file)
1393{
1394 struct miscdevice *miscdev = file->private_data;
1395 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1396 struct ion_client *client;
Laura Abbotteed86032011-12-05 15:32:36 -08001397 char debug_name[64];
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001398
1399 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbotteed86032011-12-05 15:32:36 -08001400 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1401 client = ion_client_create(dev, -1, debug_name);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001402 if (IS_ERR_OR_NULL(client))
1403 return PTR_ERR(client);
1404 file->private_data = client;
1405
1406 return 0;
1407}
1408
1409static const struct file_operations ion_fops = {
1410 .owner = THIS_MODULE,
1411 .open = ion_open,
1412 .release = ion_release,
1413 .unlocked_ioctl = ion_ioctl,
1414};
1415
1416static size_t ion_debug_heap_total(struct ion_client *client,
Laura Abbott3647ac32011-10-31 14:09:53 -07001417 enum ion_heap_ids id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001418{
1419 size_t size = 0;
1420 struct rb_node *n;
1421
1422 mutex_lock(&client->lock);
1423 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1424 struct ion_handle *handle = rb_entry(n,
1425 struct ion_handle,
1426 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001427 if (handle->buffer->heap->id == id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001428 size += handle->buffer->size;
1429 }
1430 mutex_unlock(&client->lock);
1431 return size;
1432}
1433
Olav Haugan0671b9a2012-05-25 11:58:56 -07001434/**
1435 * Searches through a client's handles to determine whether the buffer
1436 * is owned by this client. Used for debug output.
1437 * @param client pointer to candidate owner of buffer
1438 * @param buf pointer to buffer that we are trying to find the owner of
1439 * @return 1 if found, 0 otherwise
1440 */
1441static int ion_debug_find_buffer_owner(const struct ion_client *client,
1442 const struct ion_buffer *buf)
1443{
1444 struct rb_node *n;
1445
1446 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1447 const struct ion_handle *handle = rb_entry(n,
1448 const struct ion_handle,
1449 node);
1450 if (handle->buffer == buf)
1451 return 1;
1452 }
1453 return 0;
1454}
1455
1456/**
1457 * Adds a mem_map_data pointer to the mem_map tree.
1458 * Used for debug output.
1459 * @param mem_map The mem_map tree
1460 * @param data The new data to add to the tree
1461 */
1462static void ion_debug_mem_map_add(struct rb_root *mem_map,
1463 struct mem_map_data *data)
1464{
1465 struct rb_node **p = &mem_map->rb_node;
1466 struct rb_node *parent = NULL;
1467 struct mem_map_data *entry;
1468
1469 while (*p) {
1470 parent = *p;
1471 entry = rb_entry(parent, struct mem_map_data, node);
1472
1473 if (data->addr < entry->addr) {
1474 p = &(*p)->rb_left;
1475 } else if (data->addr > entry->addr) {
1476 p = &(*p)->rb_right;
1477 } else {
1478 pr_err("%s: mem_map_data already found.\n", __func__);
1479 BUG();
1480 }
1481 }
1482 rb_link_node(&data->node, parent, p);
1483 rb_insert_color(&data->node, mem_map);
1484}
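
/*
 * The tree built above is keyed strictly on the buffer's physical start
 * address, so an in-order walk returns the heap's allocations in
 * ascending address order.  A heap's print_debug() hook can dump the map
 * with the usual rb_first()/rb_next() idiom, e.g. (sketch only, using the
 * mem_map_data fields populated in ion_debug_mem_map_create() below):
 *
 *	struct rb_node *n;
 *
 *	for (n = rb_first(mem_map); n; n = rb_next(n)) {
 *		struct mem_map_data *md =
 *			rb_entry(n, struct mem_map_data, node);
 *		seq_printf(s, "%lx-%lx %lu %s\n",
 *			   (unsigned long)md->addr,
 *			   (unsigned long)md->addr_end, md->size,
 *			   md->client_name ? md->client_name : "(null)");
 *	}
 */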
1485
1486/**
1487 * Search for an owner of a buffer by iterating over all ION clients.
1488 * @param dev ion device containing pointers to all the clients.
1489 * @param buffer pointer to buffer we are trying to find the owner of.
1490 * @return name of owner.
1491 */
1492const char *ion_debug_locate_owner(const struct ion_device *dev,
1493 const struct ion_buffer *buffer)
1494{
1495 struct rb_node *j;
1496 const char *client_name = NULL;
1497
Laura Abbottb14ed962012-01-30 14:18:08 -08001498 for (j = rb_first(&dev->clients); j && !client_name;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001499 j = rb_next(j)) {
1500 struct ion_client *client = rb_entry(j, struct ion_client,
1501 node);
1502 if (ion_debug_find_buffer_owner(client, buffer))
1503 client_name = client->name;
1504 }
1505 return client_name;
1506}
1507
1508/**
1509 * Create a mem_map of the heap.
1510 * @param s seq_file to log error message to.
1511 * @param heap The heap to create mem_map for.
1512 * @param mem_map The mem map to be created.
1513 */
1514void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
1515 struct rb_root *mem_map)
1516{
1517 struct ion_device *dev = heap->dev;
1518 struct rb_node *n;
Chintan Pandyadaf75622013-01-29 19:40:01 +05301519 size_t size;
1520
1521 if (!heap->ops->phys)
1522 return;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001523
1524 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1525 struct ion_buffer *buffer =
1526 rb_entry(n, struct ion_buffer, node);
1527 if (buffer->heap->id == heap->id) {
1528 struct mem_map_data *data =
1529 kzalloc(sizeof(*data), GFP_KERNEL);
1530 if (!data) {
1531 seq_printf(s, "ERROR: out of memory. "
1532 "Part of memory map will not be logged\n");
1533 break;
1534 }
Chintan Pandyadaf75622013-01-29 19:40:01 +05301535
1536 buffer->heap->ops->phys(buffer->heap, buffer,
1537 &(data->addr), &size);
1538 data->size = (unsigned long) size;
1539 data->addr_end = data->addr + data->size - 1;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001540 data->client_name = ion_debug_locate_owner(dev, buffer);
1541 ion_debug_mem_map_add(mem_map, data);
1542 }
1543 }
1544}
1545
1546/**
1547 * Free the memory allocated by ion_debug_mem_map_create
1548 * @param mem_map The mem map to free.
1549 */
1550static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
1551{
1552 if (mem_map) {
1553 struct rb_node *n;
1554 while ((n = rb_first(mem_map)) != 0) {
1555 struct mem_map_data *data =
1556 rb_entry(n, struct mem_map_data, node);
1557 rb_erase(&data->node, mem_map);
1558 kfree(data);
1559 }
1560 }
1561}
1562
1563/**
1564 * Print heap debug information.
1565 * @param s seq_file to log message to.
1566 * @param heap pointer to heap that we will print debug information for.
1567 */
1568static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
1569{
1570 if (heap->ops->print_debug) {
1571 struct rb_root mem_map = RB_ROOT;
1572 ion_debug_mem_map_create(s, heap, &mem_map);
1573 heap->ops->print_debug(heap, s, &mem_map);
1574 ion_debug_mem_map_destroy(&mem_map);
1575 }
1576}
1577
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001578static int ion_debug_heap_show(struct seq_file *s, void *unused)
1579{
1580 struct ion_heap *heap = s->private;
1581 struct ion_device *dev = heap->dev;
1582 struct rb_node *n;
1583
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001584 mutex_lock(&dev->buffer_lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001585 seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001586
Laura Abbottb14ed962012-01-30 14:18:08 -08001587 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001588 struct ion_client *client = rb_entry(n, struct ion_client,
1589 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001590 size_t size = ion_debug_heap_total(client, heap->id);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001591 if (!size)
1592 continue;
Laura Abbottb14ed962012-01-30 14:18:08 -08001593 if (client->task) {
1594 char task_comm[TASK_COMM_LEN];
1595
1596 get_task_comm(task_comm, client->task);
1597 seq_printf(s, "%16s %16u %16zu\n", task_comm,
1598 client->pid, size);
1599 } else {
1600 seq_printf(s, "%16s %16u %16zu\n", client->name,
1601 client->pid, size);
1602 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001603 }
Olav Haugan0671b9a2012-05-25 11:58:56 -07001604 ion_heap_print_debug(s, heap);
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001605 mutex_unlock(&dev->buffer_lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001606 return 0;
1607}
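
/*
 * The table printed above appears in debugfs as ion/<heap name> (created
 * in ion_device_add_heap() below).  A purely illustrative read might
 * look like:
 *
 *	          client              pid             size
 *	  surfaceflinger              201          8388608
 *	     mediaserver              164          1048576
 *
 * followed by the heap's own print_debug() output, including the memory
 * map assembled by ion_debug_mem_map_create().
 */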
1608
1609static int ion_debug_heap_open(struct inode *inode, struct file *file)
1610{
1611 return single_open(file, ion_debug_heap_show, inode->i_private);
1612}
1613
1614static const struct file_operations debug_heap_fops = {
1615 .open = ion_debug_heap_open,
1616 .read = seq_read,
1617 .llseek = seq_lseek,
1618 .release = single_release,
1619};
1620
1621void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1622{
Laura Abbottb14ed962012-01-30 14:18:08 -08001623 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1624 !heap->ops->unmap_dma)
1625 pr_err("%s: can not add heap with invalid ops struct.\n",
1626 __func__);
1627
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001628 heap->dev = dev;
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001629 down_write(&dev->lock);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001630 /* use negative heap->id to reverse the priority -- when traversing
1631 the list later attempt higher id numbers first */
1632 plist_node_init(&heap->node, -heap->id);
1633 plist_add(&heap->node, &dev->heaps);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001634 debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1635 &debug_heap_fops);
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001636 up_write(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001637}
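
/*
 * Worked example of the negative-priority trick above: heaps registered
 * with ids 0, 1 and 2 get plist priorities 0, -1 and -2.  plists are
 * walked from the smallest priority value upwards, so a
 * plist_for_each_entry() over dev->heaps (as in ion_secure_heap() below)
 * visits the id-2 heap first and the id-0 heap last, i.e. higher heap
 * ids win when more than one heap could satisfy a request.
 */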
1638
Laura Abbott93619302012-10-11 11:51:40 -07001639int ion_secure_handle(struct ion_client *client, struct ion_handle *handle,
1640 int version, void *data, int flags)
1641{
1642 int ret = -EINVAL;
1643 struct ion_heap *heap;
1644 struct ion_buffer *buffer;
1645
1646 mutex_lock(&client->lock);
1647 if (!ion_handle_validate(client, handle)) {
1648 WARN(1, "%s: invalid handle passed to secure.\n", __func__);
1649 goto out_unlock;
1650 }
1651
1652 buffer = handle->buffer;
1653 heap = buffer->heap;
1654
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001655 if (!ion_heap_allow_handle_secure(heap->type)) {
Laura Abbott93619302012-10-11 11:51:40 -07001656 pr_err("%s: cannot secure buffer from non secure heap\n",
1657 __func__);
1658 goto out_unlock;
1659 }
1660
1661 BUG_ON(!buffer->heap->ops->secure_buffer);
1662 /*
1663 * Protect the handle via the client lock to ensure we aren't
1664 * racing with free
1665 */
1666 ret = buffer->heap->ops->secure_buffer(buffer, version, data, flags);
1667
1668out_unlock:
1669 mutex_unlock(&client->lock);
1670 return ret;
1671}
1672
1673int ion_unsecure_handle(struct ion_client *client, struct ion_handle *handle)
1674{
1675 int ret = -EINVAL;
1676 struct ion_heap *heap;
1677 struct ion_buffer *buffer;
1678
1679 mutex_lock(&client->lock);
1680 if (!ion_handle_validate(client, handle)) {
1681 WARN(1, "%s: invalid handle passed to unsecure.\n", __func__);
1682 goto out_unlock;
1683 }
1684
1685 buffer = handle->buffer;
1686 heap = buffer->heap;
1687
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001688 if (!ion_heap_allow_handle_secure(heap->type)) {
Laura Abbott93619302012-10-11 11:51:40 -07001689 pr_err("%s: cannot unsecure buffer from non secure heap\n",
1690 __func__);
1691 goto out_unlock;
1692 }
1693
1694 BUG_ON(!buffer->heap->ops->unsecure_buffer);
1695 /*
1696 * Protect the handle via the client lock to ensure we aren't
1697 * racing with free
1698 */
1699 ret = buffer->heap->ops->unsecure_buffer(buffer, 0);
1700
1701out_unlock:
1702 mutex_unlock(&client->lock);
1703 return ret;
1704}
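
/*
 * Sketch of how an in-kernel client might drive the handle-level calls
 * above (illustrative only; cp_heap_id, version, data and flags are
 * placeholders for whatever the heap's secure_buffer() op expects):
 *
 *	struct ion_handle *handle;
 *	int ret;
 *
 *	handle = ion_alloc(client, len, SZ_4K, ION_HEAP(cp_heap_id), flags);
 *	if (IS_ERR_OR_NULL(handle))
 *		return PTR_ERR(handle);
 *
 *	ret = ion_secure_handle(client, handle, version, data, flags);
 *	if (!ret) {
 *		... hand the protected buffer to the secure environment ...
 *		ion_unsecure_handle(client, handle);
 *	}
 *	ion_free(client, handle);
 */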
1705
Laura Abbott7e446482012-06-13 15:59:39 -07001706int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
1707 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001708{
Olav Haugan0a852512012-01-09 10:20:55 -08001709 int ret_val = 0;
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001710 struct ion_heap *heap;
Olav Haugan0a852512012-01-09 10:20:55 -08001711
1712 /*
1713 * traverse the list of heaps available in this system
1714 * and find the heap that is specified.
1715 */
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001716 down_write(&dev->lock);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001717 plist_for_each_entry(heap, &dev->heaps, node) {
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001718 if (!ion_heap_allow_heap_secure(heap->type))
Olav Haugan0a852512012-01-09 10:20:55 -08001719 continue;
1720 if (ION_HEAP(heap->id) != heap_id)
1721 continue;
1722 if (heap->ops->secure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001723 ret_val = heap->ops->secure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001724 else
1725 ret_val = -EINVAL;
1726 break;
1727 }
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001728 up_write(&dev->lock);
Olav Haugan0a852512012-01-09 10:20:55 -08001729 return ret_val;
1730}
Olav Hauganbd453a92012-07-05 14:21:34 -07001731EXPORT_SYMBOL(ion_secure_heap);
Olav Haugan0a852512012-01-09 10:20:55 -08001732
Laura Abbott7e446482012-06-13 15:59:39 -07001733int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
1734 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001735{
Olav Haugan0a852512012-01-09 10:20:55 -08001736 int ret_val = 0;
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001737 struct ion_heap *heap;
Olav Haugan0a852512012-01-09 10:20:55 -08001738
1739 /*
1740 * traverse the list of heaps available in this system
1741 * and find the heap that is specified.
1742 */
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001743 down_write(&dev->lock);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001744 plist_for_each_entry(heap, &dev->heaps, node) {
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001745 if (!ion_heap_allow_heap_secure(heap->type))
Olav Haugan0a852512012-01-09 10:20:55 -08001746 continue;
1747 if (ION_HEAP(heap->id) != heap_id)
1748 continue;
1749 if (heap->ops->unsecure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001750 ret_val = heap->ops->unsecure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001751 else
1752 ret_val = -EINVAL;
1753 break;
1754 }
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001755 up_write(&dev->lock);
Olav Haugan0a852512012-01-09 10:20:55 -08001756 return ret_val;
1757}
Olav Hauganbd453a92012-07-05 14:21:34 -07001758EXPORT_SYMBOL(ion_unsecure_heap);
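
/*
 * Heap-level counterpart, typically driven by content-protection code
 * outside this file (sketch; heap_id, version and data are placeholders).
 * Note that both functions compare ION_HEAP(heap->id) against the
 * heap_id argument, so callers pass a mask built with ION_HEAP(), not a
 * raw heap id:
 *
 *	ret = ion_secure_heap(idev, ION_HEAP(heap_id), version, data);
 *	if (ret)
 *		return ret;
 *	... every buffer in the heap is now protected ...
 *	ion_unsecure_heap(idev, ION_HEAP(heap_id), version, data);
 */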
Olav Haugan0a852512012-01-09 10:20:55 -08001759
Laura Abbott404f8242011-10-31 14:22:53 -07001760static int ion_debug_leak_show(struct seq_file *s, void *unused)
1761{
1762 struct ion_device *dev = s->private;
1763 struct rb_node *n;
Laura Abbott404f8242011-10-31 14:22:53 -07001764
Laura Abbott404f8242011-10-31 14:22:53 -07001765 seq_printf(s, "%16s %16s %16s %16s\n", "buffer", "heap", "size",
1766 "ref cnt");
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001767
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001768 ion_mark_dangling_buffers_locked(dev);
Laura Abbott404f8242011-10-31 14:22:53 -07001769
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001770 down_write(&dev->lock);
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001771 /* Anyone still marked as a 1 means a leaked handle somewhere */
Laura Abbott404f8242011-10-31 14:22:53 -07001772 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1773 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
1774 node);
1775
1776 if (buf->marked == 1)
1777 seq_printf(s, "%16x %16s %16x %16d\n",
1778 (int)buf, buf->heap->name, buf->size,
1779 atomic_read(&buf->ref.refcount));
1780 }
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001781 up_write(&dev->lock);
Laura Abbott404f8242011-10-31 14:22:53 -07001782 return 0;
1783}
1784
1785static int ion_debug_leak_open(struct inode *inode, struct file *file)
1786{
1787 return single_open(file, ion_debug_leak_show, inode->i_private);
1788}
1789
1790static const struct file_operations debug_leak_fops = {
1791 .open = ion_debug_leak_open,
1792 .read = seq_read,
1793 .llseek = seq_lseek,
1794 .release = single_release,
1795};
1796
1797
1798
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001799struct ion_device *ion_device_create(long (*custom_ioctl)
1800 (struct ion_client *client,
1801 unsigned int cmd,
1802 unsigned long arg))
1803{
1804 struct ion_device *idev;
1805 int ret;
1806
1807 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1808 if (!idev)
1809 return ERR_PTR(-ENOMEM);
1810
1811 idev->dev.minor = MISC_DYNAMIC_MINOR;
1812 idev->dev.name = "ion";
1813 idev->dev.fops = &ion_fops;
1814 idev->dev.parent = NULL;
1815 ret = misc_register(&idev->dev);
1816 if (ret) {
1817 pr_err("ion: failed to register misc device.\n");
1818 return ERR_PTR(ret);
1819 }
1820
1821 idev->debug_root = debugfs_create_dir("ion", NULL);
1822 if (IS_ERR_OR_NULL(idev->debug_root))
1823 pr_err("ion: failed to create debug files.\n");
1824
1825 idev->custom_ioctl = custom_ioctl;
1826 idev->buffers = RB_ROOT;
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001827 mutex_init(&idev->buffer_lock);
1828 init_rwsem(&idev->lock);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001829 plist_head_init(&idev->heaps);
Laura Abbottb14ed962012-01-30 14:18:08 -08001830 idev->clients = RB_ROOT;
Laura Abbott404f8242011-10-31 14:22:53 -07001831 debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
1832 &debug_leak_fops);
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001833
1834 setup_ion_leak_check(idev->debug_root);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001835 return idev;
1836}
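
/*
 * Typical use of ion_device_create()/ion_device_add_heap() from a
 * platform driver's probe (sketch; msm_ion_custom_ioctl is a placeholder
 * name and the ion_heap_create() helper is assumed from ion_priv.h):
 *
 *	static struct ion_device *idev;
 *
 *	static int msm_ion_probe(struct platform_device *pdev)
 *	{
 *		struct ion_platform_data *pdata = pdev->dev.platform_data;
 *		int i;
 *
 *		idev = ion_device_create(msm_ion_custom_ioctl);
 *		if (IS_ERR(idev))
 *			return PTR_ERR(idev);
 *
 *		for (i = 0; i < pdata->nr; i++) {
 *			struct ion_heap *heap =
 *				ion_heap_create(&pdata->heaps[i]);
 *			if (!IS_ERR_OR_NULL(heap))
 *				ion_device_add_heap(idev, heap);
 *		}
 *		return 0;
 *	}
 */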
1837
1838void ion_device_destroy(struct ion_device *dev)
1839{
1840 misc_deregister(&dev->dev);
1841 /* XXX need to free the heaps and clients ? */
1842 kfree(dev);
1843}
Laura Abbottb14ed962012-01-30 14:18:08 -08001844
1845void __init ion_reserve(struct ion_platform_data *data)
1846{
Rebecca Schultz Zavinfbce1512012-11-15 10:31:02 -08001847 int i;
Laura Abbottb14ed962012-01-30 14:18:08 -08001848
1849 for (i = 0; i < data->nr; i++) {
1850 if (data->heaps[i].size == 0)
1851 continue;
Rebecca Schultz Zavinfbce1512012-11-15 10:31:02 -08001852
1853 if (data->heaps[i].base == 0) {
1854 phys_addr_t paddr;
1855 paddr = memblock_alloc_base(data->heaps[i].size,
1856 data->heaps[i].align,
1857 MEMBLOCK_ALLOC_ANYWHERE);
1858 if (!paddr) {
1859 pr_err("%s: error allocating memblock for "
1860 "heap %d\n",
1861 __func__, i);
1862 continue;
1863 }
1864 data->heaps[i].base = paddr;
1865 } else {
1866 int ret = memblock_reserve(data->heaps[i].base,
1867 data->heaps[i].size);
1868 if (ret)
1869 pr_err("memblock reserve of %x@%pa failed\n",
1870 data->heaps[i].size,
1871 &data->heaps[i].base);
1872 }
1873 pr_info("%s: %s reserved base %pa size %d\n", __func__,
1874 data->heaps[i].name,
1875 &data->heaps[i].base,
1876 data->heaps[i].size);
Laura Abbottb14ed962012-01-30 14:18:08 -08001877 }
1878}
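
/*
 * ion_reserve() is meant to run from the machine's early reserve hook,
 * before the page allocator claims the memory.  A board file would wire
 * it up roughly as below (sketch; the heap id, heap type, sizes and the
 * msm_ion_pdata name are placeholders, field names follow
 * struct ion_platform_heap as used above):
 *
 *	static struct ion_platform_heap msm_ion_heaps[] = {
 *		{
 *			.id    = ION_CP_MM_HEAP_ID,
 *			.type  = ION_HEAP_TYPE_CP,
 *			.name  = "mm",
 *			.size  = SZ_64M,
 *			.align = SZ_1M,
 *			.base  = 0,
 *		},
 *	};
 *
 *	static struct ion_platform_data msm_ion_pdata = {
 *		.nr    = ARRAY_SIZE(msm_ion_heaps),
 *		.heaps = msm_ion_heaps,
 *	};
 *
 *	static void __init msm_board_reserve(void)
 *	{
 *		ion_reserve(&msm_ion_pdata);
 *	}
 *
 * Leaving .base at 0 makes ion_reserve() carve the region out of
 * memblock itself; a non-zero .base is reserved in place instead.
 */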