/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/msm_ion.h>
#include <trace/events/kmem.h>


#include <mach/iommu_domains.h>
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @clients:		an rb tree of all the existing clients
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_type_mask;
	char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @iommu_map_cnt:	count of times this client has mapped for iommu
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client. Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int iommu_map_cnt;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return ((buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
			    i) {
			if (sg_dma_len(sg) == PAGE_SIZE)
				continue;
			pr_err("%s: cached mappings that will be faulted in "
			       "must have pagewise sg_lists\n", __func__);
			ret = -EINVAL;
			goto err;
		}

		ret = ion_buffer_alloc_dirty(buffer);
		if (ret)
			goto err;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here.  However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg. The implicit contract here is that
	   memory coming from the heaps is ready for dma, ie if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (sg_dma_address(sg) == 0)
			sg_dma_address(sg) = sg_phys(sg);
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
	kfree(buffer);
	return ERR_PTR(ret);
}

static void ion_delayed_unsecure(struct ion_buffer *buffer)
{
	if (buffer->heap->ops->unsecure_buffer)
		buffer->heap->ops->unsecure_buffer(buffer, 1);
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);

	ion_delayed_unsecure(buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);
	if (buffer->flags & ION_FLAG_CACHED)
		kfree(buffer->dirty);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_put(buffer);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	unsigned long secure_allocation = flags & ION_FLAG_SECURE;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	/*
	 * For now, we don't want to fault in pages individually since
	 * clients are already doing manual cache maintenance. In
	 * other words, the implicit caching infrastructure is in
	 * place (in code) but should not be used.
	 */
	flags |= ION_FLAG_CACHED_NEEDS_SYNC;

	pr_debug("%s: len %d align %d heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		/* Do not allow un-secure heap if secure is specified */
		if (secure_allocation &&
		    !ion_heap_allow_secure_allocation(heap->type))
			continue;
		trace_ion_alloc_buffer_start(client->name, heap->name, len,
					     heap_id_mask, flags);
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		trace_ion_alloc_buffer_end(client->name, heap->name, len,
					   heap_id_mask, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;

		trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
						heap_id_mask, flags,
						PTR_ERR(buffer));
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
			int ret_value = snprintf(&dbg_str[dbg_str_idx],
						len_left, "%s ", heap->name);
			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
			}
		}
	}
	up_read(&dev->lock);

	if (buffer == NULL) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_id_mask, flags, -ENODEV);
		return ERR_PTR(-ENODEV);
	}

	if (IS_ERR(buffer)) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_id_mask, flags,
					    PTR_ERR(buffer));
		pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
			 "0x%x) from heap(s) %sfor client %s\n",
			len, align, dbg_str, client->name);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}


	return handle;
}
EXPORT_SYMBOL(ion_alloc);
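
/*
 * Illustrative sketch of a typical in-kernel caller of ion_alloc()/ion_free()
 * (editorial example, not part of this driver; the heap id
 * ION_SYSTEM_HEAP_ID and the ION_HEAP() mask helper are assumed to come from
 * msm_ion.h on this tree):
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, SZ_64K, SZ_4K,
 *			   ION_HEAP(ION_SYSTEM_HEAP_ID), ION_FLAG_CACHED);
 *	if (IS_ERR_OR_NULL(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 */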

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		mutex_unlock(&client->lock);
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
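
/*
 * Illustrative sketch of kernel-side CPU access to a buffer (editorial
 * example, not part of this driver; buffer_len stands in for the allocation
 * length): map, touch the memory, then unmap so the per-handle and per-buffer
 * kmap reference counts stay balanced.
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR_OR_NULL(vaddr))
 *		return -ENOMEM;
 *	memset(vaddr, 0, buffer_len);
 *	ion_unmap_kernel(client, handle);
 */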

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;

	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
			"heap_name", "size_in_bytes", "handle refcount",
			"buffer", "physical", "[domain,partition] - virt");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);

		enum ion_heap_type type = handle->buffer->heap->type;

		seq_printf(s, "%16.16s: %16x : %16d : %12p",
			   handle->buffer->heap->name,
			   handle->buffer->size,
			   atomic_read(&handle->ref.refcount),
			   handle->buffer);

		if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
			type == ION_HEAP_TYPE_CARVEOUT ||
			type == (enum ion_heap_type) ION_HEAP_TYPE_CP)
			seq_printf(s, " : %12pa", &handle->buffer->priv_phys);
		else
			seq_printf(s, " : %12s", "N/A");

		seq_printf(s, "\n");
	}
	mutex_unlock(&client->lock);
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;
	unsigned int name_len;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	name_len = strnlen(name, 64);

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);

	client->name = kzalloc(name_len+1, GFP_KERNEL);
	if (!client->name) {
		if (task)
			put_task_struct(current->group_leader);
		kfree(client);
		return ERR_PTR(-ENOMEM);
	} else {
		strlcpy(client->name, name, name_len+1);
	}

	client->task = task;
	client->pid = pid;

	down_write(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);


	client->debug_root = debugfs_create_file(name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	up_write(&dev->lock);

	return client;
}
EXPORT_SYMBOL(ion_client_create);
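
/*
 * Illustrative sketch of a kernel driver obtaining its own ion client
 * (editorial example, not part of this driver; my_ion_device and the client
 * name are placeholders):
 *
 *	struct ion_client *client;
 *
 *	client = ion_client_create(my_ion_device, "my-driver");
 *	if (IS_ERR_OR_NULL(client))
 *		return PTR_ERR(client);
 *	...
 *	ion_client_destroy(client);
 */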

/**
 * ion_mark_dangling_buffers_locked() - Mark dangling buffers
 * @dev:	the ion device whose buffers will be searched
 *
 * Sets marked=1 for all known buffers associated with `dev' that no
 * longer have a handle pointing to them. dev->lock should be held
 * across a call to this function (and should only be unlocked after
 * checking for marked buffers).
 */
static void ion_mark_dangling_buffers_locked(struct ion_device *dev)
{
	struct rb_node *n, *n2;
	/* mark all buffers as 1 */
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		buf->marked = 1;
	}

	/* now see which buffers we can access */
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);

		mutex_lock(&client->lock);
		for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
			struct ion_handle *handle
				= rb_entry(n2, struct ion_handle, node);

			handle->buffer->marked = 0;

		}
		mutex_unlock(&client->lock);

	}
}

#ifdef CONFIG_ION_LEAK_CHECK
static u32 ion_debug_check_leaks_on_destroy;

static int ion_check_for_and_print_leaks(struct ion_device *dev)
{
	struct rb_node *n;
	int num_leaks = 0;

	if (!ion_debug_check_leaks_on_destroy)
		return 0;

	/* check for leaked buffers (those that no longer have a
	 * handle pointing to them) */
	ion_mark_dangling_buffers_locked(dev);

	/* Anyone still marked as a 1 means a leaked handle somewhere */
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		if (buf->marked == 1) {
			pr_info("Leaked ion buffer at %p\n", buf);
			num_leaks++;
		}
	}
	return num_leaks;
}
static void setup_ion_leak_check(struct dentry *debug_root)
{
	debugfs_create_bool("check_leaks_on_destroy", 0664, debug_root,
			    &ion_debug_check_leaks_on_destroy);
}
#else
static int ion_check_for_and_print_leaks(struct ion_device *dev)
{
	return 0;
}
static void setup_ion_leak_check(struct dentry *debug_root)
{
}
#endif

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;
	int num_leaks;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);

	num_leaks = ion_check_for_and_print_leaks(dev);

	up_write(&dev->lock);

	if (num_leaks) {
		struct task_struct *current_task = current;
		char current_task_name[TASK_COMM_LEN];
		get_task_comm(current_task_name, current_task);
		WARN(1, "%s: Detected %d leaked ion buffer%s.\n",
			__func__, num_leaks, num_leaks == 1 ? "" : "s");
		pr_info("task name at time of leak: %s, pid: %d\n",
			current_task_name, current_task->pid);
	}

	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
			unsigned long *flags)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*flags = buffer->flags;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_flags);

int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
			unsigned long *size)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*size = buffer->size;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_size);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);
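
/*
 * Illustrative sketch of consuming the table returned by ion_sg_table()
 * (editorial example, not part of this driver): walk the scatterlist to
 * program a device, relying on the dma addresses set up in
 * ion_buffer_create().
 *
 *	struct sg_table *table = ion_sg_table(client, handle);
 *	struct scatterlist *sg;
 *	int i;
 *
 *	if (IS_ERR_OR_NULL(table))
 *		return PTR_ERR(table);
 *	for_each_sg(table->sgl, sg, table->nents, i)
 *		pr_debug("chunk %d: addr %lx len %u\n", i,
 *			 (unsigned long)sg_dma_address(sg), sg_dma_len(sg));
 */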

struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
					     size_t chunk_size, size_t total_size)
{
	struct sg_table *table;
	int i, n_chunks, ret;
	struct scatterlist *sg;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	n_chunks = DIV_ROUND_UP(total_size, chunk_size);
	pr_debug("creating sg_table with %d chunks\n", n_chunks);

	ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
	if (ret)
		goto err0;

	for_each_sg(table->sgl, sg, table->nents, i) {
		dma_addr_t addr = buffer_base + i * chunk_size;
		sg_dma_address(sg) = addr;
		sg_dma_len(sg) = chunk_size;
	}

	return table;
err0:
	kfree(table);
	return ERR_PTR(ret);
}

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
	unsigned long pages = buffer->sg_table->nents;
	unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;

	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
	if (!buffer->dirty)
		return -ENOMEM;
	return 0;
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;
	struct ion_vma_list *vma_list;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (!test_bit(i, buffer->dirty))
			continue;
		dma_sync_sg_for_device(dev, sg, 1, dir);
		clear_bit(i, buffer->dirty);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct scatterlist *sg;
	int i;

	mutex_lock(&buffer->lock);
	set_bit(vmf->pgoff, buffer->dirty);

	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (i != vmf->pgoff)
			continue;
		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			       sg_page(sg));
		break;
	}
	mutex_unlock(&buffer->lock);
	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);

	if (buffer->heap->ops->unmap_user)
		buffer->heap->ops->unmap_user(buffer->heap, buffer);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		vma->vm_flags |= VM_MIXEDMAP;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
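
/*
 * Illustrative sketch of passing a buffer between two clients through the
 * dma-buf layer (editorial example, not part of this driver; client_a,
 * client_b and handle_a are placeholders, error handling trimmed):
 *
 *	int fd = ion_share_dma_buf_fd(client_a, handle_a);
 *	struct ion_handle *handle_b;
 *
 *	if (fd < 0)
 *		return fd;
 *	handle_b = ion_import_dma_buf(client_b, fd);
 *	if (IS_ERR_OR_NULL(handle_b))
 *		return PTR_ERR(handle_b);
 */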
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001222
Laura Abbottb14ed962012-01-30 14:18:08 -08001223struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1224{
1225 struct dma_buf *dmabuf;
1226 struct ion_buffer *buffer;
1227 struct ion_handle *handle;
1228
1229 dmabuf = dma_buf_get(fd);
1230 if (IS_ERR_OR_NULL(dmabuf))
1231 return ERR_PTR(PTR_ERR(dmabuf));
1232 /* if this memory came from ion */
1233
1234 if (dmabuf->ops != &dma_buf_ops) {
1235 pr_err("%s: can not import dmabuf from another exporter\n",
1236 __func__);
1237 dma_buf_put(dmabuf);
1238 return ERR_PTR(-EINVAL);
1239 }
1240 buffer = dmabuf->priv;
1241
1242 mutex_lock(&client->lock);
1243 /* if a handle exists for this buffer just take a reference to it */
1244 handle = ion_handle_lookup(client, buffer);
1245 if (!IS_ERR_OR_NULL(handle)) {
1246 ion_handle_get(handle);
1247 goto end;
1248 }
1249 handle = ion_handle_create(client, buffer);
1250 if (IS_ERR_OR_NULL(handle))
1251 goto end;
1252 ion_handle_add(client, handle);
1253end:
1254 mutex_unlock(&client->lock);
1255 dma_buf_put(dmabuf);
1256 return handle;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001257}
Olav Hauganbd453a92012-07-05 14:21:34 -07001258EXPORT_SYMBOL(ion_import_dma_buf);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001259
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001260static int ion_sync_for_device(struct ion_client *client, int fd)
1261{
1262 struct dma_buf *dmabuf;
1263 struct ion_buffer *buffer;
1264
1265 dmabuf = dma_buf_get(fd);
1266 if (IS_ERR_OR_NULL(dmabuf))
1267 return PTR_ERR(dmabuf);
1268
1269 /* if this memory came from ion */
1270 if (dmabuf->ops != &dma_buf_ops) {
1271 pr_err("%s: can not sync dmabuf from another exporter\n",
1272 __func__);
1273 dma_buf_put(dmabuf);
1274 return -EINVAL;
1275 }
1276 buffer = dmabuf->priv;
Rebecca Schultz Zavin3edb9002012-09-19 23:31:05 -07001277
1278 dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1279 buffer->sg_table->nents, DMA_BIDIRECTIONAL);
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001280 dma_buf_put(dmabuf);
1281 return 0;
1282}
1283
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001284static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1285{
1286 struct ion_client *client = filp->private_data;
1287
1288 switch (cmd) {
1289 case ION_IOC_ALLOC:
1290 {
1291 struct ion_allocation_data data;
1292
1293 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1294 return -EFAULT;
1295 data.handle = ion_alloc(client, data.len, data.align,
Hanumant Singh7d72bad2012-08-29 18:39:44 -07001296 data.heap_mask, data.flags);
KyongHo Cho9ae7e012011-09-07 11:27:07 +09001297
Laura Abbottb14ed962012-01-30 14:18:08 -08001298 if (IS_ERR(data.handle))
1299 return PTR_ERR(data.handle);
KyongHo Cho9ae7e012011-09-07 11:27:07 +09001300
Laura Abbottb14ed962012-01-30 14:18:08 -08001301 if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
1302 ion_free(client, data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001303 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001304 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001305 break;
1306 }
1307 case ION_IOC_FREE:
1308 {
1309 struct ion_handle_data data;
1310 bool valid;
1311
1312 if (copy_from_user(&data, (void __user *)arg,
1313 sizeof(struct ion_handle_data)))
1314 return -EFAULT;
1315 mutex_lock(&client->lock);
1316 valid = ion_handle_validate(client, data.handle);
1317 mutex_unlock(&client->lock);
1318 if (!valid)
1319 return -EINVAL;
1320 ion_free(client, data.handle);
1321 break;
1322 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001323 case ION_IOC_MAP:
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001324 case ION_IOC_SHARE:
1325 {
1326 struct ion_fd_data data;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001327 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1328 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001329
Johan Mossberg748c11d2013-01-11 13:38:13 +01001330 data.fd = ion_share_dma_buf_fd(client, data.handle);
1331
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001332 if (copy_to_user((void __user *)arg, &data, sizeof(data)))
1333 return -EFAULT;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001334 if (data.fd < 0)
1335 return data.fd;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001336 break;
1337 }
1338 case ION_IOC_IMPORT:
1339 {
1340 struct ion_fd_data data;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001341 int ret = 0;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001342 if (copy_from_user(&data, (void __user *)arg,
1343 sizeof(struct ion_fd_data)))
1344 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001345 data.handle = ion_import_dma_buf(client, data.fd);
Olav Haugan865e97f2012-05-15 14:40:11 -07001346 if (IS_ERR(data.handle)) {
1347 ret = PTR_ERR(data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001348 data.handle = NULL;
Olav Haugan865e97f2012-05-15 14:40:11 -07001349 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001350 if (copy_to_user((void __user *)arg, &data,
1351 sizeof(struct ion_fd_data)))
1352 return -EFAULT;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001353 if (ret < 0)
1354 return ret;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001355 break;
1356 }
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001357 case ION_IOC_SYNC:
1358 {
1359 struct ion_fd_data data;
1360 if (copy_from_user(&data, (void __user *)arg,
1361 sizeof(struct ion_fd_data)))
1362 return -EFAULT;
1363 ion_sync_for_device(client, data.fd);
1364 break;
1365 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001366 case ION_IOC_CUSTOM:
1367 {
1368 struct ion_device *dev = client->dev;
1369 struct ion_custom_data data;
1370
1371 if (!dev->custom_ioctl)
1372 return -ENOTTY;
1373 if (copy_from_user(&data, (void __user *)arg,
1374 sizeof(struct ion_custom_data)))
1375 return -EFAULT;
1376 return dev->custom_ioctl(client, data.cmd, data.arg);
1377 }
Laura Abbottabcb6f72011-10-04 16:26:49 -07001378 case ION_IOC_CLEAN_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001379 return client->dev->custom_ioctl(client,
1380 ION_IOC_CLEAN_CACHES, arg);
Laura Abbottabcb6f72011-10-04 16:26:49 -07001381 case ION_IOC_INV_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001382 return client->dev->custom_ioctl(client,
1383 ION_IOC_INV_CACHES, arg);
Laura Abbottabcb6f72011-10-04 16:26:49 -07001384 case ION_IOC_CLEAN_INV_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001385 return client->dev->custom_ioctl(client,
1386 ION_IOC_CLEAN_INV_CACHES, arg);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001387 default:
1388 return -ENOTTY;
1389 }
1390 return 0;
1391}
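/*
 * Illustrative sketch, not part of this file's code: a minimal userspace
 * sequence driving the ioctls handled above.  Struct layouts and ioctl
 * numbers are assumed to come from this tree's linux/ion.h; the heap mask
 * is platform specific and only a placeholder here.
 */
#if 0	/* example only */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/ion.h>

static int ion_alloc_and_map_example(size_t len, unsigned int heap_mask)
{
	struct ion_allocation_data alloc = {
		.len = len,
		.align = 4096,
		.heap_mask = heap_mask,	/* e.g. ION_HEAP(<platform heap id>) */
		.flags = 0,
	};
	struct ion_fd_data share = { .fd = -1 };
	struct ion_handle_data free_arg;
	void *vaddr;
	int ret = -1;
	int ion_fd = open("/dev/ion", O_RDONLY);

	if (ion_fd < 0)
		return -1;
	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)
		goto out_close;
	share.handle = alloc.handle;
	if (ioctl(ion_fd, ION_IOC_SHARE, &share) < 0)	/* yields a dma-buf fd */
		goto out_free;
	vaddr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, share.fd, 0);
	if (vaddr != MAP_FAILED) {
		/* ... use the buffer, or pass share.fd to another process ... */
		munmap(vaddr, len);
		ret = 0;
	}
	close(share.fd);
out_free:
	free_arg.handle = alloc.handle;
	ioctl(ion_fd, ION_IOC_FREE, &free_arg);
out_close:
	close(ion_fd);
	return ret;
}
#endif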
1392
1393static int ion_release(struct inode *inode, struct file *file)
1394{
1395 struct ion_client *client = file->private_data;
1396
1397 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbottb14ed962012-01-30 14:18:08 -08001398 ion_client_destroy(client);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001399 return 0;
1400}
1401
1402static int ion_open(struct inode *inode, struct file *file)
1403{
1404 struct miscdevice *miscdev = file->private_data;
1405 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1406 struct ion_client *client;
Laura Abbotteed86032011-12-05 15:32:36 -08001407 char debug_name[64];
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001408
1409 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbotteed86032011-12-05 15:32:36 -08001410 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
Rebecca Schultz Zavin75aec5b2012-12-11 15:23:14 -08001411 client = ion_client_create(dev, debug_name);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001412 if (IS_ERR_OR_NULL(client))
1413 return PTR_ERR(client);
1414 file->private_data = client;
1415
1416 return 0;
1417}
1418
1419static const struct file_operations ion_fops = {
1420 .owner = THIS_MODULE,
1421 .open = ion_open,
1422 .release = ion_release,
1423 .unlocked_ioctl = ion_ioctl,
1424};
1425
1426static size_t ion_debug_heap_total(struct ion_client *client,
Laura Abbott3647ac32011-10-31 14:09:53 -07001427 enum ion_heap_ids id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001428{
1429 size_t size = 0;
1430 struct rb_node *n;
1431
1432 mutex_lock(&client->lock);
1433 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1434 struct ion_handle *handle = rb_entry(n,
1435 struct ion_handle,
1436 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001437 if (handle->buffer->heap->id == id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001438 size += handle->buffer->size;
1439 }
1440 mutex_unlock(&client->lock);
1441 return size;
1442}
1443
Olav Haugan0671b9a2012-05-25 11:58:56 -07001444/**
1445 * Searches through a client's handles to find whether the buffer is owned
1446 * by this client. Used for debug output.
1447 * @param client pointer to candidate owner of buffer
1448 * @param buf pointer to buffer that we are trying to find the owner of
1449 * @return 1 if found, 0 otherwise
1450 */
1451static int ion_debug_find_buffer_owner(const struct ion_client *client,
1452 const struct ion_buffer *buf)
1453{
1454 struct rb_node *n;
1455
1456 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1457 const struct ion_handle *handle = rb_entry(n,
1458 const struct ion_handle,
1459 node);
1460 if (handle->buffer == buf)
1461 return 1;
1462 }
1463 return 0;
1464}
1465
1466/**
1467 * Adds a mem_map_data entry to the mem_map tree.
1468 * Used for debug output.
1469 * @param mem_map The mem_map tree
1470 * @param data The new data to add to the tree
1471 */
1472static void ion_debug_mem_map_add(struct rb_root *mem_map,
1473 struct mem_map_data *data)
1474{
1475 struct rb_node **p = &mem_map->rb_node;
1476 struct rb_node *parent = NULL;
1477 struct mem_map_data *entry;
1478
1479 while (*p) {
1480 parent = *p;
1481 entry = rb_entry(parent, struct mem_map_data, node);
1482
1483 if (data->addr < entry->addr) {
1484 p = &(*p)->rb_left;
1485 } else if (data->addr > entry->addr) {
1486 p = &(*p)->rb_right;
1487 } else {
1488 			pr_err("%s: mem_map_data already found.\n", __func__);
1489 BUG();
1490 }
1491 }
1492 rb_link_node(&data->node, parent, p);
1493 rb_insert_color(&data->node, mem_map);
1494}
1495
1496/**
1497 * Search for an owner of a buffer by iterating over all ION clients.
1498 * @param dev ion device containing pointers to all the clients.
1499 * @param buffer pointer to buffer we are trying to find the owner of.
1500 * @return name of owner.
1501 */
1502const char *ion_debug_locate_owner(const struct ion_device *dev,
1503 const struct ion_buffer *buffer)
1504{
1505 struct rb_node *j;
1506 const char *client_name = NULL;
1507
Laura Abbottb14ed962012-01-30 14:18:08 -08001508 for (j = rb_first(&dev->clients); j && !client_name;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001509 j = rb_next(j)) {
1510 struct ion_client *client = rb_entry(j, struct ion_client,
1511 node);
1512 if (ion_debug_find_buffer_owner(client, buffer))
1513 client_name = client->name;
1514 }
1515 return client_name;
1516}
1517
1518/**
1519 * Create a mem_map of the heap.
1520 * @param s seq_file to log error message to.
1521 * @param heap The heap to create mem_map for.
1522 * @param mem_map The mem map to be created.
1523 */
1524void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
1525 struct rb_root *mem_map)
1526{
1527 struct ion_device *dev = heap->dev;
1528 struct rb_node *n;
Chintan Pandyadaf75622013-01-29 19:40:01 +05301529 size_t size;
1530
1531 if (!heap->ops->phys)
1532 return;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001533
1534 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1535 struct ion_buffer *buffer =
1536 rb_entry(n, struct ion_buffer, node);
1537 if (buffer->heap->id == heap->id) {
1538 struct mem_map_data *data =
1539 kzalloc(sizeof(*data), GFP_KERNEL);
1540 if (!data) {
1541 seq_printf(s, "ERROR: out of memory. "
1542 "Part of memory map will not be logged\n");
1543 break;
1544 }
Chintan Pandyadaf75622013-01-29 19:40:01 +05301545
1546 buffer->heap->ops->phys(buffer->heap, buffer,
1547 &(data->addr), &size);
1548 data->size = (unsigned long) size;
1549 data->addr_end = data->addr + data->size - 1;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001550 data->client_name = ion_debug_locate_owner(dev, buffer);
1551 ion_debug_mem_map_add(mem_map, data);
1552 }
1553 }
1554}
1555
1556/**
1557 * Free the memory allocated by ion_debug_mem_map_create
1558 * @param mem_map The mem map to free.
1559 */
1560static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
1561{
1562 if (mem_map) {
1563 struct rb_node *n;
1564 while ((n = rb_first(mem_map)) != 0) {
1565 struct mem_map_data *data =
1566 rb_entry(n, struct mem_map_data, node);
1567 rb_erase(&data->node, mem_map);
1568 kfree(data);
1569 }
1570 }
1571}
1572
1573/**
1574 * Print heap debug information.
1575 * @param s seq_file to log message to.
1576 * @param heap pointer to heap that we will print debug information for.
1577 */
1578static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
1579{
1580 if (heap->ops->print_debug) {
1581 struct rb_root mem_map = RB_ROOT;
1582 ion_debug_mem_map_create(s, heap, &mem_map);
1583 heap->ops->print_debug(heap, s, &mem_map);
1584 ion_debug_mem_map_destroy(&mem_map);
1585 }
1586}
1587
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001588static int ion_debug_heap_show(struct seq_file *s, void *unused)
1589{
1590 struct ion_heap *heap = s->private;
1591 struct ion_device *dev = heap->dev;
1592 struct rb_node *n;
1593
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001594 mutex_lock(&dev->buffer_lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001595	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001596
Laura Abbottb14ed962012-01-30 14:18:08 -08001597 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001598 struct ion_client *client = rb_entry(n, struct ion_client,
1599 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001600 size_t size = ion_debug_heap_total(client, heap->id);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001601 if (!size)
1602 continue;
Laura Abbottb14ed962012-01-30 14:18:08 -08001603 if (client->task) {
1604 char task_comm[TASK_COMM_LEN];
1605
1606 get_task_comm(task_comm, client->task);
1607 			seq_printf(s, "%16s %16u %16u\n", task_comm,
1608 client->pid, size);
1609 } else {
1610 			seq_printf(s, "%16s %16u %16u\n", client->name,
1611 client->pid, size);
1612 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001613 }
Olav Haugan0671b9a2012-05-25 11:58:56 -07001614 ion_heap_print_debug(s, heap);
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001615 mutex_unlock(&dev->buffer_lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001616 return 0;
1617}
1618
1619static int ion_debug_heap_open(struct inode *inode, struct file *file)
1620{
1621 return single_open(file, ion_debug_heap_show, inode->i_private);
1622}
1623
1624static const struct file_operations debug_heap_fops = {
1625 .open = ion_debug_heap_open,
1626 .read = seq_read,
1627 .llseek = seq_lseek,
1628 .release = single_release,
1629};
1630
1631void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1632{
Laura Abbottb14ed962012-01-30 14:18:08 -08001633 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1634 !heap->ops->unmap_dma)
1635 		pr_err("%s: cannot add heap with invalid ops struct.\n",
1636 __func__);
1637
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001638 heap->dev = dev;
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001639 down_write(&dev->lock);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001640	/* use negative heap->id as the plist priority so that, when the
1641 	   list is traversed later, higher id numbers are attempted first */
1642 plist_node_init(&heap->node, -heap->id);
1643 plist_add(&heap->node, &dev->heaps);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001644 debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1645 &debug_heap_fops);
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001646 up_write(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001647}
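/*
 * Illustrative sketch, not part of this file's code: registering heaps with
 * ion_device_add_heap() above.  The constructors are hypothetical stand-ins
 * for the heap-type-specific creation functions; the point shown is that,
 * because the plist priority is -heap->id, the heap with the larger id is
 * visited first when the allocation path walks dev->heaps.
 */
#if 0	/* example only */
static int example_register_heaps(struct ion_device *idev)
{
	struct ion_heap *sys = example_system_heap_create();	/* hypothetical, heap->id == 0 */
	struct ion_heap *cp = example_cp_heap_create();		/* hypothetical, heap->id == 8 */

	if (IS_ERR_OR_NULL(sys) || IS_ERR_OR_NULL(cp))
		return -ENOMEM;

	ion_device_add_heap(idev, sys);
	ion_device_add_heap(idev, cp);
	/* traversal order is now cp (id 8) before sys (id 0), however they were added */
	return 0;
}
#endif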
1648
Laura Abbott93619302012-10-11 11:51:40 -07001649int ion_secure_handle(struct ion_client *client, struct ion_handle *handle,
1650 int version, void *data, int flags)
1651{
1652 int ret = -EINVAL;
1653 struct ion_heap *heap;
1654 struct ion_buffer *buffer;
1655
1656 mutex_lock(&client->lock);
1657 if (!ion_handle_validate(client, handle)) {
1658 WARN(1, "%s: invalid handle passed to secure.\n", __func__);
1659 goto out_unlock;
1660 }
1661
1662 buffer = handle->buffer;
1663 heap = buffer->heap;
1664
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001665 if (!ion_heap_allow_handle_secure(heap->type)) {
Laura Abbott93619302012-10-11 11:51:40 -07001666 pr_err("%s: cannot secure buffer from non secure heap\n",
1667 __func__);
1668 goto out_unlock;
1669 }
1670
1671 BUG_ON(!buffer->heap->ops->secure_buffer);
1672 /*
1673 * Protect the handle via the client lock to ensure we aren't
1674 * racing with free
1675 */
1676 ret = buffer->heap->ops->secure_buffer(buffer, version, data, flags);
1677
1678out_unlock:
1679 mutex_unlock(&client->lock);
1680 return ret;
1681}
1682
1683int ion_unsecure_handle(struct ion_client *client, struct ion_handle *handle)
1684{
1685 int ret = -EINVAL;
1686 struct ion_heap *heap;
1687 struct ion_buffer *buffer;
1688
1689 mutex_lock(&client->lock);
1690 if (!ion_handle_validate(client, handle)) {
1691 		WARN(1, "%s: invalid handle passed to unsecure.\n", __func__);
1692 goto out_unlock;
1693 }
1694
1695 buffer = handle->buffer;
1696 heap = buffer->heap;
1697
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001698 if (!ion_heap_allow_handle_secure(heap->type)) {
Laura Abbott93619302012-10-11 11:51:40 -07001699		pr_err("%s: cannot unsecure buffer from non secure heap\n",
1700 __func__);
1701 goto out_unlock;
1702 }
1703
1704 BUG_ON(!buffer->heap->ops->unsecure_buffer);
1705 /*
1706 * Protect the handle via the client lock to ensure we aren't
1707 * racing with free
1708 */
1709 ret = buffer->heap->ops->unsecure_buffer(buffer, 0);
1710
1711out_unlock:
1712 mutex_unlock(&client->lock);
1713 return ret;
1714}
1715
Laura Abbott7e446482012-06-13 15:59:39 -07001716int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
1717 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001718{
Olav Haugan0a852512012-01-09 10:20:55 -08001719 int ret_val = 0;
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001720 struct ion_heap *heap;
Olav Haugan0a852512012-01-09 10:20:55 -08001721
1722 /*
1723 * traverse the list of heaps available in this system
1724 * and find the heap that is specified.
1725 */
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001726 down_write(&dev->lock);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001727 plist_for_each_entry(heap, &dev->heaps, node) {
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001728 if (!ion_heap_allow_heap_secure(heap->type))
Olav Haugan0a852512012-01-09 10:20:55 -08001729 continue;
1730 if (ION_HEAP(heap->id) != heap_id)
1731 continue;
1732 if (heap->ops->secure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001733 ret_val = heap->ops->secure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001734 else
1735 ret_val = -EINVAL;
1736 break;
1737 }
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001738 up_write(&dev->lock);
Olav Haugan0a852512012-01-09 10:20:55 -08001739 return ret_val;
1740}
Olav Hauganbd453a92012-07-05 14:21:34 -07001741EXPORT_SYMBOL(ion_secure_heap);
Olav Haugan0a852512012-01-09 10:20:55 -08001742
Laura Abbott7e446482012-06-13 15:59:39 -07001743int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
1744 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001745{
Olav Haugan0a852512012-01-09 10:20:55 -08001746 int ret_val = 0;
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001747 struct ion_heap *heap;
Olav Haugan0a852512012-01-09 10:20:55 -08001748
1749 /*
1750 * traverse the list of heaps available in this system
1751 * and find the heap that is specified.
1752 */
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001753 down_write(&dev->lock);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001754 plist_for_each_entry(heap, &dev->heaps, node) {
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001755 if (!ion_heap_allow_heap_secure(heap->type))
Olav Haugan0a852512012-01-09 10:20:55 -08001756 continue;
1757 if (ION_HEAP(heap->id) != heap_id)
1758 continue;
1759 if (heap->ops->secure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001760 ret_val = heap->ops->unsecure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001761 else
1762 ret_val = -EINVAL;
1763 break;
1764 }
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001765 up_write(&dev->lock);
Olav Haugan0a852512012-01-09 10:20:55 -08001766 return ret_val;
1767}
Olav Hauganbd453a92012-07-05 14:21:34 -07001768EXPORT_SYMBOL(ion_unsecure_heap);
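/*
 * Illustrative sketch, not part of this file's code: ion_secure_heap() and
 * ion_unsecure_heap() above match the heap via ION_HEAP(heap->id), so a
 * kernel caller passes a single-bit heap mask built from the platform heap
 * id.  The version/data values here are placeholders for whatever the
 * content-protection heap actually expects.
 */
#if 0	/* example only */
static int example_secure_then_unsecure(struct ion_device *idev, int heap_id)
{
	int ret = ion_secure_heap(idev, ION_HEAP(heap_id), 0, NULL);

	if (ret)
		return ret;
	/* ... content-protected use of the heap ... */
	return ion_unsecure_heap(idev, ION_HEAP(heap_id), 0, NULL);
}
#endif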
Olav Haugan0a852512012-01-09 10:20:55 -08001769
Laura Abbott404f8242011-10-31 14:22:53 -07001770static int ion_debug_leak_show(struct seq_file *s, void *unused)
1771{
1772 struct ion_device *dev = s->private;
1773 struct rb_node *n;
Laura Abbott404f8242011-10-31 14:22:53 -07001774
Laura Abbott404f8242011-10-31 14:22:53 -07001775	seq_printf(s, "%16s %16s %16s %16s\n", "buffer", "heap", "size",
1776 "ref cnt");
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001777
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001778 ion_mark_dangling_buffers_locked(dev);
Laura Abbott404f8242011-10-31 14:22:53 -07001779
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001780 down_write(&dev->lock);
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001781 /* Anyone still marked as a 1 means a leaked handle somewhere */
Laura Abbott404f8242011-10-31 14:22:53 -07001782 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1783 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
1784 node);
1785
1786 if (buf->marked == 1)
1787 			seq_printf(s, "%16x %16s %16x %16d\n",
1788 (int)buf, buf->heap->name, buf->size,
1789 atomic_read(&buf->ref.refcount));
1790 }
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001791 up_write(&dev->lock);
Laura Abbott404f8242011-10-31 14:22:53 -07001792 return 0;
1793}
1794
1795static int ion_debug_leak_open(struct inode *inode, struct file *file)
1796{
1797 return single_open(file, ion_debug_leak_show, inode->i_private);
1798}
1799
1800static const struct file_operations debug_leak_fops = {
1801 .open = ion_debug_leak_open,
1802 .read = seq_read,
1803 .llseek = seq_lseek,
1804 .release = single_release,
1805};
1806
1807
1808
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001809struct ion_device *ion_device_create(long (*custom_ioctl)
1810 (struct ion_client *client,
1811 unsigned int cmd,
1812 unsigned long arg))
1813{
1814 struct ion_device *idev;
1815 int ret;
1816
1817 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1818 if (!idev)
1819 return ERR_PTR(-ENOMEM);
1820
1821 idev->dev.minor = MISC_DYNAMIC_MINOR;
1822 idev->dev.name = "ion";
1823 idev->dev.fops = &ion_fops;
1824 idev->dev.parent = NULL;
1825 ret = misc_register(&idev->dev);
1826 if (ret) {
1827 pr_err("ion: failed to register misc device.\n");
1828 return ERR_PTR(ret);
1829 }
1830
1831 idev->debug_root = debugfs_create_dir("ion", NULL);
1832 if (IS_ERR_OR_NULL(idev->debug_root))
1833 pr_err("ion: failed to create debug files.\n");
1834
1835 idev->custom_ioctl = custom_ioctl;
1836 idev->buffers = RB_ROOT;
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001837 mutex_init(&idev->buffer_lock);
1838 init_rwsem(&idev->lock);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001839 plist_head_init(&idev->heaps);
Laura Abbottb14ed962012-01-30 14:18:08 -08001840 idev->clients = RB_ROOT;
Laura Abbott404f8242011-10-31 14:22:53 -07001841 debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
1842 &debug_leak_fops);
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08001843
1844 setup_ion_leak_check(idev->debug_root);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001845 return idev;
1846}
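/*
 * Illustrative sketch, not part of this file's code: the custom_ioctl hook
 * handed to ion_device_create() is what ION_IOC_CUSTOM and the cache
 * maintenance ioctls above are routed to.  The handler below is only a
 * skeleton; the platform handler (msm_ion.c in this tree) does the real
 * cache operations.
 */
#if 0	/* example only */
static long example_custom_ioctl(struct ion_client *client, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
	case ION_IOC_INV_CACHES:
	case ION_IOC_CLEAN_INV_CACHES:
		/* perform the requested cache maintenance on the buffer */
		return 0;
	default:
		return -ENOTTY;
	}
}

static struct ion_device *example_ion_probe(void)
{
	return ion_device_create(example_custom_ioctl);
}
#endif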
1847
1848void ion_device_destroy(struct ion_device *dev)
1849{
1850 misc_deregister(&dev->dev);
1851 /* XXX need to free the heaps and clients ? */
1852 kfree(dev);
1853}
Laura Abbottb14ed962012-01-30 14:18:08 -08001854
1855void __init ion_reserve(struct ion_platform_data *data)
1856{
Rebecca Schultz Zavinfbce1512012-11-15 10:31:02 -08001857 int i;
Laura Abbottb14ed962012-01-30 14:18:08 -08001858
1859 for (i = 0; i < data->nr; i++) {
1860 if (data->heaps[i].size == 0)
1861 continue;
Rebecca Schultz Zavinfbce1512012-11-15 10:31:02 -08001862
1863 if (data->heaps[i].base == 0) {
1864 phys_addr_t paddr;
1865 paddr = memblock_alloc_base(data->heaps[i].size,
1866 data->heaps[i].align,
1867 MEMBLOCK_ALLOC_ANYWHERE);
1868 if (!paddr) {
1869 pr_err("%s: error allocating memblock for "
1870 "heap %d\n",
1871 __func__, i);
1872 continue;
1873 }
1874 data->heaps[i].base = paddr;
1875 } else {
1876 int ret = memblock_reserve(data->heaps[i].base,
1877 data->heaps[i].size);
1878 if (ret)
1879 pr_err("memblock reserve of %x@%pa failed\n",
1880 data->heaps[i].size,
1881 &data->heaps[i].base);
1882 }
1883 pr_info("%s: %s reserved base %pa size %d\n", __func__,
1884 data->heaps[i].name,
1885 &data->heaps[i].base,
1886 data->heaps[i].size);
Laura Abbottb14ed962012-01-30 14:18:08 -08001887 }
1888}
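/*
 * Illustrative sketch, not part of this file's code: the shape of the
 * ion_platform_data a board file might hand to ion_reserve() above.  Heap
 * id, name and sizes are placeholders; only the fields ion_reserve() reads
 * (base, size, align, name) plus id/type are filled in.
 */
#if 0	/* example only */
static struct ion_platform_heap example_heaps[] = {
	{
		.id = 8,			/* platform specific heap id */
		.type = ION_HEAP_TYPE_CARVEOUT,
		.name = "example_carveout",
		.base = 0,			/* 0: ion_reserve() memblock_allocs it */
		.size = 8 * 1024 * 1024,
		.align = 1024 * 1024,
	},
};

static struct ion_platform_data example_ion_pdata = {
	.nr = ARRAY_SIZE(example_heaps),
	.heaps = example_heaps,
};

/* called from the machine's reserve() callback, while memblock is still live */
static void __init example_ion_reserve(void)
{
	ion_reserve(&example_ion_pdata);
}
#endif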