/*
 *
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>
#include <linux/msm_ion.h>
#include <trace/events/kmem.h>


#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @clients:		an rb tree of all the clients created against this device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
	struct dentry *heaps_debug_root;
	struct dentry *clients_debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client task, used for debugging
 * @debug_root:		debugfs entry for this client
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node and kmap_cnt should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return ((buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
			    i) {
			if (sg_dma_len(sg) == PAGE_SIZE)
				continue;
			pr_err("%s: cached mappings that will be faulted in "
			       "must have pagewise sg_lists\n", __func__);
			ret = -EINVAL;
			goto err;
		}

		ret = ion_buffer_alloc_dirty(buffer);
		if (ret)
			goto err;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here.  However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg.  The implicit contract here is that
	   memory coming from the heaps is ready for dma, i.e. if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (sg_dma_address(sg) == 0)
			sg_dma_address(sg) = sg_phys(sg);
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

static void ion_delayed_unsecure(struct ion_buffer *buffer)
{
	if (buffer->heap->ops->unsecure_buffer)
		buffer->heap->ops->unsecure_buffer(buffer, 1);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);

	ion_delayed_unsecure(buffer);
	buffer->heap->ops->free(buffer);
	if (buffer->flags & ION_FLAG_CACHED)
		kfree(buffer->dirty);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = kref_put(&handle->ref, ion_handle_destroy);
	mutex_unlock(&client->lock);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);
	mutex_unlock(&client->lock);

	return handle ? handle : ERR_PTR(-EINVAL);
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return (idr_find(&client->idr, handle->id) == handle);
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int rc;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	do {
		int id;
		rc = idr_pre_get(&client->idr, GFP_KERNEL);
		if (!rc)
			return -ENOMEM;
		rc = idr_get_new_above(&client->idr, handle, 1, &id);
		handle->id = id;
	} while (rc == -EAGAIN);

	if (rc < 0)
		return rc;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;
	unsigned long secure_allocation = flags & ION_FLAG_SECURE;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	/*
	 * For now, we don't want to fault in pages individually since
	 * clients are already doing manual cache maintenance. In
	 * other words, the implicit caching infrastructure is in
	 * place (in code) but should not be used.
	 */
	flags |= ION_FLAG_CACHED_NEEDS_SYNC;

	pr_debug("%s: len %d align %d heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		/* Do not allow un-secure heap if secure is specified */
		if (secure_allocation &&
		    !ion_heap_allow_secure_allocation(heap->type))
			continue;
		trace_ion_alloc_buffer_start(client->name, heap->name, len,
					     heap_id_mask, flags);
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		trace_ion_alloc_buffer_end(client->name, heap->name, len,
					   heap_id_mask, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;

		trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
						heap_id_mask, flags,
						PTR_ERR(buffer));
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
			int ret_value = snprintf(&dbg_str[dbg_str_idx],
						len_left, "%s ", heap->name);
			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
			}
		}
	}
	up_read(&dev->lock);

	if (buffer == NULL) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_id_mask, flags, -ENODEV);
		return ERR_PTR(-ENODEV);
	}

	if (IS_ERR(buffer)) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_id_mask, flags,
					    PTR_ERR(buffer));
		pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
			 "0x%x) from heap(s) %sfor client %s\n",
			 len, align, dbg_str, client->name);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
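
/*
 * Illustrative usage sketch (not part of the driver): a minimal in-kernel
 * client of the API exported above.  Error handling is omitted, and the
 * heap id and sizes are assumptions made up for this example -- real callers
 * pass a heap id mask that matches their platform's heap configuration
 * (e.g. ION_HEAP(ION_SYSTEM_HEAP_ID) from msm_ion.h) and check every return
 * value.  "idev" stands for the platform's struct ion_device.
 *
 *	struct ion_client *client = ion_client_create(idev, "example");
 *	struct ion_handle *handle = ion_alloc(client, SZ_1M, SZ_4K,
 *					      ION_HEAP(ION_SYSTEM_HEAP_ID),
 *					      ION_FLAG_CACHED);
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	... use vaddr ...
 *
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */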

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	mutex_unlock(&client->lock);
	ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;

	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s\n",
		   "heap_name", "size_in_bytes", "handle refcount",
		   "buffer");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);

		seq_printf(s, "%16.16s: %16x : %16d : %12p",
			   handle->buffer->heap->name,
			   handle->buffer->size,
			   atomic_read(&handle->ref.refcount),
			   handle->buffer);

		seq_printf(s, "\n");
	}
	mutex_unlock(&client->lock);
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static bool startswith(const char *string, const char *prefix)
{
	size_t l1 = strlen(string);
	size_t l2 = strlen(prefix);
	return strncmp(string, prefix, min(l1, l2)) == 0;
}

static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;
	for (node = rb_first(root); node; node = rb_next(node)) {
		int n;
		char *serial_string;
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);
		if (!startswith(client->name, name))
			continue;
		serial_string = strrchr(client->name, '-');
		if (!serial_string)
			continue;
		serial_string++;
		sscanf(serial_string, "%d", &n);
		serial = max(serial, n);
	}
	return serial + 1;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;
	int name_len;
	int client_serial;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	name_len = strnlen(name, 64);
	/* add some space to accommodate the serial number suffix */
	name_len = min(64, name_len + 11);

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);

	client->name = kzalloc(name_len+1, GFP_KERNEL);
	if (!client->name) {
		put_task_struct(current->group_leader);
		kfree(client);
		return ERR_PTR(-ENOMEM);
	}

	client->task = task;
	client->pid = pid;

	down_write(&dev->lock);
	client_serial = ion_get_client_serial(&dev->clients, name);
	snprintf(client->name, name_len, "%s-%d", name, client_serial);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);


	client->debug_root = debugfs_create_file(client->name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;
		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
			path, client->name);
	}

	up_write(&dev->lock);

	return client;
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_remove_all(&client->idr);
	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);

	up_write(&dev->lock);

	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
			 unsigned long *flags)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*flags = buffer->flags;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_flags);

int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
			unsigned long *size)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*size = buffer->size;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_size);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);
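
/*
 * Illustrative sketch (not part of the driver): walking the table returned
 * by ion_sg_table() to inspect per-segment dma addresses.  "client" and
 * "handle" are assumed to come from ion_client_create()/ion_alloc() as in
 * the example above.
 *
 *	struct sg_table *table = ion_sg_table(client, handle);
 *	struct scatterlist *sg;
 *	int i;
 *
 *	if (!IS_ERR_OR_NULL(table))
 *		for_each_sg(table->sgl, sg, table->nents, i)
 *			pr_debug("seg %d: addr %lx len %u\n", i,
 *				 (unsigned long)sg_dma_address(sg),
 *				 sg_dma_len(sg));
 */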

struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
					size_t chunk_size, size_t total_size)
{
	struct sg_table *table;
	int i, n_chunks, ret;
	struct scatterlist *sg;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	n_chunks = DIV_ROUND_UP(total_size, chunk_size);
	pr_debug("creating sg_table with %d chunks\n", n_chunks);

	ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
	if (ret)
		goto err0;

	for_each_sg(table->sgl, sg, table->nents, i) {
		dma_addr_t addr = buffer_base + i * chunk_size;
		sg_dma_address(sg) = addr;
		sg_dma_len(sg) = chunk_size;
	}

	return table;
err0:
	kfree(table);
	return ERR_PTR(ret);
}

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
	unsigned long pages = buffer->sg_table->nents;
	unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;

	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
	if (!buffer->dirty)
		return -ENOMEM;
	return 0;
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;
	struct ion_vma_list *vma_list;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (!test_bit(i, buffer->dirty))
			continue;
		dma_sync_sg_for_device(dev, sg, 1, dir);
		clear_bit(i, buffer->dirty);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct scatterlist *sg;
	int i;

	mutex_lock(&buffer->lock);
	set_bit(vmf->pgoff, buffer->dirty);

	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (i != vmf->pgoff)
			continue;
		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			       sg_page(sg));
		break;
	}
	mutex_unlock(&buffer->lock);
	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);

	if (buffer->heap->ops->unmap_user)
		buffer->heap->ops->unmap_user(buffer->heap, buffer);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		vma->vm_flags |= VM_MIXEDMAP;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
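
/*
 * Illustrative sketch (not part of the driver): handing a buffer to a second
 * kernel client through a dma-buf fd.  "client_a"/"client_b" are assumed to
 * be existing ion clients and "handle_a" an allocation owned by client_a.
 *
 *	int fd = ion_share_dma_buf_fd(client_a, handle_a);
 *	struct ion_handle *handle_b;
 *
 *	if (fd < 0)
 *		return fd;
 *	handle_b = ion_import_dma_buf(client_b, fd);
 *	if (IS_ERR_OR_NULL(handle_b))
 *		return PTR_ERR(handle_b);
 *
 * Both handles then reference the same underlying struct ion_buffer.
 */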
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001253
Laura Abbottb14ed962012-01-30 14:18:08 -08001254struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1255{
1256 struct dma_buf *dmabuf;
1257 struct ion_buffer *buffer;
1258 struct ion_handle *handle;
Colin Crossafde3d32013-08-20 22:59:41 -07001259 int ret;
Laura Abbottb14ed962012-01-30 14:18:08 -08001260
1261 dmabuf = dma_buf_get(fd);
1262 if (IS_ERR_OR_NULL(dmabuf))
1263 return ERR_PTR(PTR_ERR(dmabuf));
1264 /* if this memory came from ion */
1265
1266 if (dmabuf->ops != &dma_buf_ops) {
1267 pr_err("%s: can not import dmabuf from another exporter\n",
1268 __func__);
1269 dma_buf_put(dmabuf);
1270 return ERR_PTR(-EINVAL);
1271 }
1272 buffer = dmabuf->priv;
1273
1274 mutex_lock(&client->lock);
1275 /* if a handle exists for this buffer just take a reference to it */
1276 handle = ion_handle_lookup(client, buffer);
1277 if (!IS_ERR_OR_NULL(handle)) {
1278 ion_handle_get(handle);
Colin Cross6b051302013-11-05 16:51:27 -08001279 mutex_unlock(&client->lock);
Laura Abbottb14ed962012-01-30 14:18:08 -08001280 goto end;
1281 }
Colin Cross6b051302013-11-05 16:51:27 -08001282 mutex_unlock(&client->lock);
1283
Laura Abbottb14ed962012-01-30 14:18:08 -08001284 handle = ion_handle_create(client, buffer);
1285 if (IS_ERR_OR_NULL(handle))
1286 goto end;
Colin Cross6b051302013-11-05 16:51:27 -08001287
1288 mutex_lock(&client->lock);
Colin Crossafde3d32013-08-20 22:59:41 -07001289 ret = ion_handle_add(client, handle);
Colin Cross6b051302013-11-05 16:51:27 -08001290 mutex_unlock(&client->lock);
Colin Crossafde3d32013-08-20 22:59:41 -07001291 if (ret) {
1292 ion_handle_put(handle);
1293 handle = ERR_PTR(ret);
1294 }
Colin Cross6b051302013-11-05 16:51:27 -08001295
Laura Abbottb14ed962012-01-30 14:18:08 -08001296end:
Laura Abbottb14ed962012-01-30 14:18:08 -08001297 dma_buf_put(dmabuf);
1298 return handle;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001299}
Olav Hauganbd453a92012-07-05 14:21:34 -07001300EXPORT_SYMBOL(ion_import_dma_buf);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001301
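/*
 * Example (illustrative sketch): re-importing a dma-buf fd that originally
 * came from ION.  If this client already holds a handle for the buffer,
 * ion_import_dma_buf() just takes another reference to that handle; either
 * way the result must eventually be released with ion_free().  The fd is
 * assumed to have been received from another process or driver.
 */
static int example_import_fd(struct ion_client *client, int fd)
{
	struct ion_handle *handle;

	handle = ion_import_dma_buf(client, fd);
	if (IS_ERR_OR_NULL(handle))
		return handle ? PTR_ERR(handle) : -EINVAL;

	/* ... use the buffer via the handle ... */

	ion_free(client, handle);
	return 0;
}
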
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001302static int ion_sync_for_device(struct ion_client *client, int fd)
1303{
1304 struct dma_buf *dmabuf;
1305 struct ion_buffer *buffer;
1306
1307 dmabuf = dma_buf_get(fd);
1308 if (IS_ERR_OR_NULL(dmabuf))
1309 return PTR_ERR(dmabuf);
1310
1311 /* if this memory came from ion */
1312 if (dmabuf->ops != &dma_buf_ops) {
1313 		pr_err("%s: cannot sync dmabuf from another exporter\n",
1314 __func__);
1315 dma_buf_put(dmabuf);
1316 return -EINVAL;
1317 }
1318 buffer = dmabuf->priv;
Rebecca Schultz Zavin3edb9002012-09-19 23:31:05 -07001319
1320 dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1321 buffer->sg_table->nents, DMA_BIDIRECTIONAL);
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001322 dma_buf_put(dmabuf);
1323 return 0;
1324}
1325
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001326static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1327{
1328 struct ion_client *client = filp->private_data;
1329
1330 switch (cmd) {
1331 case ION_IOC_ALLOC:
1332 {
1333 struct ion_allocation_data data;
Colin Crossafde3d32013-08-20 22:59:41 -07001334 struct ion_handle *handle;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001335
1336 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1337 return -EFAULT;
Colin Crossafde3d32013-08-20 22:59:41 -07001338 handle = ion_alloc(client, data.len, data.align,
Hanumant Singh7d72bad2012-08-29 18:39:44 -07001339 data.heap_mask, data.flags);
KyongHo Cho9ae7e012011-09-07 11:27:07 +09001340
Colin Crossafde3d32013-08-20 22:59:41 -07001341 if (IS_ERR(handle))
1342 return PTR_ERR(handle);
1343
Rom Lemarchand7c2d6792013-10-23 15:09:11 -07001344 data.handle = (ion_user_handle_t)handle->id;
KyongHo Cho9ae7e012011-09-07 11:27:07 +09001345
Laura Abbottb14ed962012-01-30 14:18:08 -08001346 if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
Colin Crossafde3d32013-08-20 22:59:41 -07001347 ion_free(client, handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001348 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001349 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001350 break;
1351 }
1352 case ION_IOC_FREE:
1353 {
1354 struct ion_handle_data data;
Colin Crossafde3d32013-08-20 22:59:41 -07001355 struct ion_handle *handle;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001356
1357 if (copy_from_user(&data, (void __user *)arg,
1358 sizeof(struct ion_handle_data)))
1359 return -EFAULT;
Colin Cross6b051302013-11-05 16:51:27 -08001360 handle = ion_handle_get_by_id(client, (int)data.handle);
1361 if (IS_ERR(handle))
1362 return PTR_ERR(handle);
Colin Crossafde3d32013-08-20 22:59:41 -07001363 ion_free(client, handle);
Colin Cross6b051302013-11-05 16:51:27 -08001364 ion_handle_put(handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001365 break;
1366 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001367 case ION_IOC_SHARE:
Laura Abbottb629a822013-04-18 09:56:04 -07001368 case ION_IOC_MAP:
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001369 {
1370 struct ion_fd_data data;
Colin Crossafde3d32013-08-20 22:59:41 -07001371 struct ion_handle *handle;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001372 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1373 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001374
Colin Cross6b051302013-11-05 16:51:27 -08001375 handle = ion_handle_get_by_id(client, (int)data.handle);
1376 if (IS_ERR(handle))
1377 return PTR_ERR(handle);
Colin Crossafde3d32013-08-20 22:59:41 -07001378 data.fd = ion_share_dma_buf_fd(client, handle);
Colin Cross6b051302013-11-05 16:51:27 -08001379 ion_handle_put(handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001380 if (copy_to_user((void __user *)arg, &data, sizeof(data)))
1381 return -EFAULT;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001382 if (data.fd < 0)
1383 return data.fd;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001384 break;
1385 }
1386 case ION_IOC_IMPORT:
1387 {
1388 struct ion_fd_data data;
Colin Crossafde3d32013-08-20 22:59:41 -07001389 struct ion_handle *handle;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001390 int ret = 0;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001391 if (copy_from_user(&data, (void __user *)arg,
1392 sizeof(struct ion_fd_data)))
1393 return -EFAULT;
Colin Crossafde3d32013-08-20 22:59:41 -07001394 handle = ion_import_dma_buf(client, data.fd);
1395 if (IS_ERR(handle))
1396 ret = PTR_ERR(handle);
1397 else
Rom Lemarchand7c2d6792013-10-23 15:09:11 -07001398 data.handle = (ion_user_handle_t)handle->id;
Colin Crossafde3d32013-08-20 22:59:41 -07001399
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001400 if (copy_to_user((void __user *)arg, &data,
1401 sizeof(struct ion_fd_data)))
1402 return -EFAULT;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001403 if (ret < 0)
1404 return ret;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001405 break;
1406 }
Rebecca Schultz Zavinf4419222012-06-26 13:17:34 -07001407 case ION_IOC_SYNC:
1408 {
1409 struct ion_fd_data data;
1410 if (copy_from_user(&data, (void __user *)arg,
1411 sizeof(struct ion_fd_data)))
1412 return -EFAULT;
1413 ion_sync_for_device(client, data.fd);
1414 break;
1415 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001416 case ION_IOC_CUSTOM:
1417 {
1418 struct ion_device *dev = client->dev;
1419 struct ion_custom_data data;
1420
1421 if (!dev->custom_ioctl)
1422 return -ENOTTY;
1423 if (copy_from_user(&data, (void __user *)arg,
1424 sizeof(struct ion_custom_data)))
1425 return -EFAULT;
1426 return dev->custom_ioctl(client, data.cmd, data.arg);
1427 }
Laura Abbottabcb6f72011-10-04 16:26:49 -07001428 case ION_IOC_CLEAN_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001429 return client->dev->custom_ioctl(client,
1430 ION_IOC_CLEAN_CACHES, arg);
Laura Abbottabcb6f72011-10-04 16:26:49 -07001431 case ION_IOC_INV_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001432 return client->dev->custom_ioctl(client,
1433 ION_IOC_INV_CACHES, arg);
Laura Abbottabcb6f72011-10-04 16:26:49 -07001434 case ION_IOC_CLEAN_INV_CACHES:
Mitchel Humpherysd88b8eb2012-09-04 17:00:29 -07001435 return client->dev->custom_ioctl(client,
1436 ION_IOC_CLEAN_INV_CACHES, arg);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001437 default:
1438 return -ENOTTY;
1439 }
1440 return 0;
1441}
1442
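/*
 * Example (illustrative, userspace side, so kept compiled out): the ioctl
 * sequence a client typically runs against /dev/ion, matching the handler
 * above.  ION_IOC_MAP behaves identically to ION_IOC_SHARE here, and
 * ION_IOC_SYNC flushes the whole buffer toward the device.  The heap mask
 * and flags are placeholders; real callers pick values matching the heaps
 * registered on their platform, and error handling is omitted for brevity.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ion.h>

static int example_alloc_and_share(size_t len)
{
	struct ion_allocation_data alloc_data = {
		.len = len,
		.align = 0,
		.heap_mask = ~0,	/* placeholder: allow any heap */
		.flags = 0,
	};
	struct ion_fd_data fd_data;
	struct ion_handle_data free_data;
	int ion_fd = open("/dev/ion", O_RDONLY);

	ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data);	/* returns a handle */

	fd_data.handle = alloc_data.handle;
	ioctl(ion_fd, ION_IOC_SHARE, &fd_data);		/* handle -> dma-buf fd */
	/* fd_data.fd can now be mmap()ed or passed to another process */

	free_data.handle = alloc_data.handle;
	ioctl(ion_fd, ION_IOC_FREE, &free_data);	/* drop the handle */
	close(ion_fd);
	return fd_data.fd;
}
#endif
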
1443static int ion_release(struct inode *inode, struct file *file)
1444{
1445 struct ion_client *client = file->private_data;
1446
1447 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbottb14ed962012-01-30 14:18:08 -08001448 ion_client_destroy(client);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001449 return 0;
1450}
1451
1452static int ion_open(struct inode *inode, struct file *file)
1453{
1454 struct miscdevice *miscdev = file->private_data;
1455 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1456 struct ion_client *client;
Laura Abbotteed86032011-12-05 15:32:36 -08001457 char debug_name[64];
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001458
1459 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbotteed86032011-12-05 15:32:36 -08001460 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
Rebecca Schultz Zavin75aec5b2012-12-11 15:23:14 -08001461 client = ion_client_create(dev, debug_name);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001462 if (IS_ERR_OR_NULL(client))
1463 return PTR_ERR(client);
1464 file->private_data = client;
1465
1466 return 0;
1467}
1468
1469static const struct file_operations ion_fops = {
1470 .owner = THIS_MODULE,
1471 .open = ion_open,
1472 .release = ion_release,
1473 .unlocked_ioctl = ion_ioctl,
1474};
1475
1476static size_t ion_debug_heap_total(struct ion_client *client,
Laura Abbottb629a822013-04-18 09:56:04 -07001477 unsigned int id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001478{
1479 size_t size = 0;
1480 struct rb_node *n;
1481
1482 mutex_lock(&client->lock);
1483 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1484 struct ion_handle *handle = rb_entry(n,
1485 struct ion_handle,
1486 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001487 if (handle->buffer->heap->id == id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001488 size += handle->buffer->size;
1489 }
1490 mutex_unlock(&client->lock);
1491 return size;
1492}
1493
Olav Haugan0671b9a2012-05-25 11:58:56 -07001494/**
Olav Haugan0671b9a2012-05-25 11:58:56 -07001495 * Create a mem_map of the heap.
1496 * @param s seq_file to log error message to.
1497 * @param heap The heap to create mem_map for.
1498 * @param mem_map The mem map to be created.
1499 */
1500void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
Mitchel Humpherysee0aa9c2013-11-15 22:56:04 -08001501 struct list_head *mem_map)
Olav Haugan0671b9a2012-05-25 11:58:56 -07001502{
1503 struct ion_device *dev = heap->dev;
Mitchel Humpherys864a3bc2013-11-04 15:23:05 -08001504 struct rb_node *cnode;
Chintan Pandyadaf75622013-01-29 19:40:01 +05301505 size_t size;
Mitchel Humpherys864a3bc2013-11-04 15:23:05 -08001506 struct ion_client *client;
Chintan Pandyadaf75622013-01-29 19:40:01 +05301507
1508 if (!heap->ops->phys)
1509 return;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001510
Mitchel Humpherys864a3bc2013-11-04 15:23:05 -08001511 down_read(&dev->lock);
1512 for (cnode = rb_first(&dev->clients); cnode; cnode = rb_next(cnode)) {
1513 struct rb_node *hnode;
1514 client = rb_entry(cnode, struct ion_client, node);
Chintan Pandyadaf75622013-01-29 19:40:01 +05301515
Mitchel Humpherys864a3bc2013-11-04 15:23:05 -08001516 mutex_lock(&client->lock);
1517 for (hnode = rb_first(&client->handles);
1518 hnode;
1519 hnode = rb_next(hnode)) {
1520 struct ion_handle *handle = rb_entry(
1521 hnode, struct ion_handle, node);
1522 if (handle->buffer->heap == heap) {
1523 struct mem_map_data *data =
1524 kzalloc(sizeof(*data), GFP_KERNEL);
1525 if (!data)
1526 goto inner_error;
1527 heap->ops->phys(heap, handle->buffer,
1528 &(data->addr), &size);
1529 data->size = (unsigned long) size;
1530 data->addr_end = data->addr + data->size - 1;
1531 data->client_name = kstrdup(client->name,
1532 GFP_KERNEL);
1533 if (!data->client_name) {
1534 kfree(data);
1535 goto inner_error;
1536 }
1537 list_add(&data->node, mem_map);
1538 }
Olav Haugan0671b9a2012-05-25 11:58:56 -07001539 }
Mitchel Humpherys864a3bc2013-11-04 15:23:05 -08001540 mutex_unlock(&client->lock);
Olav Haugan0671b9a2012-05-25 11:58:56 -07001541 }
Mitchel Humpherys864a3bc2013-11-04 15:23:05 -08001542 up_read(&dev->lock);
1543 return;
1544
1545inner_error:
1546 seq_puts(s,
1547 "ERROR: out of memory. Part of memory map will not be logged\n");
1548 mutex_unlock(&client->lock);
1549 up_read(&dev->lock);
Olav Haugan0671b9a2012-05-25 11:58:56 -07001550}
1551
1552/**
1553 * Free the memory allocated by ion_debug_mem_map_create
1554 * @param mem_map The mem map to free.
1555 */
Mitchel Humpherysee0aa9c2013-11-15 22:56:04 -08001556static void ion_debug_mem_map_destroy(struct list_head *mem_map)
Olav Haugan0671b9a2012-05-25 11:58:56 -07001557{
1558 if (mem_map) {
Mitchel Humpherysee0aa9c2013-11-15 22:56:04 -08001559 struct mem_map_data *data, *tmp;
1560 list_for_each_entry_safe(data, tmp, mem_map, node) {
1561 list_del(&data->node);
Mitchel Humpherys864a3bc2013-11-04 15:23:05 -08001562 kfree(data->client_name);
Olav Haugan0671b9a2012-05-25 11:58:56 -07001563 kfree(data);
1564 }
1565 }
1566}
1567
Mitchel Humpherysee0aa9c2013-11-15 22:56:04 -08001568static int mem_map_cmp(void *priv, struct list_head *a, struct list_head *b)
1569{
1570 struct mem_map_data *d1, *d2;
1571 d1 = list_entry(a, struct mem_map_data, node);
1572 d2 = list_entry(b, struct mem_map_data, node);
1573 	if (d1->addr == d2->addr)
1574 		return (d1->size > d2->size) - (d1->size < d2->size);
1575 	return (d1->addr > d2->addr) - (d1->addr < d2->addr);
1576}
1577
Olav Haugan0671b9a2012-05-25 11:58:56 -07001578/**
1579 * Print heap debug information.
1580 * @param s seq_file to log message to.
1581 * @param heap pointer to heap that we will print debug information for.
1582 */
1583static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
1584{
1585 if (heap->ops->print_debug) {
Mitchel Humpherysee0aa9c2013-11-15 22:56:04 -08001586 struct list_head mem_map = LIST_HEAD_INIT(mem_map);
Olav Haugan0671b9a2012-05-25 11:58:56 -07001587 ion_debug_mem_map_create(s, heap, &mem_map);
Mitchel Humpherysee0aa9c2013-11-15 22:56:04 -08001588 list_sort(NULL, &mem_map, mem_map_cmp);
Olav Haugan0671b9a2012-05-25 11:58:56 -07001589 heap->ops->print_debug(heap, s, &mem_map);
1590 ion_debug_mem_map_destroy(&mem_map);
1591 }
1592}
1593
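/*
 * Example (illustrative sketch): how a heap-specific print_debug
 * implementation might walk the sorted mem_map built above.  Only the
 * mem_map_data fields that ion_debug_mem_map_create() fills in are used;
 * the exact print_debug prototype is defined by the heap ops and is
 * assumed to match the call in ion_heap_print_debug().
 */
static void example_dump_mem_map(struct seq_file *s, struct list_head *mem_map)
{
	struct mem_map_data *data;

	seq_printf(s, "%16s %16s %16s %16s\n",
		   "client", "start", "end", "size");
	list_for_each_entry(data, mem_map, node)
		seq_printf(s, "%16s %16lx %16lx %16lu\n",
			   data->client_name,
			   (unsigned long)data->addr,
			   (unsigned long)data->addr_end,
			   (unsigned long)data->size);
}
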
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001594static int ion_debug_heap_show(struct seq_file *s, void *unused)
1595{
1596 struct ion_heap *heap = s->private;
1597 struct ion_device *dev = heap->dev;
1598 struct rb_node *n;
Rebecca Schultz Zavindb70ae62012-08-28 17:27:22 -07001599 size_t total_size = 0;
1600 size_t total_orphaned_size = 0;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001601
1602 	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
Rebecca Schultz Zavindb70ae62012-08-28 17:27:22 -07001603 seq_printf(s, "----------------------------------------------------\n");
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001604
Mitchel Humpherysc1e36d02013-11-18 16:47:00 -08001605 down_read(&dev->lock);
Laura Abbottb14ed962012-01-30 14:18:08 -08001606 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001607 struct ion_client *client = rb_entry(n, struct ion_client,
1608 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001609 size_t size = ion_debug_heap_total(client, heap->id);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001610 if (!size)
1611 continue;
Laura Abbottb14ed962012-01-30 14:18:08 -08001612 if (client->task) {
1613 char task_comm[TASK_COMM_LEN];
1614
1615 get_task_comm(task_comm, client->task);
1616 			seq_printf(s, "%16s %16u %16zu\n", task_comm,
1617 client->pid, size);
1618 } else {
1619 			seq_printf(s, "%16s %16u %16zu\n", client->name,
1620 client->pid, size);
1621 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001622 }
Mitchel Humpherysc1e36d02013-11-18 16:47:00 -08001623 up_read(&dev->lock);
Rebecca Schultz Zavindb70ae62012-08-28 17:27:22 -07001624 seq_printf(s, "----------------------------------------------------\n");
1625 	seq_printf(s, "orphaned allocations (info is from last known client):\n");
Laura Abbott5046c952013-04-18 09:40:43 -07001627 mutex_lock(&dev->buffer_lock);
Rebecca Schultz Zavindb70ae62012-08-28 17:27:22 -07001628 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1629 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1630 node);
Laura Abbott5046c952013-04-18 09:40:43 -07001631 if (buffer->heap->id != heap->id)
1632 continue;
1633 total_size += buffer->size;
Rebecca Schultz Zavindb70ae62012-08-28 17:27:22 -07001634 if (!buffer->handle_count) {
Laura Abbott5046c952013-04-18 09:40:43 -07001635 			seq_printf(s, "%16s %16u %16zu %d %d\n", buffer->task_comm,
1636 buffer->pid, buffer->size, buffer->kmap_cnt,
1637 atomic_read(&buffer->ref.refcount));
Rebecca Schultz Zavindb70ae62012-08-28 17:27:22 -07001638 total_orphaned_size += buffer->size;
1639 }
1640 }
Laura Abbott5046c952013-04-18 09:40:43 -07001641 mutex_unlock(&dev->buffer_lock);
Rebecca Schultz Zavindb70ae62012-08-28 17:27:22 -07001642 seq_printf(s, "----------------------------------------------------\n");
1643 	seq_printf(s, "%16s %16zu\n", "total orphaned",
1644 total_orphaned_size);
1645 	seq_printf(s, "%16s %16zu\n", "total ", total_size);
1646 seq_printf(s, "----------------------------------------------------\n");
1647
Laura Abbott5046c952013-04-18 09:40:43 -07001648 if (heap->debug_show)
1649 heap->debug_show(heap, s, unused);
1650
Olav Haugan0671b9a2012-05-25 11:58:56 -07001651 ion_heap_print_debug(s, heap);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001652 return 0;
1653}
1654
1655static int ion_debug_heap_open(struct inode *inode, struct file *file)
1656{
1657 return single_open(file, ion_debug_heap_show, inode->i_private);
1658}
1659
1660static const struct file_operations debug_heap_fops = {
1661 .open = ion_debug_heap_open,
1662 .read = seq_read,
1663 .llseek = seq_lseek,
1664 .release = single_release,
1665};
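
/*
 * Each heap registered through ion_device_add_heap() below gets a debugfs
 * file named after the heap under the device's "heaps" directory
 * (typically /sys/kernel/debug/ion/heaps/<heap name>).  Reading it prints
 * per-client usage of that heap, orphaned buffers whose handles are gone,
 * running totals, and any heap-specific debug_show/print_debug output.
 */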
1666
Rebecca Schultz Zavin83ff5da2013-05-23 13:37:25 -07001667#ifdef DEBUG_HEAP_SHRINKER
1668static int debug_shrink_set(void *data, u64 val)
Rebecca Schultz Zavin618d6be2013-02-13 14:48:11 -08001669{
Rebecca Schultz Zavin83ff5da2013-05-23 13:37:25 -07001670 struct ion_heap *heap = data;
1671 struct shrink_control sc;
1672 int objs;
Rebecca Schultz Zavin618d6be2013-02-13 14:48:11 -08001673
Rebecca Schultz Zavin83ff5da2013-05-23 13:37:25 -07001674 sc.gfp_mask = -1;
1675 sc.nr_to_scan = 0;
Rebecca Schultz Zavin618d6be2013-02-13 14:48:11 -08001676
Rebecca Schultz Zavin83ff5da2013-05-23 13:37:25 -07001677 if (!val)
1678 return 0;
1679
1680 objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1681 sc.nr_to_scan = objs;
1682
1683 heap->shrinker.shrink(&heap->shrinker, &sc);
1684 return 0;
Rebecca Schultz Zavin618d6be2013-02-13 14:48:11 -08001685}
1686
Rebecca Schultz Zavin83ff5da2013-05-23 13:37:25 -07001687static int debug_shrink_get(void *data, u64 *val)
Rebecca Schultz Zavin618d6be2013-02-13 14:48:11 -08001688{
Rebecca Schultz Zavin83ff5da2013-05-23 13:37:25 -07001689 struct ion_heap *heap = data;
1690 struct shrink_control sc;
1691 int objs;
Rebecca Schultz Zavin618d6be2013-02-13 14:48:11 -08001692
Rebecca Schultz Zavin83ff5da2013-05-23 13:37:25 -07001693 sc.gfp_mask = -1;
1694 sc.nr_to_scan = 0;
Rebecca Schultz Zavin618d6be2013-02-13 14:48:11 -08001695
Rebecca Schultz Zavin83ff5da2013-05-23 13:37:25 -07001696 objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1697 *val = objs;
1698 return 0;
Rebecca Schultz Zavin618d6be2013-02-13 14:48:11 -08001699}
1700
Rebecca Schultz Zavin83ff5da2013-05-23 13:37:25 -07001701DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1702 debug_shrink_set, "%llu\n");
1703#endif
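
/*
 * When DEBUG_HEAP_SHRINKER is defined, ion_device_add_heap() below also
 * creates a "<heap name>_shrink" debugfs file for each heap that has a
 * shrinker: reading it reports how many objects the shrinker could free,
 * and writing any non-zero value forces a full shrink pass.
 */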
Rebecca Schultz Zavin618d6be2013-02-13 14:48:11 -08001704
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001705void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1706{
Mitchel Humpherys1fb95df2013-08-29 15:28:58 -07001707 struct dentry *debug_file;
1708
Laura Abbottb14ed962012-01-30 14:18:08 -08001709 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1710 !heap->ops->unmap_dma)
1711 		pr_err("%s: cannot add heap with invalid ops struct.\n",
1712 __func__);
1713
Rebecca Schultz Zavin83ff5da2013-05-23 13:37:25 -07001714 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1715 ion_heap_init_deferred_free(heap);
Rebecca Schultz Zavin618d6be2013-02-13 14:48:11 -08001716
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001717 heap->dev = dev;
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001718 down_write(&dev->lock);
Rebecca Schultz Zavin83ff5da2013-05-23 13:37:25 -07001719 /* use negative heap->id to reverse the priority -- when traversing
1720 the list later attempt higher id numbers first */
1721 plist_node_init(&heap->node, -heap->id);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001722 plist_add(&heap->node, &dev->heaps);
Mitchel Humpherys1fb95df2013-08-29 15:28:58 -07001723 debug_file = debugfs_create_file(heap->name, 0664,
1724 dev->heaps_debug_root, heap,
1725 &debug_heap_fops);
1726
1727 if (!debug_file) {
1728 char buf[256], *path;
1729 path = dentry_path(dev->heaps_debug_root, buf, 256);
1730 		pr_err("Failed to create heap debugfs at %s/%s\n",
1731 path, heap->name);
1732 }
1733
Rebecca Schultz Zavin83ff5da2013-05-23 13:37:25 -07001734#ifdef DEBUG_HEAP_SHRINKER
1735 if (heap->shrinker.shrink) {
1736 char debug_name[64];
1737
1738 snprintf(debug_name, 64, "%s_shrink", heap->name);
Mitchel Humpherys1fb95df2013-08-29 15:28:58 -07001739 debug_file = debugfs_create_file(
1740 debug_name, 0644, dev->heaps_debug_root, heap,
1741 &debug_shrink_fops);
1742 if (!debug_file) {
1743 char buf[256], *path;
1744 path = dentry_path(dev->heaps_debug_root, buf, 256);
1745 			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1746 path, debug_name);
1747 }
Rebecca Schultz Zavin83ff5da2013-05-23 13:37:25 -07001748 }
1749#endif
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001750 up_write(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001751}
1752
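/*
 * Example (illustrative sketch, compiled out): how a platform driver might
 * bring up an ION device and register one heap with it.  ion_heap_create()
 * is assumed to be the usual ion_priv.h helper that builds a heap from a
 * struct ion_platform_heap; a real driver would loop over its platform
 * data and keep the returned pointers around for cleanup.
 */
#if 0
static struct ion_device *example_ion_probe(struct ion_platform_heap *heap_data,
					    long (*custom_ioctl)(struct ion_client *,
								 unsigned int,
								 unsigned long))
{
	struct ion_device *idev;
	struct ion_heap *heap;

	idev = ion_device_create(custom_ioctl);
	if (IS_ERR_OR_NULL(idev))
		return idev;

	heap = ion_heap_create(heap_data);	/* assumed helper from ion_priv.h */
	if (!IS_ERR_OR_NULL(heap)) {
		heap->name = heap_data->name;
		ion_device_add_heap(idev, heap);
	}
	return idev;
}
#endif
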
Laura Abbott93619302012-10-11 11:51:40 -07001753int ion_secure_handle(struct ion_client *client, struct ion_handle *handle,
1754 int version, void *data, int flags)
1755{
1756 int ret = -EINVAL;
1757 struct ion_heap *heap;
1758 struct ion_buffer *buffer;
1759
1760 mutex_lock(&client->lock);
1761 if (!ion_handle_validate(client, handle)) {
1762 WARN(1, "%s: invalid handle passed to secure.\n", __func__);
1763 goto out_unlock;
1764 }
1765
1766 buffer = handle->buffer;
1767 heap = buffer->heap;
1768
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001769 if (!ion_heap_allow_handle_secure(heap->type)) {
Laura Abbott93619302012-10-11 11:51:40 -07001770 		pr_err("%s: cannot secure buffer from non-secure heap\n",
1771 __func__);
1772 goto out_unlock;
1773 }
1774
1775 BUG_ON(!buffer->heap->ops->secure_buffer);
1776 /*
1777 * Protect the handle via the client lock to ensure we aren't
1778 * racing with free
1779 */
1780 ret = buffer->heap->ops->secure_buffer(buffer, version, data, flags);
1781
1782out_unlock:
1783 mutex_unlock(&client->lock);
1784 return ret;
1785}
1786
1787int ion_unsecure_handle(struct ion_client *client, struct ion_handle *handle)
1788{
1789 int ret = -EINVAL;
1790 struct ion_heap *heap;
1791 struct ion_buffer *buffer;
1792
1793 mutex_lock(&client->lock);
1794 if (!ion_handle_validate(client, handle)) {
1795 		WARN(1, "%s: invalid handle passed to unsecure.\n", __func__);
1796 goto out_unlock;
1797 }
1798
1799 buffer = handle->buffer;
1800 heap = buffer->heap;
1801
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001802 if (!ion_heap_allow_handle_secure(heap->type)) {
Laura Abbott93619302012-10-11 11:51:40 -07001803 		pr_err("%s: cannot unsecure buffer from non-secure heap\n",
1804 __func__);
1805 goto out_unlock;
1806 }
1807
1808 BUG_ON(!buffer->heap->ops->unsecure_buffer);
1809 /*
1810 * Protect the handle via the client lock to ensure we aren't
1811 * racing with free
1812 */
1813 ret = buffer->heap->ops->unsecure_buffer(buffer, 0);
1814
1815out_unlock:
1816 mutex_unlock(&client->lock);
1817 return ret;
1818}
1819
Laura Abbott7e446482012-06-13 15:59:39 -07001820int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
1821 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001822{
Olav Haugan0a852512012-01-09 10:20:55 -08001823 int ret_val = 0;
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001824 struct ion_heap *heap;
Olav Haugan0a852512012-01-09 10:20:55 -08001825
1826 /*
1827 * traverse the list of heaps available in this system
1828 * and find the heap that is specified.
1829 */
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001830 down_write(&dev->lock);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001831 plist_for_each_entry(heap, &dev->heaps, node) {
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001832 if (!ion_heap_allow_heap_secure(heap->type))
Olav Haugan0a852512012-01-09 10:20:55 -08001833 continue;
1834 if (ION_HEAP(heap->id) != heap_id)
1835 continue;
1836 if (heap->ops->secure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001837 ret_val = heap->ops->secure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001838 else
1839 ret_val = -EINVAL;
1840 break;
1841 }
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001842 up_write(&dev->lock);
Olav Haugan0a852512012-01-09 10:20:55 -08001843 return ret_val;
1844}
Olav Hauganbd453a92012-07-05 14:21:34 -07001845EXPORT_SYMBOL(ion_secure_heap);
Olav Haugan0a852512012-01-09 10:20:55 -08001846
Laura Abbott7c1b8aa2013-06-27 18:20:47 -07001847int ion_walk_heaps(struct ion_client *client, int heap_id, void *data,
1848 int (*f)(struct ion_heap *heap, void *data))
1849{
1850 int ret_val = -EINVAL;
1851 struct ion_heap *heap;
1852 struct ion_device *dev = client->dev;
1853 /*
1854 * traverse the list of heaps available in this system
1855 * and find the heap that is specified.
1856 */
1857 down_write(&dev->lock);
1858 plist_for_each_entry(heap, &dev->heaps, node) {
1859 if (ION_HEAP(heap->id) != heap_id)
1860 continue;
1861 ret_val = f(heap, data);
1862 break;
1863 }
1864 up_write(&dev->lock);
1865 return ret_val;
1866}
1867EXPORT_SYMBOL(ion_walk_heaps);
1868
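/*
 * Example (illustrative sketch): a callback for ion_walk_heaps() above.
 * The walk locks the device, finds the heap whose id matches, and passes
 * it to the callback together with the opaque data pointer; here the
 * callback simply records the heap's name in a caller-provided struct.
 */
struct example_heap_query {
	const char *name;
};

static int example_heap_query_cb(struct ion_heap *heap, void *data)
{
	struct example_heap_query *query = data;

	query->name = heap->name;
	return 0;
}
/* caller: ion_walk_heaps(client, heap_id, &query, example_heap_query_cb); */
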
Laura Abbott7e446482012-06-13 15:59:39 -07001869int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
1870 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001871{
Olav Haugan0a852512012-01-09 10:20:55 -08001872 int ret_val = 0;
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001873 struct ion_heap *heap;
Olav Haugan0a852512012-01-09 10:20:55 -08001874
1875 /*
1876 * traverse the list of heaps available in this system
1877 * and find the heap that is specified.
1878 */
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001879 down_write(&dev->lock);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001880 plist_for_each_entry(heap, &dev->heaps, node) {
Laura Abbott4afbd8b2013-02-15 09:21:33 -08001881 if (!ion_heap_allow_heap_secure(heap->type))
Olav Haugan0a852512012-01-09 10:20:55 -08001882 continue;
1883 if (ION_HEAP(heap->id) != heap_id)
1884 continue;
1885 		if (heap->ops->unsecure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001886 ret_val = heap->ops->unsecure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001887 else
1888 ret_val = -EINVAL;
1889 break;
1890 }
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001891 up_write(&dev->lock);
Olav Haugan0a852512012-01-09 10:20:55 -08001892 return ret_val;
1893}
Olav Hauganbd453a92012-07-05 14:21:34 -07001894EXPORT_SYMBOL(ion_unsecure_heap);
Olav Haugan0a852512012-01-09 10:20:55 -08001895
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001896struct ion_device *ion_device_create(long (*custom_ioctl)
1897 (struct ion_client *client,
1898 unsigned int cmd,
1899 unsigned long arg))
1900{
1901 struct ion_device *idev;
1902 int ret;
1903
1904 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1905 if (!idev)
1906 return ERR_PTR(-ENOMEM);
1907
1908 idev->dev.minor = MISC_DYNAMIC_MINOR;
1909 idev->dev.name = "ion";
1910 idev->dev.fops = &ion_fops;
1911 idev->dev.parent = NULL;
1912 ret = misc_register(&idev->dev);
1913 if (ret) {
1914 		pr_err("ion: failed to register misc device.\n");
 		kfree(idev);
1915 		return ERR_PTR(ret);
1916 }
1917
1918 idev->debug_root = debugfs_create_dir("ion", NULL);
Mitchel Humpherys1fb95df2013-08-29 15:28:58 -07001919 if (!idev->debug_root) {
1920 pr_err("ion: failed to create debugfs root directory.\n");
1921 goto debugfs_done;
1922 }
1923 idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1924 if (!idev->heaps_debug_root) {
1925 pr_err("ion: failed to create debugfs heaps directory.\n");
1926 goto debugfs_done;
1927 }
1928 idev->clients_debug_root = debugfs_create_dir("clients",
1929 idev->debug_root);
1930 if (!idev->clients_debug_root)
1931 pr_err("ion: failed to create debugfs clients directory.\n");
1932
1933debugfs_done:
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001934
1935 idev->custom_ioctl = custom_ioctl;
1936 idev->buffers = RB_ROOT;
Rebecca Schultz Zavin52421e22012-10-11 12:46:05 -07001937 mutex_init(&idev->buffer_lock);
1938 init_rwsem(&idev->lock);
Rebecca Schultz Zavin47b98882012-11-15 10:36:10 -08001939 plist_head_init(&idev->heaps);
Laura Abbottb14ed962012-01-30 14:18:08 -08001940 idev->clients = RB_ROOT;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001941 return idev;
1942}
1943
1944void ion_device_destroy(struct ion_device *dev)
1945{
1946 misc_deregister(&dev->dev);
Mitchel Humpherys1fb95df2013-08-29 15:28:58 -07001947 debugfs_remove_recursive(dev->debug_root);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001948 /* XXX need to free the heaps and clients ? */
1949 kfree(dev);
1950}
Laura Abbottb14ed962012-01-30 14:18:08 -08001951
1952void __init ion_reserve(struct ion_platform_data *data)
1953{
Rebecca Schultz Zavinfbce1512012-11-15 10:31:02 -08001954 int i;
Laura Abbottb14ed962012-01-30 14:18:08 -08001955
1956 for (i = 0; i < data->nr; i++) {
1957 if (data->heaps[i].size == 0)
1958 continue;
Rebecca Schultz Zavinfbce1512012-11-15 10:31:02 -08001959
1960 if (data->heaps[i].base == 0) {
1961 phys_addr_t paddr;
1962 paddr = memblock_alloc_base(data->heaps[i].size,
1963 data->heaps[i].align,
1964 MEMBLOCK_ALLOC_ANYWHERE);
1965 if (!paddr) {
1966 pr_err("%s: error allocating memblock for "
1967 "heap %d\n",
1968 __func__, i);
1969 continue;
1970 }
1971 data->heaps[i].base = paddr;
1972 } else {
1973 int ret = memblock_reserve(data->heaps[i].base,
1974 data->heaps[i].size);
1975 if (ret)
1976 pr_err("memblock reserve of %x@%pa failed\n",
1977 data->heaps[i].size,
1978 &data->heaps[i].base);
1979 }
1980 pr_info("%s: %s reserved base %pa size %d\n", __func__,
1981 data->heaps[i].name,
1982 &data->heaps[i].base,
1983 data->heaps[i].size);
Laura Abbottb14ed962012-01-30 14:18:08 -08001984 }
1985}
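
/*
 * Example (illustrative sketch, compiled out): describing a carveout heap
 * in platform data and reserving it at boot with ion_reserve().  The sizes
 * and names are placeholders, and the initializer assumes the variant of
 * struct ion_platform_data whose heaps member is a pointer; leaving .base
 * at 0 asks ion_reserve() to allocate the region from memblock, while a
 * non-zero .base reserves that exact range.
 */
#if 0
static struct ion_platform_heap example_heaps[] = {
	{
		.name  = "example_carveout",
		.size  = SZ_16M,	/* placeholder size */
		.align = SZ_1M,
		.base  = 0,		/* let memblock pick the address */
	},
};

static struct ion_platform_data example_ion_pdata = {
	.nr    = ARRAY_SIZE(example_heaps),
	.heaps = example_heaps,
};

/* called from the machine's reserve() hook, before the page allocator is up */
static void __init example_board_reserve(void)
{
	ion_reserve(&example_ion_pdata);
}
#endif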