/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

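/*
 * Entries in buffer->pages[] are tagged using the low bit of the page
 * pointer for buffers whose user mappings are faulted in page by page:
 * struct page pointers are at least word aligned, so bit 0 is free to
 * record that a page has been touched by the CPU and must be synced
 * before the next device access. The helpers below pack and unpack
 * that tag.
 */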
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	if (buffer->sg_table == NULL) {
		WARN_ONCE(1, "This heap needs to set the sgtable");
		ret = -EINVAL;
		goto err1;
	}

	table = buffer->sg_table;
	buffer->dev = dev;
	buffer->size = len;

	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * This will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here. However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, i.e. if it has a
	 * cached mapping that mapping has been invalidated.
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (buffer->kmap_cnt > 0) {
		pr_warn_once("%s: buffer still mapped in the kernel\n",
			     __func__);
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	}
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * When a buffer is removed from a handle and it is not held by
	 * any other handle, copy the taskcomm and pid of the process it
	 * is being removed from into the buffer. At that point there is
	 * no longer any way to track which processes use the buffer; it
	 * only exists as a dma_buf file descriptor. The taskcomm and pid
	 * provide a debug hint as to where this fd is in the system.
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

/* Must hold the client lock */
static struct ion_handle *ion_handle_get_check_overflow(
					struct ion_handle *handle)
{
	if (atomic_read(&handle->ref.refcount) + 1 == 0)
		return ERR_PTR(-EOVERFLOW);
	ion_handle_get(handle);
	return handle;
}

int ion_handle_put_nolock(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
					       int id)
{
	struct ion_handle *handle;

	handle = idr_find(&client->idr, id);
	if (handle)
		return ion_handle_get_check_overflow(handle);

	return ERR_PTR(-EINVAL);
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order. If the heap type is supported by the client, and matches the
	 * request of the caller, allocate from it. Repeat until allocate has
	 * succeeded or all heaps have been tried.
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
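
/*
 * Illustrative use of the allocation API above (a sketch, not part of
 * the original file; it assumes a system heap whose id matches
 * ION_HEAP_TYPE_SYSTEM, which is how legacy boards typically built the
 * heap id mask):
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, SZ_64K, PAGE_SIZE,
 *			   1 << ION_HEAP_TYPE_SYSTEM, ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 */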

void ion_free_nolock(struct ion_client *client,
		     struct ion_handle *handle)
{
	if (!ion_handle_validate(client, handle)) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put_nolock(handle);
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	ion_free_nolock(client, handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
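
/*
 * Illustrative use of the kernel mapping helpers above (a sketch, not
 * part of the original file; buffer_size is a caller-supplied length):
 * map a handle, touch the memory, then drop the mapping so the kmap
 * reference counts stay balanced.
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, buffer_size);
 *	ion_unmap_kernel(client, handle);
 */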

static struct mutex debugfs_mutex;
static struct rb_root *ion_root_client;
static int is_client_alive(struct ion_client *client)
{
	struct rb_node *node;
	struct ion_client *tmp;
	struct ion_device *dev;

	node = ion_root_client->rb_node;
	dev = container_of(ion_root_client, struct ion_device, clients);

	down_read(&dev->lock);
	while (node) {
		tmp = rb_entry(node, struct ion_client, node);
		if (client < tmp) {
			node = node->rb_left;
		} else if (client > tmp) {
			node = node->rb_right;
		} else {
			up_read(&dev->lock);
			return 1;
		}
	}

	up_read(&dev->lock);
	return 0;
}

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&debugfs_mutex);
	if (!is_client_alive(client)) {
		seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
			   client);
		mutex_unlock(&debugfs_mutex);
		return 0;
	}

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	mutex_unlock(&debugfs_mutex);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	mutex_lock(&debugfs_mutex);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
	mutex_unlock(&debugfs_mutex);
}
EXPORT_SYMBOL(ion_client_destroy);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -0800801
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -0800802static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
803 struct device *dev,
804 enum dma_data_direction direction);
805
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -0800806static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
807 enum dma_data_direction direction)
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -0800808{
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -0800809 struct dma_buf *dmabuf = attachment->dmabuf;
810 struct ion_buffer *buffer = dmabuf->priv;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -0800811
Rebecca Schultz Zavin0b9ec1c2013-12-13 14:23:52 -0800812 ion_buffer_sync_for_device(buffer, attachment->dev, direction);
Rebecca Schultz Zavin29ae6bc2013-12-13 14:23:43 -0800813 return buffer->sg_table;
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -0800814}
815
816static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
817 struct sg_table *table,
818 enum dma_data_direction direction)
819{
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -0800820}
821
void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is
	 * valid for the targeted device, but this works on the currently
	 * targeted hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

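/*
 * Buffers allocated with ION_FLAG_CACHED but without
 * ION_FLAG_CACHED_NEEDS_SYNC are mapped into userspace one page at a
 * time by ion_vm_fault() below. Each faulted page is tagged dirty in
 * buffer->pages; ion_buffer_sync_for_device() then syncs only the
 * dirty pages and zaps every tracked VMA, so pages fault in again (and
 * are re-tagged) on the next CPU access.
 */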
struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static const struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
				 VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);

	return 0;
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

static struct dma_buf *__ion_share_dma_buf(struct ion_client *client,
					   struct ion_handle *handle,
					   bool lock_client)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	if (lock_client)
		mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		if (lock_client)
			mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	if (lock_client)
		mutex_unlock(&client->lock);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	return __ion_share_dma_buf(client, handle, true);
}
EXPORT_SYMBOL(ion_share_dma_buf);
1074
Greg Hackmann3fedc0c2018-08-31 13:06:27 -07001075static int __ion_share_dma_buf_fd(struct ion_client *client,
1076 struct ion_handle *handle, bool lock_client)
Johan Mossberg22ba4322013-12-13 14:24:34 -08001077{
1078 struct dma_buf *dmabuf;
1079 int fd;
1080
Greg Hackmann3fedc0c2018-08-31 13:06:27 -07001081 dmabuf = __ion_share_dma_buf(client, handle, lock_client);
Johan Mossberg22ba4322013-12-13 14:24:34 -08001082 if (IS_ERR(dmabuf))
1083 return PTR_ERR(dmabuf);
1084
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001085 fd = dma_buf_fd(dmabuf, O_CLOEXEC);
Laura Abbott55808b82013-12-13 14:23:57 -08001086 if (fd < 0)
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001087 dma_buf_put(dmabuf);
Laura Abbott55808b82013-12-13 14:23:57 -08001088
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001089 return fd;
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001090}
Greg Hackmann3fedc0c2018-08-31 13:06:27 -07001091
1092int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1093{
1094 return __ion_share_dma_buf_fd(client, handle, true);
1095}
Johan Mossberg22ba4322013-12-13 14:24:34 -08001096EXPORT_SYMBOL(ion_share_dma_buf_fd);

int ion_share_dma_buf_fd_nolock(struct ion_client *client,
				struct ion_handle *handle)
{
	return __ion_share_dma_buf_fd(client, handle, false);
}

struct ion_handle *ion_import_dma_buf(struct ion_client *client,
				      struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		handle = ion_handle_get_check_overflow(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	handle = ion_import_dma_buf(client, dmabuf);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf_fd);
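
/*
 * The export/import pair above round-trips a buffer through a dma_buf
 * fd. An illustrative sketch (not part of the original file):
 *
 *	int fd = ion_share_dma_buf_fd(client_a, handle_a);
 *	...pass fd to another process or client...
 *	struct ion_handle *handle_b = ion_import_dma_buf_fd(client_b, fd);
 *
 * Importing a buffer that this driver exported only takes another
 * reference on the existing ion_buffer; buffers exported by other
 * dma_buf exporters are rejected with -EINVAL.
 */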

int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

Laura Abbott02b23802016-09-07 11:49:59 -07001186int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
1187{
1188 struct ion_device *dev = client->dev;
1189 struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
1190 int ret = -EINVAL, cnt = 0, max_cnt;
1191 struct ion_heap *heap;
1192 struct ion_heap_data hdata;
1193
1194 memset(&hdata, 0, sizeof(hdata));
1195
1196 down_read(&dev->lock);
1197 if (!buffer) {
1198 query->cnt = dev->heap_cnt;
1199 ret = 0;
1200 goto out;
1201 }
1202
1203 if (query->cnt <= 0)
1204 goto out;
1205
1206 max_cnt = query->cnt;
1207
1208 plist_for_each_entry(heap, &dev->heaps, node) {
1209 strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
1210 hdata.name[sizeof(hdata.name) - 1] = '\0';
1211 hdata.type = heap->type;
1212 hdata.heap_id = heap->id;
1213
Dan Carpentercf559022016-10-13 15:55:08 +03001214 if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
1215 ret = -EFAULT;
1216 goto out;
1217 }
Laura Abbott02b23802016-09-07 11:49:59 -07001218
1219 cnt++;
1220 if (cnt >= max_cnt)
1221 break;
1222 }
1223
1224 query->cnt = cnt;
1225out:
1226 up_read(&dev->lock);
1227 return ret;
1228}
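
/*
 * Usage sketch for ion_query_heaps(); callers usually issue it twice,
 * first with query->heaps == 0 to learn the heap count, then with a
 * buffer sized for that many ion_heap_data entries. query.heaps must
 * point at user memory in real use; the names below are illustrative:
 *
 *	struct ion_heap_query query = {};
 *
 *	ion_query_heaps(client, &query);    query.cnt now holds the count
 *	query.heaps = user_buffer_ptr;      room for query.cnt entries
 *	ion_query_heaps(client, &query);    fills in the heap records
 */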

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, debug_name);
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ion_ioctl,
};
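
/*
 * Each open of the ion misc device creates a fresh ion_client named after
 * the caller's thread-group pid; releasing the file destroys the client
 * and every handle it owned. A minimal userspace sketch (the device is
 * registered below with the name "ion", i.e. /dev/ion):
 *
 *	int fd = open("/dev/ion", O_RDWR);
 *	...allocation/import ioctls on fd...
 *	close(fd);
 */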

static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	mutex_lock(&debugfs_mutex);
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	mutex_unlock(&debugfs_mutex);

	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
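
/*
 * The per-heap file created in ion_device_add_heap() below exposes the
 * report above; assuming debugfs is mounted in the usual place and a heap
 * named "system" exists, it can be read with:
 *
 *	cat /sys/kernel/debug/ion/heaps/system
 */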

static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = val;

	/* writing 0 means "drop everything": scan as many objects as exist */
	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
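
/*
 * The <heap>_shrink debugfs file wired up below reads back the current
 * shrinkable object count and shrinks on write; for a hypothetical heap
 * named "system":
 *
 *	cat /sys/kernel/debug/ion/heaps/system_shrink       current count
 *	echo 0 > /sys/kernel/debug/ion/heaps/system_shrink  drop everything
 */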

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct dentry *debug_file;

	if (!heap->ops->allocate || !heap->ops->free)
		pr_err("%s: cannot add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later, attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					 dev->heaps_debug_root, heap,
					 &debug_heap_fops);

	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
		       path, heap->name);
	}

	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
			       path, debug_name);
		}
	}

	dev->heap_cnt++;
	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);
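
/*
 * Registration sketch: a platform driver creates a heap (here via
 * ion_heap_create(); "heap_data" and "idev" are assumed to exist) and
 * hands it to the device. Because of the negated plist priority above,
 * heaps with higher ids are tried first when allocating:
 *
 *	struct ion_heap *heap = ion_heap_create(&heap_data);
 *
 *	if (!IS_ERR(heap))
 *		ion_device_add_heap(idev, heap);
 */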

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						      idev->debug_root);
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	ion_root_client = &idev->clients;
	mutex_init(&debugfs_mutex);
	return idev;
}
EXPORT_SYMBOL(ion_device_create);
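
/*
 * Bring-up sketch: a platform driver creates one ion_device at probe time
 * (NULL means no custom ioctl hook) and tears it down with
 * ion_device_destroy() on removal:
 *
 *	idev = ion_device_create(NULL);
 *	if (IS_ERR(idev))
 *		return PTR_ERR(idev);
 *	...
 *	ion_device_destroy(idev);
 */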

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);